From fba7a6beae0128064903e66f7e9f38a23fcfe4fb Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Sat, 9 Apr 2022 21:09:14 +0800 Subject: [PATCH 0001/2432] Convert edge types input for `LinkNeighborLoader` to canonical form. (#4441) * handle canonical types * update Co-authored-by: rusty1s --- test/loader/test_link_neighbor_loader.py | 4 ++-- torch_geometric/loader/link_neighbor_loader.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 10f5ca6c6176..f919b6953153 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -81,7 +81,7 @@ def test_heterogeneous_link_neighbor_loader(directed): data['author', 'paper'].edge_attr = torch.arange(1500, 2500) loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, - edge_label_index=('paper', 'to', 'author'), + edge_label_index=('paper', 'author'), batch_size=20, directed=directed, shuffle=True) assert str(loader) == 'LinkNeighborLoader()' @@ -112,7 +112,7 @@ def test_heterogeneous_link_neighbor_loader_loop(directed): data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000) loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, - edge_label_index=('paper', 'to', 'paper'), + edge_label_index=('paper', 'paper'), batch_size=20, directed=directed) for batch in loader: diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index a9cc783fc82c..c6775a3fa3a3 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -263,11 +263,16 @@ def get_edge_label_index( if isinstance(edge_label_index[0], str): edge_type = edge_label_index + edge_type = data._to_canonical(*edge_type) + assert edge_type in data.edge_types return edge_type, data[edge_type].edge_index assert len(edge_label_index) == 2 edge_type, edge_label_index = edge_label_index + edge_type = data._to_canonical(*edge_type) + assert edge_type in data.edge_types + if edge_label_index is None: return edge_type, data[edge_type].edge_index From d9c6f54d3bfccd36367ab1afb7175dfb10797bd5 Mon Sep 17 00:00:00 2001 From: Domenico Tortorella Date: Sat, 9 Apr 2022 15:27:12 +0200 Subject: [PATCH 0002/2432] Add Geom-GCN splits to `Planetoid` dataset (#4442) * Add Geom-GCN splits to `Planetoid` dataset * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merge Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- torch_geometric/datasets/planetoid.py | 41 ++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/torch_geometric/datasets/planetoid.py b/torch_geometric/datasets/planetoid.py index 4c4a43adedee..89fdfd21733d 100644 --- a/torch_geometric/datasets/planetoid.py +++ b/torch_geometric/datasets/planetoid.py @@ -1,6 +1,7 @@ import os.path as osp from typing import Callable, List, Optional +import numpy as np import torch from torch_geometric.data import InMemoryDataset, download_url @@ -19,15 +20,18 @@ class Planetoid(InMemoryDataset): name (string): The name of the dataset (:obj:`"Cora"`, :obj:`"CiteSeer"`, :obj:`"PubMed"`). split (string): The type of dataset split - (:obj:`"public"`, :obj:`"full"`, :obj:`"random"`). + (:obj:`"public"`, :obj:`"full"`, :obj:`"geom-gcn"`, + :obj:`"random"`). 
If set to :obj:`"public"`, the split will be the public fixed split - from the - `"Revisiting Semi-Supervised Learning with Graph Embeddings" - `_ paper. + from the `"Revisiting Semi-Supervised Learning with Graph + Embeddings" `_ paper. If set to :obj:`"full"`, all nodes except those in the validation and test sets will be used for training (as in the `"FastGCN: Fast Learning with Graph Convolutional Networks via Importance Sampling" `_ paper). + If set to :obj:`"geom-gcn"`, the 10 public fixed splits from the + `"Geom-GCN: Geometric Graph Convolutional Networks" + `_ paper are given. If set to :obj:`"random"`, train, validation, and test sets will be randomly generated, according to :obj:`num_train_per_class`, :obj:`num_val` and :obj:`num_test`. (default: :obj:`"public"`) @@ -74,6 +78,8 @@ class Planetoid(InMemoryDataset): """ url = 'https://github.com/kimiyoung/planetoid/raw/master/data' + geom_gcn_url = ('https://raw.githubusercontent.com/graphdml-uiuc-jlu/' + 'geom-gcn/master') def __init__(self, root: str, name: str, split: str = "public", num_train_per_class: int = 20, num_val: int = 500, @@ -81,12 +87,12 @@ def __init__(self, root: str, name: str, split: str = "public", pre_transform: Optional[Callable] = None): self.name = name + self.split = split.lower() + assert self.split in ['public', 'full', 'geom-gcn', 'random'] + super().__init__(root, transform, pre_transform) self.data, self.slices = torch.load(self.processed_paths[0]) - self.split = split - assert self.split in ['public', 'full', 'random'] - if split == 'full': data = self.get(0) data.train_mask.fill_(True) @@ -114,10 +120,14 @@ def __init__(self, root: str, name: str, split: str = "public", @property def raw_dir(self) -> str: + if self.split == 'geom-gcn': + return osp.join(self.root, self.name, 'geom-gcn', 'raw') return osp.join(self.root, self.name, 'raw') @property def processed_dir(self) -> str: + if self.split == 'geom-gcn': + return osp.join(self.root, self.name, 'geom-gcn', 'processed') return osp.join(self.root, self.name, 'processed') @property @@ -132,9 +142,26 @@ def processed_file_names(self) -> str: def download(self): for name in self.raw_file_names: download_url(http://github.com/f'%7Bself.url%7D/%7Bname%7D',%20self.raw_dir) + if self.split == 'geom-gcn': + for i in range(10): + url = f'{self.geom_gcn_url}/splits/{self.name.lower()}' + download_url(http://github.com/f'%7Burl%7D_split_0.6_0.2_%7Bi%7D.npz',%20self.raw_dir) def process(self): data = read_planetoid_data(self.raw_dir, self.name) + + if self.split == 'geom-gcn': + train_masks, val_masks, test_masks = [], [], [] + for i in range(10): + name = f'{self.name.lower()}_split_0.6_0.2_{i}.npz' + splits = np.load(osp.join(self.raw_dir, name)) + train_masks.append(torch.from_numpy(splits['train_mask'])) + val_masks.append(torch.from_numpy(splits['val_mask'])) + test_masks.append(torch.from_numpy(splits['test_mask'])) + data.train_mask = torch.stack(train_masks, dim=1) + data.val_mask = torch.stack(val_masks, dim=1) + data.test_mask = torch.stack(test_masks, dim=1) + data = data if self.pre_transform is None else self.pre_transform(data) torch.save(self.collate([data]), self.processed_paths[0]) From ea0bff17e91d74a759a26a021dca91b1e37ae7b3 Mon Sep 17 00:00:00 2001 From: "Wilfried L. 
Bounsi" Date: Sun, 10 Apr 2022 10:35:33 +0100 Subject: [PATCH 0003/2432] Add support for projecting features before aggregation in `SAGEConv` (#4437) * Add support for mean and max pool in SAGEConv * add test * merge Co-authored-by: rusty1s --- test/nn/conv/test_sage_conv.py | 7 ++++--- torch_geometric/nn/conv/sage_conv.py | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index 9b945b5da661..aa96b79cd72a 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -6,14 +6,15 @@ from torch_geometric.testing import is_full_test -def test_sage_conv(): +@pytest.mark.parametrize('project', [False, True]) +def test_sage_conv(project): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) row, col = edge_index adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - conv = SAGEConv(8, 32) + conv = SAGEConv(8, 32, project=project) assert str(conv) == 'SAGEConv(8, 32, aggr=mean)' out = conv(x1, edge_index) assert out.size() == (4, 32) @@ -31,7 +32,7 @@ def test_sage_conv(): assert jit(x1, adj.t()).tolist() == out.tolist() adj = adj.sparse_resize((4, 2)) - conv = SAGEConv((8, 16), 32) + conv = SAGEConv((8, 16), 32, project=project) assert str(conv) == 'SAGEConv((8, 16), 32, aggr=mean)' out1 = conv((x1, x2), edge_index) out2 = conv((x1, None), edge_index, (4, 2)) diff --git a/torch_geometric/nn/conv/sage_conv.py b/torch_geometric/nn/conv/sage_conv.py index 2344269e8736..27c3d38bbd98 100644 --- a/torch_geometric/nn/conv/sage_conv.py +++ b/torch_geometric/nn/conv/sage_conv.py @@ -21,6 +21,15 @@ class SAGEConv(MessagePassing): \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + \mathbf{W}_2 \cdot \mathrm{mean}_{j \in \mathcal{N(i)}} \mathbf{x}_j + If :obj:`project = True`, then :math:`\mathbf{x}_j` will first get + projected via + + .. math:: + \mathbf{x}_j \leftarrow \sigma ( \mathbf{W}_3 \mathbf{x}_j + + \mathbf{b}) + + as described in Eq. (3) of the paper. + Args: in_channels (int or tuple): Size of each input sample, or :obj:`-1` to derive the size from the first input(s) to the forward method. @@ -38,6 +47,10 @@ class SAGEConv(MessagePassing): root_weight (bool, optional): If set to :obj:`False`, the layer will not add transformed root node features to the output. (default: :obj:`True`) + project (bool, optional): If set to :obj:`True`, the layer will apply a + linear transformation followed by an activation function before + aggregation (as described in Eq. (3) of the paper). + (default: :obj:`False`) bias (bool, optional): If set to :obj:`False`, the layer will not learn an additive bias. (default: :obj:`True`) **kwargs (optional): Additional arguments of @@ -59,6 +72,7 @@ def __init__( aggr: str = 'mean', normalize: bool = False, root_weight: bool = True, + project: bool = False, bias: bool = True, **kwargs, ): @@ -69,10 +83,14 @@ def __init__( self.out_channels = out_channels self.normalize = normalize self.root_weight = root_weight + self.project = project if isinstance(in_channels, int): in_channels = (in_channels, in_channels) + if self.project: + self.lin = Linear(in_channels[0], in_channels[0], bias=True) + if self.aggr is None: self.fuse = False # No "fused" message_and_aggregate. 
self.lstm = LSTM(in_channels[0], in_channels[0], batch_first=True) @@ -84,6 +102,8 @@ def __init__( self.reset_parameters() def reset_parameters(self): + if self.project: + self.lin.reset_parameters() if self.aggr is None: self.lstm.reset_parameters() self.lin_l.reset_parameters() @@ -96,6 +116,9 @@ def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, if isinstance(x, Tensor): x: OptPairTensor = (x, x) + if self.project and hasattr(self, 'lin'): + x = (self.lin(x[0]).relu(), x[1]) + # propagate_type: (x: OptPairTensor) out = self.propagate(edge_index, x=x, size=size) out = self.lin_l(out) From fac99d13e579543bff3de1592ac48d5d92e8bdcc Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 10 Apr 2022 15:45:05 +0200 Subject: [PATCH 0004/2432] custom explain_message test (#4448) --- test/nn/models/test_explainer.py | 27 +++++++++++++++++++++- torch_geometric/nn/conv/message_passing.py | 6 +++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/test/nn/models/test_explainer.py b/test/nn/models/test_explainer.py index 103dbf904241..55a1ad1ab4ff 100644 --- a/test/nn/models/test_explainer.py +++ b/test/nn/models/test_explainer.py @@ -1,7 +1,8 @@ import pytest import torch -from torch_geometric.nn import GAT, GCN, Explainer, to_captum +from torch_geometric.nn import GAT, GCN, Explainer, SAGEConv, to_captum +from torch_geometric.nn.conv import MessagePassing from torch_geometric.testing import withPackage x = torch.randn(8, 3, requires_grad=True) @@ -119,3 +120,27 @@ def test_explainer_to_log_prob(model): assert torch.allclose(raw_to_log(raw), prob_to_log(prob)) assert torch.allclose(prob_to_log(prob), log_to_log(log_prob)) + + +def test_custom_explain_message(): + x = torch.randn(4, 8) + edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) + + conv = SAGEConv(8, 32) + + def explain_message(self, inputs, x_i, x_j): + assert isinstance(self, SAGEConv) + assert inputs.size() == (6, 8) + assert inputs.size() == x_i.size() == x_j.size() + assert torch.allclose(inputs, x_j) + self.x_i = x_i + self.x_j = x_j + return inputs + + conv.explain_message = explain_message.__get__(conv, MessagePassing) + conv.explain = True + + conv(x, edge_index) + + assert torch.allclose(conv.x_i, x[edge_index[1]]) + assert torch.allclose(conv.x_j, x[edge_index[0]]) diff --git a/torch_geometric/nn/conv/message_passing.py b/torch_geometric/nn/conv/message_passing.py index 29dc08686faa..62ed10bcae59 100644 --- a/torch_geometric/nn/conv/message_passing.py +++ b/torch_geometric/nn/conv/message_passing.py @@ -111,7 +111,6 @@ def __init__(self, aggr: Optional[Union[str, List[str]]] = "add", self.inspector = Inspector(self) self.inspector.inspect(self.message) - self.inspector.inspect(self.explain_message, pop_first=True) self.inspector.inspect(self.aggregate, pop_first=True) self.inspector.params['aggregate'].pop('aggr', None) self.inspector.inspect(self.message_and_aggregate, pop_first=True) @@ -435,12 +434,15 @@ def explain(self, explain: bool): methods = ['message', 'aggregate', 'update'] self._explain = explain + self.inspector.inspect(self.explain_message, pop_first=True) self.__user_args__ = self.inspector.keys(methods).difference( self.special_args) def explain_message(self, inputs: Tensor, size_i: int) -> Tensor: # NOTE Replace this method in custom explainers per message-passing - # layer to customize how messages shall be explained. 
+ # layer to customize how messages shall be explained, e.g., via: + # conv.explain_message = explain_message.__get__(conv, MessagePassing) + # see stackoverflow.com: 394770/override-a-method-at-instance-level edge_mask = self._edge_mask From e18198fcd4c6c8ce7f9a7463ba14c1923aa094c2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 22:28:50 +0100 Subject: [PATCH 0005/2432] [pre-commit.ci] pre-commit autoupdate (#4456) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.1.0 → v4.2.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.1.0...v4.2.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 21f491f40925..9c2b60735637 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.2.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace From 359bbdc73f951fc9a72e5157625096008cd5ecc1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Apr 2022 02:16:10 +0200 Subject: [PATCH 0006/2432] [pre-commit.ci] pre-commit autoupdate (#4497) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/regebro/pyroma: 4.0b2 → 4.0](https://github.com/regebro/pyroma/compare/4.0b2...4.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9c2b60735637..0175c2f4bfda 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ repos: args: [-c=.yamllint.yml] - repo: https://github.com/regebro/pyroma - rev: "4.0b2" + rev: "4.0" hooks: - id: pyroma name: Check packaging From 5fdeae558ddff8b79f3fbc425f07d3a4ccbb420a Mon Sep 17 00:00:00 2001 From: rusty1s Date: Tue, 19 Apr 2022 18:29:12 +0200 Subject: [PATCH 0007/2432] optional rev_edge_types in case of multiple edge types --- torch_geometric/transforms/random_link_split.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/torch_geometric/transforms/random_link_split.py b/torch_geometric/transforms/random_link_split.py index af9d89527ebf..eff40bac42ee 100644 --- a/torch_geometric/transforms/random_link_split.py +++ b/torch_geometric/transforms/random_link_split.py @@ -95,6 +95,13 @@ def __init__( edge_types: Optional[Union[EdgeType, List[EdgeType]]] = None, rev_edge_types: Optional[Union[EdgeType, List[EdgeType]]] = None, ): + if isinstance(edge_types, list): + if rev_edge_types is None: + rev_edge_types = [None] * len(edge_types) + + assert isinstance(rev_edge_types, list) + assert len(edge_types) == len(rev_edge_types) + self.num_val = num_val self.num_test = num_test self.is_undirected = is_undirected @@ -106,10 +113,6 @@ def __init__( self.edge_types = edge_types self.rev_edge_types = rev_edge_types - if isinstance(edge_types, list): - assert isinstance(rev_edge_types, list) - assert len(edge_types) == len(rev_edge_types) - def __call__(self, data: Union[Data, HeteroData]): edge_types = self.edge_types rev_edge_types = 
self.rev_edge_types From 025b1cb0c94eeac768d6facbb942d84c223c0b19 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 20 Apr 2022 11:10:31 +0200 Subject: [PATCH 0008/2432] GATConv: require edge_dim to be set --- torch_geometric/nn/conv/gat_conv.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/torch_geometric/nn/conv/gat_conv.py b/torch_geometric/nn/conv/gat_conv.py index 13b1af546b18..b5d0e18194da 100644 --- a/torch_geometric/nn/conv/gat_conv.py +++ b/torch_geometric/nn/conv/gat_conv.py @@ -268,10 +268,9 @@ def edge_update(self, alpha_j: Tensor, alpha_i: OptTensor, # we simply need to sum them up to "emulate" concatenation: alpha = alpha_j if alpha_i is None else alpha_j + alpha_i - if edge_attr is not None: + if edge_attr is not None and self.lin_edge is not None: if edge_attr.dim() == 1: edge_attr = edge_attr.view(-1, 1) - assert self.lin_edge is not None edge_attr = self.lin_edge(edge_attr) edge_attr = edge_attr.view(-1, self.heads, self.out_channels) alpha_edge = (edge_attr * self.att_edge).sum(dim=-1) From ecf637418e77431520f97beb30976182d0668ebd Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 20 Apr 2022 11:19:55 +0200 Subject: [PATCH 0009/2432] Add 'data.n_id' trick to 'NeighborLoader' doc (fixes #4468) --- torch_geometric/loader/neighbor_loader.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 7425001c2718..8d4530b25708 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -173,6 +173,21 @@ class NeighborLoader(torch.utils.data.DataLoader): `examples/hetero/to_hetero_mag.py `_. + The :class:`~torch_geometric.loader.NeighborLoader` will return subgraphs + where global node indices are mapped to local indices corresponding to this + specific subgraph. However, often times it is desired to map the nodes of + the current subgraph back to the global node indices. A simple trick to + achieve this is to include this mapping as part of the :obj:`data` object: + + .. code-block:: python + + # Assign each node its global node index: + data.n_id = torch.arange(data.num_nodes) + + loader = NeighborLoader(data, ...) 
+ sampled_data = next(iter(loader)) + print(sampled_data.n_id) + Args: data (torch_geometric.data.Data or torch_geometric.data.HeteroData): The :class:`~torch_geometric.data.Data` or From e3891b1a40b3d8539d2ed317b82ee160473be571 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 20 Apr 2022 11:42:27 +0200 Subject: [PATCH 0010/2432] HeteroData: num_features impl (#4504) --- test/data/test_hetero_data.py | 20 +++++++++++++++----- torch_geometric/data/data.py | 2 +- torch_geometric/data/hetero_data.py | 22 ++++++++++++++++++++++ 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 22a0df0e3088..b8b5c5acdb7f 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -14,6 +14,8 @@ edge_index_paper_author = torch.stack([idx_paper[:30], idx_author[:30]], dim=0) edge_index_author_paper = torch.stack([idx_paper[:30], idx_author[:30]], dim=0) +edge_attr_paper_paper = torch.randn(edge_index_paper_paper.size(1), 8) + def get_edge_index(num_src_nodes, num_dst_nodes, num_edges): row = torch.randint(num_src_nodes, (num_edges, ), dtype=torch.long) @@ -77,12 +79,20 @@ def test_hetero_data_functions(): data['paper', 'paper'].edge_index = edge_index_paper_paper data['paper', 'author'].edge_index = edge_index_paper_author data['author', 'paper'].edge_index = edge_index_author_paper - assert len(data) == 2 - assert sorted(data.keys) == ['edge_index', 'x'] - assert 'x' in data and 'edge_index' in data + data['paper', 'paper'].edge_attr = edge_attr_paper_paper + assert len(data) == 3 + assert sorted(data.keys) == ['edge_attr', 'edge_index', 'x'] + assert 'x' in data and 'edge_index' in data and 'edge_attr' in data assert data.num_nodes == 15 assert data.num_edges == 110 + assert data.num_node_features == {'paper': 16, 'author': 32} + assert data.num_edge_features == { + ('paper', 'to', 'paper'): 8, + ('paper', 'to', 'author'): 0, + ('author', 'to', 'paper'): 0, + } + node_types, edge_types = data.metadata() assert node_types == ['paper', 'author'] assert edge_types == [ @@ -99,8 +109,8 @@ def test_hetero_data_functions(): data.y = 0 assert data['y'] == 0 and data.y == 0 - assert len(data) == 3 - assert sorted(data.keys) == ['edge_index', 'x', 'y'] + assert len(data) == 4 + assert sorted(data.keys) == ['edge_attr', 'edge_index', 'x', 'y'] del data['paper', 'author'] node_types, edge_types = data.metadata() diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 1f86a939af3c..0a778214a582 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -630,7 +630,7 @@ def num_node_features(self) -> int: def num_features(self) -> int: r"""Returns the number of features per node in the graph. 
Alias for :py:attr:`~num_node_features`.""" - return self._store.num_features + return self.num_node_features @property def num_edge_features(self) -> int: diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 5ebed15d3baa..41d011248840 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -273,6 +273,28 @@ def num_nodes(self) -> Optional[int]: r"""Returns the number of nodes in the graph.""" return super().num_nodes + @property + def num_node_features(self) -> Dict[NodeType, int]: + r"""Returns the number of features per node type in the graph.""" + return { + key: store.num_node_features + for key, store in self._node_store_dict.items() + } + + @property + def num_features(self) -> Dict[NodeType, int]: + r"""Returns the number of features per node type in the graph. + Alias for :py:attr:`~num_node_features`.""" + return self.num_node_features + + @property + def num_edge_features(self) -> Dict[EdgeType, int]: + r"""Returns the number of features per edge type in the graph.""" + return { + key: store.num_edge_features + for key, store in self._edge_store_dict.items() + } + def debug(self): pass # TODO From 27972088029d8c61428c89d55e75fd276c87e6aa Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Wed, 20 Apr 2022 18:48:06 +0800 Subject: [PATCH 0011/2432] Fix `from_networkx` in case where attributes are tensors (#4486) * bug fix for tensor valued graphx * merge Co-authored-by: rusty1s --- test/utils/test_convert.py | 21 +++++++++++++++++++++ torch_geometric/utils/convert.py | 11 +++++++---- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index 641a7ee3ec2f..353f52ea80fb 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -67,6 +67,27 @@ def test_to_networkx(): assert nx.to_numpy_array(G).tolist() == [[3, 1], [2, 0]] +@withPackage('networkx') +def test_from_networkx_set_node_attributes(): + import networkx as nx + + G = nx.path_graph(3) + attrs = { + 0: { + 'x': torch.tensor([1, 0, 0]) + }, + 1: { + 'x': torch.tensor([0, 1, 0]) + }, + 2: { + 'x': torch.tensor([0, 0, 1]) + }, + } + nx.set_node_attributes(G, attrs) + + assert from_networkx(G).x.tolist() == [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + + @withPackage('networkx') def test_to_networkx_undirected(): import networkx as nx diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index 3186ddb1dbc5..3316c13bc811 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -185,10 +185,13 @@ def from_networkx(G, group_node_attrs: Optional[Union[List[str], all]] = None, data[str(key)] = value for key, value in data.items(): - try: - data[key] = torch.tensor(value) - except ValueError: - pass + if isinstance(value, (tuple, list)) and isinstance(value[0], Tensor): + data[key] = torch.stack(value, dim=0) + else: + try: + data[key] = torch.tensor(value) + except ValueError: + pass data['edge_index'] = edge_index.view(2, -1) data = Data.from_dict(data) From 5fe70770e2d8b6c8abe11cb57a1e2d2e39a4e353 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Wed, 20 Apr 2022 19:33:01 +0800 Subject: [PATCH 0012/2432] Add `HeteroData` support to `RemoveIsolatedNodes` (#4479) * hetero isolated support * update * typo Co-authored-by: rusty1s --- test/transforms/test_remove_isolated_nodes.py | 51 +++++++++++++--- .../transforms/remove_isolated_nodes.py | 59 +++++++++++++++---- 2 files changed, 89 insertions(+), 21 deletions(-) diff --git 
a/test/transforms/test_remove_isolated_nodes.py b/test/transforms/test_remove_isolated_nodes.py index 5040a517d52c..fbaed4a4ec96 100644 --- a/test/transforms/test_remove_isolated_nodes.py +++ b/test/transforms/test_remove_isolated_nodes.py @@ -1,18 +1,51 @@ import torch -from torch_geometric.data import Data +from torch_geometric.data import Data, HeteroData from torch_geometric.transforms import RemoveIsolatedNodes def test_remove_isolated_nodes(): - assert RemoveIsolatedNodes().__repr__() == 'RemoveIsolatedNodes()' + assert str(RemoveIsolatedNodes()) == 'RemoveIsolatedNodes()' + + data = Data() + data.x = torch.arange(3) + data.edge_index = torch.tensor([[0, 2], [2, 0]]) + data.edge_attr = torch.arange(2) - edge_index = torch.tensor([[0, 2, 1, 0], [2, 0, 1, 0]]) - edge_attr = torch.tensor([1, 2, 3, 4]) - x = torch.tensor([[1], [2], [3]]) - data = Data(edge_index=edge_index, edge_attr=edge_attr, x=x) data = RemoveIsolatedNodes()(data) + assert len(data) == 3 - assert data.edge_index.tolist() == [[0, 1, 0], [1, 0, 0]] - assert data.edge_attr.tolist() == [1, 2, 4] - assert data.x.tolist() == [[1], [3]] + assert data.x.tolist() == [0, 2] + assert data.edge_index.tolist() == [[0, 1], [1, 0]] + assert data.edge_attr.tolist() == [0, 1] + + +def test_remove_isolated_nodes_in_hetero_data(): + data = HeteroData() + + data['p'].x = torch.arange(6) + data['a'].x = torch.arange(6) + data['i'].num_nodes = 4 + + # isolated paper nodes: {4} + # isolated author nodes: {3, 4, 5} + # isolated institution nodes: {0, 1, 2, 3} + data['p', '1', 'p'].edge_index = torch.tensor([[0, 1, 2], [0, 1, 3]]) + data['p', '2', 'a'].edge_index = torch.tensor([[1, 3, 5], [0, 1, 2]]) + data['p', '2', 'a'].edge_attr = torch.arange(3) + data['p', '3', 'a'].edge_index = torch.tensor([[5], [2]]) + + data = RemoveIsolatedNodes()(data) + + assert len(data) == 4 + assert data['p'].num_nodes == 5 + assert data['a'].num_nodes == 3 + assert data['i'].num_nodes == 0 + + assert data['p'].x.tolist() == [0, 1, 2, 3, 5] + assert data['a'].x.tolist() == [0, 1, 2] + + assert data['1'].edge_index.tolist() == [[0, 1, 2], [0, 1, 3]] + assert data['2'].edge_index.tolist() == [[1, 3, 4], [0, 1, 2]] + assert data['2'].edge_attr.tolist() == [0, 1, 2] + assert data['3'].edge_index.tolist() == [[4], [2]] diff --git a/torch_geometric/transforms/remove_isolated_nodes.py b/torch_geometric/transforms/remove_isolated_nodes.py index 8c6969bdeeb2..bda19f68b8e7 100644 --- a/torch_geometric/transforms/remove_isolated_nodes.py +++ b/torch_geometric/transforms/remove_isolated_nodes.py @@ -1,28 +1,63 @@ -import re +from collections import defaultdict +from typing import Union import torch +from torch_geometric.data import Data, HeteroData from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.utils import remove_isolated_nodes @functional_transform('remove_isolated_nodes') class RemoveIsolatedNodes(BaseTransform): r"""Removes isolated nodes from the graph (functional name: :obj:`remove_isolated_nodes`).""" - def __call__(self, data): - num_nodes = data.num_nodes - out = remove_isolated_nodes(data.edge_index, data.edge_attr, num_nodes) - data.edge_index, data.edge_attr, mask = out + def __call__(self, data: Union[Data, HeteroData]): + # Gather all nodes that occur in at least one edge (across all types): + n_id_dict = defaultdict(list) + for store in data.edge_stores: + if 'edge_index' not in store: + continue + + if store._key is None: + src = dst = None + else: + src, _, dst = 
store._key + + n_id_dict[src].append(store.edge_index[0]) + n_id_dict[dst].append(store.edge_index[1]) + + n_id_dict = {k: torch.cat(v).unique() for k, v in n_id_dict.items()} + + n_map_dict = {} + for store in data.node_stores: + if store._key not in n_id_dict: + n_id_dict[store._key] = torch.empty((0, ), dtype=torch.long) - if hasattr(data, '__num_nodes__'): - data.num_nodes = int(mask.sum()) + idx = n_id_dict[store._key] + mapping = idx.new_zeros(data.num_nodes) + mapping[idx] = torch.arange(idx.numel(), device=mapping.device) + n_map_dict[store._key] = mapping - for key, item in data: - if bool(re.search('edge', key)): + for store in data.edge_stores: + if 'edge_index' not in store: continue - if torch.is_tensor(item) and item.size(0) == num_nodes: - data[key] = item[mask] + + if store._key is None: + src = dst = None + else: + src, _, dst = store._key + + row = n_map_dict[src][store.edge_index[0]] + col = n_map_dict[dst][store.edge_index[1]] + store.edge_index = torch.stack([row, col], dim=0) + + for store in data.node_stores: + for key, value in store.items(): + if key == 'num_nodes': + store.num_nodes = n_id_dict[store._key].numel() + + elif store.is_node_attr(key): + store[key] = value[n_id_dict[store._key]] return data From cae37d3d0d23a2c33bc1a0e164aeceaff5915d3e Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Wed, 20 Apr 2022 20:30:39 +0800 Subject: [PATCH 0013/2432] Add random negative sampling to `LinkNeighborLoader` (#4446) * wip * add negative sampling to link loader * change doc * convert to true ratio * add test for negative sampling label * revert size manipulation * update Co-authored-by: rusty1s --- test/loader/test_link_neighbor_loader.py | 83 +++++++++++++------ .../loader/link_neighbor_loader.py | 63 +++++++++++++- 2 files changed, 119 insertions(+), 27 deletions(-) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index f919b6953153..76b1a7099975 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -16,7 +16,8 @@ def unique_edge_pairs(edge_index): @pytest.mark.parametrize('directed', [True, False]) -def test_homogeneous_link_neighbor_loader(directed): +@pytest.mark.parametrize('neg_sampling_ratio', [0.0, 1.0]) +def test_homogeneous_link_neighbor_loader(directed, neg_sampling_ratio): torch.manual_seed(12345) pos_edge_index = get_edge_index(100, 50, 500) @@ -32,10 +33,16 @@ def test_homogeneous_link_neighbor_loader(directed): data.x = torch.arange(100) data.edge_attr = torch.arange(500) - loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, batch_size=20, - edge_label_index=edge_label_index, - edge_label=edge_label, directed=directed, - shuffle=True) + loader = LinkNeighborLoader( + data, + num_neighbors=[-1] * 2, + batch_size=20, + edge_label_index=edge_label_index, + edge_label=edge_label if neg_sampling_ratio == 0.0 else None, + directed=directed, + neg_sampling_ratio=neg_sampling_ratio, + shuffle=True, + ) assert str(loader) == 'LinkNeighborLoader()' assert len(loader) == 1000 / 20 @@ -51,21 +58,30 @@ def test_homogeneous_link_neighbor_loader(directed): assert batch.edge_attr.min() >= 0 assert batch.edge_attr.max() < 500 - # Assert positive samples are present in the original graph: - edge_index = unique_edge_pairs(batch.edge_index) - edge_label_index = batch.edge_label_index[:, batch.edge_label == 1] - edge_label_index = unique_edge_pairs(edge_label_index) - assert len(edge_index | edge_label_index) == len(edge_index) + if neg_sampling_ratio == 0.0: + assert 
batch.edge_label_index.size(1) == 20 - # Assert negative samples are not present in the original graph: - edge_index = unique_edge_pairs(batch.edge_index) - edge_label_index = batch.edge_label_index[:, batch.edge_label == 0] - edge_label_index = unique_edge_pairs(edge_label_index) - assert len(edge_index & edge_label_index) == 0 + # Assert positive samples are present in the original graph: + edge_index = unique_edge_pairs(batch.edge_index) + edge_label_index = batch.edge_label_index[:, batch.edge_label == 1] + edge_label_index = unique_edge_pairs(edge_label_index) + assert len(edge_index | edge_label_index) == len(edge_index) + + # Assert negative samples are not present in the original graph: + edge_index = unique_edge_pairs(batch.edge_index) + edge_label_index = batch.edge_label_index[:, batch.edge_label == 0] + edge_label_index = unique_edge_pairs(edge_label_index) + assert len(edge_index & edge_label_index) == 0 + + else: + assert batch.edge_label_index.size(1) == 40 + assert torch.all(batch.edge_label[:20] == 1) + assert torch.all(batch.edge_label[20:] == 0) @pytest.mark.parametrize('directed', [True, False]) -def test_heterogeneous_link_neighbor_loader(directed): +@pytest.mark.parametrize('neg_sampling_ratio', [0.0, 1.0]) +def test_heterogeneous_link_neighbor_loader(directed, neg_sampling_ratio): torch.manual_seed(12345) data = HeteroData() @@ -80,22 +96,37 @@ def test_heterogeneous_link_neighbor_loader(directed): data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000) data['author', 'paper'].edge_attr = torch.arange(1500, 2500) - loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, - edge_label_index=('paper', 'author'), - batch_size=20, directed=directed, shuffle=True) + loader = LinkNeighborLoader( + data, + num_neighbors=[-1] * 2, + edge_label_index=('paper', 'author'), + batch_size=20, + directed=directed, + neg_sampling_ratio=neg_sampling_ratio, + shuffle=True, + ) assert str(loader) == 'LinkNeighborLoader()' - assert len(loader) == int(1000 / 20) + assert len(loader) == 1000 / 20 for batch in loader: assert isinstance(batch, HeteroData) - assert len(batch) == 4 - # Assert positive samples are present in the original graph: - edge_index = unique_edge_pairs(batch['paper', 'author'].edge_index) - edge_label_index = batch['paper', 'author'].edge_label_index - edge_label_index = unique_edge_pairs(edge_label_index) - assert len(edge_index | edge_label_index) == len(edge_index) + if neg_sampling_ratio == 0.0: + assert len(batch) == 4 + + # Assert positive samples are present in the original graph: + edge_index = unique_edge_pairs(batch['paper', 'author'].edge_index) + edge_label_index = batch['paper', 'author'].edge_label_index + edge_label_index = unique_edge_pairs(edge_label_index) + assert len(edge_index | edge_label_index) == len(edge_index) + + else: + assert len(batch) == 5 + + assert batch['paper', 'author'].edge_label_index.size(1) == 40 + assert torch.all(batch['paper', 'author'].edge_label[:20] == 1) + assert torch.all(batch['paper', 'author'].edge_label[20:] == 0) @pytest.mark.parametrize('directed', [True, False]) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index c6775a3fa3a3..3cb2a6645bea 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -11,6 +11,47 @@ class LinkNeighborSampler(NeighborSampler): + def __init__(self, data, *args, neg_sampling_ratio: float = 0.0, **kwargs): + super().__init__(data, *args, **kwargs) + 
self.neg_sampling_ratio = neg_sampling_ratio + + if issubclass(self.data_cls, Data): + self.num_src_nodes = self.num_dst_nodes = len(data.x) + else: + self.num_src_nodes = data[self.input_type[0]].num_nodes + self.num_dst_nodes = data[self.input_type[-1]].num_nodes + + def _create_label(self, edge_label_index, edge_label): + device = edge_label_index.device + + num_pos_edges = edge_label_index.size(1) + num_neg_edges = int(num_pos_edges * self.neg_sampling_ratio) + + if num_neg_edges == 0: + return edge_label_index, edge_label + + if edge_label is None: + edge_label = torch.ones(num_pos_edges, device=device) + else: + assert edge_label.dtype == torch.long + edge_label = edge_label + 1 + + neg_row = torch.randint(self.num_src_nodes, (num_neg_edges, )) + neg_col = torch.randint(self.num_dst_nodes, (num_neg_edges, )) + neg_edge_label_index = torch.stack([neg_row, neg_col], dim=0) + + neg_edge_label = edge_label.new_zeros((num_neg_edges, ) + + edge_label.size()[1:]) + + edge_label_index = torch.cat([ + edge_label_index, + neg_edge_label_index, + ], dim=1) + + edge_label = torch.cat([edge_label, neg_edge_label], dim=0) + + return edge_label_index, edge_label + def __call__(self, query: List[Tuple[Tensor]]): query = [torch.tensor(s) for s in zip(*query)] if len(query) == 2: @@ -20,6 +61,9 @@ def __call__(self, query: List[Tuple[Tensor]]): edge_label_index = torch.stack(query[:2], dim=0) edge_label = query[2] + edge_label_index, edge_label = self._create_label( + edge_label_index, edge_label) + if issubclass(self.data_cls, Data): sample_fn = torch.ops.torch_sparse.neighbor_sample @@ -130,6 +174,10 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): :class:`~torch_geometric.loader.NeighborLoader`, including support for heterogenous graphs. + .. note:: + :obj:`neg_sampling_ratio` is currently implemented in an approximate + way, *i.e.* negative edges may contain false negatives. + Args: data (torch_geometric.data.Data or torch_geometric.data.HeteroData): The :class:`~torch_geometric.data.Data` or @@ -156,6 +204,16 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): transform (Callable, optional): A function/transform that takes in a sampled mini-batch and returns a transformed version. (default: :obj:`None`) + neg_sampling_ratio (float, optional): The ratio of sampled negative + edges to the number of positive edges. + If :obj:`edge_label` does not exist, it will be automatically + created and represents a binary classification task + (:obj:`1` = edge, :obj:`0` = no edge). + If :obj:`edge_label` exists, it has to be a categorical label from + :obj:`0` to :obj:`num_classes - 1`. + After negative sampling, label :obj:`0` represents negative edges, + and labels :obj:`1` to :obj:`num_classes` represent the labels of + positive edges. (default: :obj:`0.0`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
@@ -170,6 +228,7 @@ def __init__( directed: bool = True, transform: Callable = None, neighbor_sampler: Optional[LinkNeighborSampler] = None, + neg_sampling_ratio: float = 0.0, **kwargs, ): # Remove for PyTorch Lightning: @@ -188,6 +247,7 @@ def __init__( self.directed = directed self.transform = transform self.neighbor_sampler = neighbor_sampler + self.neg_sampling_ratio = neg_sampling_ratio edge_type, edge_label_index = get_edge_label_index( data, edge_label_index) @@ -195,7 +255,8 @@ def __init__( if neighbor_sampler is None: self.neighbor_sampler = LinkNeighborSampler( data, num_neighbors, replace, directed, edge_type, - share_memory=kwargs.get('num_workers', 0) > 0) + share_memory=kwargs.get('num_workers', 0) > 0, + neg_sampling_ratio=self.neg_sampling_ratio) super().__init__(Dataset(edge_label_index, edge_label), collate_fn=self.neighbor_sampler, **kwargs) From 79b156ea2831305e430bd115611cccad1d588428 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Thu, 21 Apr 2022 12:17:05 +0800 Subject: [PATCH 0014/2432] fix minor bug when data.x does not exist (#4509) --- torch_geometric/loader/link_neighbor_loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 3cb2a6645bea..8dc1fea388ab 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -16,7 +16,7 @@ def __init__(self, data, *args, neg_sampling_ratio: float = 0.0, **kwargs): self.neg_sampling_ratio = neg_sampling_ratio if issubclass(self.data_cls, Data): - self.num_src_nodes = self.num_dst_nodes = len(data.x) + self.num_src_nodes = self.num_dst_nodes = data.num_nodes else: self.num_src_nodes = data[self.input_type[0]].num_nodes self.num_dst_nodes = data[self.input_type[-1]].num_nodes From bdbbe403d7aa440dc8cc1545d6022102ca5b4075 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Z=C3=BCgner?= Date: Thu, 21 Apr 2022 09:37:15 +0200 Subject: [PATCH 0015/2432] Fix `DimeNet` envelope bug with missing clamping (#4506) * Fix DimeNet envelope bug with missing clamping * Update dimenet.py Co-authored-by: Matthias Fey --- torch_geometric/nn/models/dimenet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index 88bbd5fd8c2e..5be16d3f55b0 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -64,7 +64,7 @@ def reset_parameters(self): self.freq.requires_grad_() def forward(self, dist): - dist = dist.unsqueeze(-1) / self.cutoff + dist = (dist.unsqueeze(-1) / self.cutoff).clamp(max=1.0) return self.envelope(dist) * (self.freq * dist).sin() From 6a2b04741a0d511e8599bd648291e659179da5d9 Mon Sep 17 00:00:00 2001 From: Aniket Maurya Date: Thu, 21 Apr 2022 16:40:09 +0530 Subject: [PATCH 0016/2432] Implement Lightning module for GraphGym (#4511) * add LitModule * format * add pl dep * add pl to min deps * add type hint * apply suggestions * apply suggestions * Update setup.py * fix tests * graphgym_requires * graphgym install Co-authored-by: Matthias Fey --- setup.py | 11 +++++-- test/graphgym/test_graphgym.py | 1 + torch_geometric/graphgym/model_builder.py | 37 ++++++++++++++++++++++- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index fcfab1a796ad..c25d609943f7 100644 --- a/setup.py +++ b/setup.py @@ -13,9 +13,14 @@ 'scikit-learn', ] -full_install_requires = [ - 'h5py', +graphgym_requires = [ 'yacs', + 
'hydra-core', + 'pytorch-lightning', +] + +full_install_requires = graphgym_requires + [ + 'h5py', 'numba', 'pandas', 'captum', @@ -23,7 +28,6 @@ 'trimesh', 'networkx', 'tabulate', - 'hydra-core', 'matplotlib', 'scikit-image', 'pytorch-memlab', @@ -57,6 +61,7 @@ python_requires='>=3.7', install_requires=install_requires, extras_require={ + 'graphgym': graphgym_requires, 'full': full_install_requires, 'test': test_requires, 'dev': dev_requires, diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py index 003c4f8ea696..57258d070115 100644 --- a/test/graphgym/test_graphgym.py +++ b/test/graphgym/test_graphgym.py @@ -41,6 +41,7 @@ def trivial_metric(true, pred, task_type): @withPackage('yacs') +@withPackage('pytorch_lightning') @pytest.mark.parametrize('auto_resume', [True, False]) @pytest.mark.parametrize('skip_train_eval', [True, False]) @pytest.mark.parametrize('use_trivial_metric', [True, False]) diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index b1f12f19ffd4..6e9d370cd7bf 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -1,12 +1,47 @@ +import warnings + import torch from torch_geometric.graphgym.config import cfg from torch_geometric.graphgym.models.gnn import GNN from torch_geometric.graphgym.register import network_dict, register_network +try: + from pytorch_lightning import LightningModule +except ImportError: + LightningModule = object + warnings.warn("Please install 'pytorch_lightning' for using the GraphGym " + "experiment manager via 'pip install pytorch_lightning'") + register_network('gnn', GNN) +class GraphGymModule(LightningModule): + def __init__(self, dim_in, dim_out, cfg): + super().__init__() + self.model = network_dict[cfg.model.type](dim_in=dim_in, + dim_out=dim_out) + + def forward(self, *args, **kwargs): + return self.model(*args, **kwargs) + + @property + def encoder(self) -> torch.nn.Module: + return self.model.encoder + + @property + def mp(self) -> torch.nn.Module: + return self.model.mp + + @property + def post_mp(self) -> torch.nn.Module: + return self.model.post_mp + + @property + def pre_mp(self) -> torch.nn.Module: + return self.model.pre_mp + + def create_model(to_device=True, dim_in=None, dim_out=None): r""" Create model for graph machine learning @@ -22,7 +57,7 @@ def create_model(to_device=True, dim_in=None, dim_out=None): if 'classification' in cfg.dataset.task_type and dim_out == 2: dim_out = 1 - model = network_dict[cfg.model.type](dim_in=dim_in, dim_out=dim_out) + model = GraphGymModule(dim_in, dim_out, cfg) if to_device: model.to(torch.device(cfg.device)) return model From e65b295d7e613941d9a056e9444b3b9b08a1cf25 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Fri, 22 Apr 2022 13:12:24 +0800 Subject: [PATCH 0017/2432] Test labels in `LinkNeighborLoader` (#4508) * test label types * update docstring * update docstring * Update torch_geometric/loader/link_neighbor_loader.py Co-authored-by: Matthias Fey * Update torch_geometric/loader/link_neighbor_loader.py Co-authored-by: Matthias Fey * line lengtH * typo Co-authored-by: Matthias Fey --- test/loader/test_link_neighbor_loader.py | 32 +++++++++++++++++++ .../loader/link_neighbor_loader.py | 9 ++++-- 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 76b1a7099975..f2dd71f415f0 100644 --- a/test/loader/test_link_neighbor_loader.py +++ 
b/test/loader/test_link_neighbor_loader.py @@ -155,3 +155,35 @@ def test_heterogeneous_link_neighbor_loader_loop(directed): edge_label_index = batch['paper', 'paper'].edge_label_index edge_label_index = unique_edge_pairs(edge_label_index) assert len(edge_index | edge_label_index) == len(edge_index) + + +def test_link_neighbor_loader_edge_label(): + torch.manual_seed(12345) + + edge_index = get_edge_index(100, 100, 500) + data = Data(edge_index=edge_index, x=torch.arange(100)) + + loader = LinkNeighborLoader( + data, + num_neighbors=[-1] * 2, + batch_size=10, + neg_sampling_ratio=1.0, + ) + + for batch in loader: + assert batch.edge_label.dtype == torch.float + assert torch.all(batch.edge_label[:10] == 1.0) + assert torch.all(batch.edge_label[10:] == 0.0) + + loader = LinkNeighborLoader( + data, + num_neighbors=[-1] * 2, + batch_size=10, + edge_label=torch.ones(500, dtype=torch.long), + neg_sampling_ratio=1.0, + ) + + for batch in loader: + assert batch.edge_label.dtype == torch.long + assert torch.all(batch.edge_label[:10] == 2) + assert torch.all(batch.edge_label[10:] == 0) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 8dc1fea388ab..08390100e2f0 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -170,7 +170,7 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): train_mask=[1368], val_mask=[1368], test_mask=[1368], edge_label_index=[2, 128], edge_label=[128]) - The rest of the functionality mirros that of + The rest of the functionality mirrors that of :class:`~torch_geometric.loader.NeighborLoader`, including support for heterogenous graphs. @@ -213,7 +213,12 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): :obj:`0` to :obj:`num_classes - 1`. After negative sampling, label :obj:`0` represents negative edges, and labels :obj:`1` to :obj:`num_classes` represent the labels of - positive edges. (default: :obj:`0.0`) + positive edges. + Note that returned labels are of type :obj:`torch.float` for binary + classification (to facilitate the ease-of-use of + :meth:`F.binary_cross_entropy`) and of type + :obj:`torch.long` for multi-class classification (to facilitate the + ease-of-use of :meth:`F.cross_entropy`). (default: :obj:`0.0`). **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. From 814bd46f2d3814ae583888ba14fbc3e4798c990d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 22 Apr 2022 07:14:43 +0200 Subject: [PATCH 0018/2432] better fallback in case PL is missing (#4516) --- torch_geometric/graphgym/model_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index 6e9d370cd7bf..2d22a7c6aa75 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -9,7 +9,7 @@ try: from pytorch_lightning import LightningModule except ImportError: - LightningModule = object + LightningModule = torch.nn.Module warnings.warn("Please install 'pytorch_lightning' for using the GraphGym " "experiment manager via 'pip install pytorch_lightning'") From be7b23c17b516c6263fbaa3104e1c289a0a4bfc6 Mon Sep 17 00:00:00 2001 From: "Wilfried L. 
Bounsi" Date: Fri, 22 Apr 2022 16:16:33 +0100 Subject: [PATCH 0019/2432] Unsupervised `GraphSAGE` example on `PPI` (#4416) * Add example for PPI unsupervised * update * typo * typo * reset num epochs Co-authored-by: rusty1s --- README.md | 2 +- examples/graph_sage_unsup_ppi.py | 102 +++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 1 deletion(-) create mode 100644 examples/graph_sage_unsup_ppi.py diff --git a/README.md b/README.md index 0aafd5a5078f..5e132037d380 100644 --- a/README.md +++ b/README.md @@ -202,7 +202,7 @@ These GNN layers can be stacked together to create Graph Neural Network models. * **[EGConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.EGConv)** from Tailor *et al.*: [Adaptive Filters and Aggregator Fusion for Efficient Graph Convolutions](https://arxiv.org/abs/2104.01481) (GNNSys 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/egc.py)] * **[GATv2Conv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.GATv2Conv)** from Brody *et al.*: [How Attentive are Graph Attention Networks?](https://arxiv.org/abs/2105.14491) (CoRR 2021) * **[TransformerConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.TransformerConv)** from Shi *et al.*: [Masked Label Prediction: Unified Message Passing Model for Semi-Supervised Classification](https://arxiv.org/abs/2009.03509) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/unimp_arxiv.py)] -* **[SAGEConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.SAGEConv)** from Hamilton *et al.*: [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_sage.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup.py)] +* **[SAGEConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.SAGEConv)** from Hamilton *et al.*: [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_sage.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup.py), [**Example4**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup_ppi.py)] * **[GraphConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.GraphConv)** from, *e.g.*, Morris *et al.*: [Weisfeiler and Leman Go Neural: Higher-order Graph Neural Networks](https://arxiv.org/abs/1810.02244) (AAAI 2019) * **[GatedGraphConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.GatedGraphConv)** from Li *et al.*: [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493) (ICLR 2016) * **[ResGatedGraphConv](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.ResGatedGraphConv)** from Bresson and Laurent: [Residual Gated Graph ConvNets](https://arxiv.org/abs/1711.07553) (CoRR 2017) diff --git 
a/examples/graph_sage_unsup_ppi.py b/examples/graph_sage_unsup_ppi.py new file mode 100644 index 000000000000..28277bd09af9 --- /dev/null +++ b/examples/graph_sage_unsup_ppi.py @@ -0,0 +1,102 @@ +import os.path as osp + +import torch +import torch.nn.functional as F +import tqdm +from sklearn.linear_model import SGDClassifier +from sklearn.metrics import f1_score +from sklearn.multioutput import MultiOutputClassifier + +from torch_geometric.data import Batch +from torch_geometric.datasets import PPI +from torch_geometric.loader import DataLoader, LinkNeighborLoader +from torch_geometric.nn import GraphSAGE + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PPI') +train_dataset = PPI(path, split='train') +val_dataset = PPI(path, split='val') +test_dataset = PPI(path, split='test') + +# Group all training graphs into a single graph to perform sampling: +train_data = Batch.from_data_list(train_dataset) +loader = LinkNeighborLoader(train_data, batch_size=2048, shuffle=True, + neg_sampling_ratio=0.5, num_neighbors=[10, 10], + num_workers=6, persistent_workers=True) + +# Evaluation loaders (one datapoint corresponds to a graph) +train_loader = DataLoader(train_dataset, batch_size=2) +val_loader = DataLoader(val_dataset, batch_size=2) +test_loader = DataLoader(test_dataset, batch_size=2) + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +model = GraphSAGE( + in_channels=train_dataset.num_features, + hidden_channels=64, + num_layers=2, + out_channels=64, +).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.005) + + +def train(): + model.train() + + total_loss = total_examples = 0 + for data in tqdm.tqdm(loader): + data = data.to(device) + optimizer.zero_grad() + h = model(data.x, data.edge_index) + + h_src = h[data.edge_label_index[0]] + h_dst = h[data.edge_label_index[1]] + link_pred = (h_src * h_dst).sum(dim=-1) # Inner product. 
+ + loss = F.binary_cross_entropy_with_logits(link_pred, data.edge_label) + loss.backward() + optimizer.step() + + total_loss += float(loss) * link_pred.numel() + total_examples += link_pred.numel() + + return total_loss / total_examples + + +@torch.no_grad() +def encode(loader): + model.eval() + + xs, ys = [], [] + for data in loader: + data = data.to(device) + xs.append(model(data.x, data.edge_index).cpu()) + ys.append(data.y.cpu()) + return torch.cat(xs, dim=0), torch.cat(ys, dim=0) + + +@torch.no_grad() +def test(): + # Train classifier on training set: + x, y = encode(train_loader) + + clf = MultiOutputClassifier(SGDClassifier(loss='log', penalty='l2')) + clf.fit(x, y) + + train_f1 = f1_score(y, clf.predict(x), average='micro') + + # Evaluate on validation set: + x, y = encode(val_loader) + val_f1 = f1_score(y, clf.predict(x), average='micro') + + # Evaluate on test set: + x, y = encode(test_loader) + test_f1 = f1_score(y, clf.predict(x), average='micro') + + return train_f1, val_f1, test_f1 + + +for epoch in range(1, 6): + loss = train() + print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}') + train_f1, val_f1, test_f1 = test() + print(f'Train F1: {train_f1:.4f}, Val F1: {val_f1:.4f}, ' + f'Test F1: {test_f1:.4f}') From c3d1ac56ee8681b0b4111a38f55d27c6374efa87 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Mon, 25 Apr 2022 07:12:43 +0200 Subject: [PATCH 0020/2432] fix typo --- torch_geometric/data/dataset.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/torch_geometric/data/dataset.py b/torch_geometric/data/dataset.py index ded985879180..25d85859a6c8 100644 --- a/torch_geometric/data/dataset.py +++ b/torch_geometric/data/dataset.py @@ -151,15 +151,15 @@ def _process(self): f"The `pre_transform` argument differs from the one used in " f"the pre-processed version of this dataset. If you want to " f"make use of another pre-processing technique, make sure to " - f"sure to delete '{self.processed_dir}' first") + f"delete '{self.processed_dir}' first") f = osp.join(self.processed_dir, 'pre_filter.pt') if osp.exists(f) and torch.load(f) != _repr(self.pre_filter): warnings.warn( - "The `pre_filter` argument differs from the one used in the " - "pre-processed version of this dataset. If you want to make " - "use of another pre-fitering technique, make sure to delete " - "'{self.processed_dir}' first") + "The `pre_filter` argument differs from the one used in " + "the pre-processed version of this dataset. 
If you want to " + "make use of another pre-fitering technique, make sure to " + "delete '{self.processed_dir}' first") if files_exist(self.processed_paths): # pragma: no cover return From b0cec1dfafa347eb0d04aaee2123173d17f9d782 Mon Sep 17 00:00:00 2001 From: Rex Ying Date: Mon, 25 Apr 2022 05:49:02 -0700 Subject: [PATCH 0021/2432] Add temporal sampling support to `NeighborLoader` (#4025) * temporal loader * parameterized test for temporal sampling * minor len fix * fix merge bugs in test * docstring * temporal * use time_attr to denote if data[time_attr] contains timestamp info for temporal sampling * Update test/loader/test_neighbor_loader.py Co-authored-by: Matthias Fey * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merge * Update torch_geometric/loader/neighbor_loader.py Co-authored-by: Matthias Fey * update * add test * update * fix typo Co-authored-by: Matthias Fey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- test/loader/test_neighbor_loader.py | 20 +++++++ torch_geometric/data/lightning_datamodule.py | 1 + torch_geometric/loader/neighbor_loader.py | 63 +++++++++++++++----- torch_geometric/testing.py | 24 ++++++++ 4 files changed, 94 insertions(+), 14 deletions(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index d65e9bb2ccc7..56bbfa94fe67 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -6,6 +6,7 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv, to_hetero +from torch_geometric.testing import withRegisteredOp from torch_geometric.utils import k_hop_subgraph @@ -255,3 +256,22 @@ def forward(self, x, edge_index, edge_weight): out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict, hetero_batch.edge_weight_dict)['paper'][:batch_size] assert torch.allclose(out1, out2, atol=1e-6) + + +@withRegisteredOp('torch_sparse.hetero_temporal_neighbor_sample') +def test_temporal_heterogeneous_neighbor_loader_on_cora(get_dataset): + dataset = get_dataset(name='Cora') + data = dataset[0] + + hetero_data = HeteroData() + hetero_data['paper'].x = data.x + hetero_data['paper'].time = torch.arange(data.num_nodes) + hetero_data['paper', 'paper'].edge_index = data.edge_index + + loader = NeighborLoader(hetero_data, num_neighbors=[-1, -1], + input_nodes='paper', time_attr='time', + batch_size=1) + + for batch in loader: + mask = batch['paper'].time[0] >= batch['paper'].time[1:] + assert torch.all(mask) diff --git a/torch_geometric/data/lightning_datamodule.py b/torch_geometric/data/lightning_datamodule.py index 88c422e4cc9e..2ac988543177 100644 --- a/torch_geometric/data/lightning_datamodule.py +++ b/torch_geometric/data/lightning_datamodule.py @@ -273,6 +273,7 @@ def __init__( replace=kwargs.get('replace', False), directed=kwargs.get('directed', True), input_type=get_input_nodes(data, input_train_nodes)[0], + time_attr=kwargs.get('time_attr', None), ) self.input_train_nodes = input_train_nodes self.input_val_nodes = input_val_nodes diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 8d4530b25708..b7bdf2c505cb 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -25,19 +25,32 @@ def __init__( directed: bool = True, input_type: Optional[Any] = None, share_memory: bool = False, + time_attr: Optional[str] = 
None, ): self.data_cls = data.__class__ self.num_neighbors = num_neighbors self.replace = replace self.directed = directed + self.node_time = None if isinstance(data, Data): + if time_attr is not None: + # TODO `time_attr` support for homogeneous graphs + raise ValueError( + f"'time_attr' attribute not yet supported for " + f"'{data.__class__.__name__}' object") + # Convert the graph data into a suitable format for sampling. out = to_csc(data, device='cpu', share_memory=share_memory) self.colptr, self.row, self.perm = out assert isinstance(num_neighbors, (list, tuple)) elif isinstance(data, HeteroData): + if time_attr is not None: + self.node_time_dict = data.collect(time_attr) + else: + self.node_time_dict = None + # Convert the graph data into a suitable format for sampling. # NOTE: Since C++ cannot take dictionaries with tuples as key as # input, edge type triplets are converted into single strings. @@ -66,8 +79,8 @@ def __call__(self, index: Union[List[int], Tensor]): index = torch.LongTensor(index) if issubclass(self.data_cls, Data): - sample_fn = torch.ops.torch_sparse.neighbor_sample - node, row, col, edge = sample_fn( + fn = torch.ops.torch_sparse.neighbor_sample + node, row, col, edge = fn( self.colptr, self.row, index, @@ -78,18 +91,33 @@ def __call__(self, index: Union[List[int], Tensor]): return node, row, col, edge, index.numel() elif issubclass(self.data_cls, HeteroData): - sample_fn = torch.ops.torch_sparse.hetero_neighbor_sample - node_dict, row_dict, col_dict, edge_dict = sample_fn( - self.node_types, - self.edge_types, - self.colptr_dict, - self.row_dict, - {self.input_type: index}, - self.num_neighbors, - self.num_hops, - self.replace, - self.directed, - ) + if self.node_time_dict is None: + fn = torch.ops.torch_sparse.hetero_neighbor_sample + node_dict, row_dict, col_dict, edge_dict = fn( + self.node_types, + self.edge_types, + self.colptr_dict, + self.row_dict, + {self.input_type: index}, + self.num_neighbors, + self.num_hops, + self.replace, + self.directed, + ) + else: + fn = torch.ops.torch_sparse.hetero_temporal_neighbor_sample + node_dict, row_dict, col_dict, edge_dict = fn( + self.node_types, + self.edge_types, + self.colptr_dict, + self.row_dict, + {self.input_type: index}, + self.num_neighbors, + self.node_time_dict, + self.num_hops, + self.replace, + self.directed, + ) return node_dict, row_dict, col_dict, edge_dict, index.numel() @@ -209,6 +237,11 @@ class NeighborLoader(torch.utils.data.DataLoader): replacement. (default: :obj:`False`) directed (bool, optional): If set to :obj:`False`, will include all edges between all sampled nodes. (default: :obj:`True`) + time_attr (str, optional): The name of the attribute that denotes + timestamps for the nodes in the graph. + If set, temporal sampling will be used such that neighbors are + guaranteed to fulfill temporal constraints, *i.e.* neighbors have + an earlier timestamp than the center node. (default: :obj:`None`) transform (Callable, optional): A function/transform that takes in a sampled mini-batch and returns a transformed version. 
(default: :obj:`None`) @@ -223,6 +256,7 @@ def __init__( input_nodes: InputNodes = None, replace: bool = False, directed: bool = True, + time_attr: Optional[str] = None, transform: Callable = None, neighbor_sampler: Optional[NeighborSampler] = None, **kwargs, @@ -248,6 +282,7 @@ def __init__( if neighbor_sampler is None: self.neighbor_sampler = NeighborSampler( data, num_neighbors, replace, directed, node_type, + time_attr=time_attr, share_memory=kwargs.get('num_workers', 0) > 0) super().__init__(input_nodes, collate_fn=self.neighbor_sampler, diff --git a/torch_geometric/testing.py b/torch_geometric/testing.py index 9029dca59714..300ba23b2db1 100644 --- a/torch_geometric/testing.py +++ b/torch_geometric/testing.py @@ -35,6 +35,30 @@ def decorator(func: Callable) -> Callable: return decorator +def withRegisteredOp(*args) -> Callable: + r"""A decorator to skip tests if a certain op is not registered.""" + def is_registered(op: str) -> bool: + module = torch.ops + for attr in op.split('.'): + try: + module = getattr(module, attr) + except RuntimeError: + return False + return True + + na_ops = set(arg for arg in args if not is_registered(arg)) + + def decorator(func: Callable) -> Callable: + import pytest + + return pytest.mark.skipif( + len(na_ops) > 0, + reason=f"Operator(s) {na_ops} are not registered", + )(func) + + return decorator + + def withCUDA(func: Callable) -> Callable: r"""A decorator to skip tests if CUDA is not found.""" import pytest From e220a2c08fa1b2f1672d616c22eac2a67b5c8967 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 27 Apr 2022 14:48:11 +0000 Subject: [PATCH 0022/2432] typo --- setup.cfg | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index edf3b53436c5..8276a4977d84 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,21 +2,21 @@ long_description=file: README.md long_description_content_type=text/markdown -classifiers = +classifiers= Development Status :: 5 - Production/Stable License :: OSI Approved :: MIT License Programming Language :: Python - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Programming Language :: Python :: 3 :: Only [aliases] test=pytest [tool:pytest] -addopts = --capture=no +addopts=--capture=no filterwarnings=ignore::DeprecationWarning:tensorboard.* [flake8] @@ -24,7 +24,7 @@ ignore=F811,W503,W504 # ignore overload redefinition, allow line breaks before/ [isort] multi_line_output=3 -include_trailing_comma = True +include_trailing_comma=True skip=.gitignore,__init__.py [pylint.'MESSAGES CONTROL'] From 85fc32ed5a5a4f23551f09fbed1d25645ccf073f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Z=C3=BCgner?= Date: Fri, 29 Apr 2022 11:13:55 +0200 Subject: [PATCH 0023/2432] Improve distance cutoff in `DimeNet` (#4562) * Integrate clamping into Envelope This is cleaner than (dist / cutoff).clamp_max(1.0) because it's the envelope that should become zero and then effectively mask distances larger than the cutoff. This is also how it's implemented in [`DimeNet`](https://github.com/gasteigerjo/dimenet/blob/09123a0e16e728d0a0e53e6686b04f859802aa81/dimenet/model/layers/envelope.py#L23). 
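  Concretely (a sketch using the symbols from the diff below, not an excerpt of it), the radial basis becomes

      u(d) = envelope(d / cutoff) * sin(freq * d / cutoff)
      envelope(x) = (1/x + a*x**(p-1) + b*x**p + c*x**(p+1)) * (x < 1)

  so u(d) vanishes for every d >= cutoff without clamping the distances themselves.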
* Remove extra parentheses * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update torch_geometric/nn/models/dimenet.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- torch_geometric/nn/models/dimenet.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index 5be16d3f55b0..dd594050dd5d 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -45,7 +45,8 @@ def forward(self, x): x_pow_p0 = x.pow(p - 1) x_pow_p1 = x_pow_p0 * x x_pow_p2 = x_pow_p1 * x - return 1. / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p2 + return (1. / x + a * x_pow_p0 + b * x_pow_p1 + + c * x_pow_p2) * (x < 1.0).to(x.dtype) class BesselBasisLayer(torch.nn.Module): @@ -64,7 +65,7 @@ def reset_parameters(self): self.freq.requires_grad_() def forward(self, dist): - dist = (dist.unsqueeze(-1) / self.cutoff).clamp(max=1.0) + dist = (dist.unsqueeze(-1) / self.cutoff) return self.envelope(dist) * (self.freq * dist).sin() From 484b7922ec68d146280acba409fc7170549e2775 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Fri, 29 Apr 2022 05:17:26 -0700 Subject: [PATCH 0024/2432] `FeatureStore` abstraction definition (#4534) * Feature store abstraction + tests * TensorType -> FeatureTensorType * to_type * fix * API changes, WIP * Fix * Updates * More cleanup for new API * Add override example * Fix * Remove unnecessary properties * remove backend * Updates * More updates * pass * pass * pass * pass Co-authored-by: rusty1s --- test/data/test_feature_store.py | 154 +++++++++ torch_geometric/data/feature_store.py | 430 ++++++++++++++++++++++++++ torch_geometric/typing.py | 5 + torch_geometric/utils/mixin.py | 14 + 4 files changed, 603 insertions(+) create mode 100644 test/data/test_feature_store.py create mode 100644 torch_geometric/data/feature_store.py create mode 100644 torch_geometric/utils/mixin.py diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py new file mode 100644 index 000000000000..c7c77b6c985a --- /dev/null +++ b/test/data/test_feature_store.py @@ -0,0 +1,154 @@ +from dataclasses import dataclass +from typing import Optional + +import pytest +import torch + +from torch_geometric.data.feature_store import ( + AttrView, + FeatureStore, + TensorAttr, + _field_status, +) +from torch_geometric.typing import FeatureTensorType + + +class MyFeatureStore(FeatureStore): + def __init__(self): + super().__init__() + self.store = {} + + @staticmethod + def key(attr: TensorAttr) -> str: + return (attr.group_name, attr.attr_name) + + def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: + index = attr.index + + # Not set or None indices define the obvious index: + if index is None: + index = torch.arange(0, tensor.shape[0]) + + # Store the index as a column: + self.store[MyFeatureStore.key(attr)] = (index, tensor) + + return True + + def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: + index, tensor = self.store.get(MyFeatureStore.key(attr), (None, None)) + + if tensor is None: + return None + + # Not set or None indices return the whole tensor: + if attr.index is None: + return tensor + + idx = torch.cat([(index == v).nonzero() for v in attr.index]).view(-1) + return tensor[idx] + + def _remove_tensor(self, attr: TensorAttr) -> bool: + del self.store[MyFeatureStore.key(attr)] + return True + + def 
__len__(self): + raise NotImplementedError + + +@dataclass +class MyTensorAttrNoGroupName(TensorAttr): + def __init__(self, attr_name=_field_status.UNSET, + index=_field_status.UNSET): + # Treat group_name as optional, and move it to the end + super().__init__(None, attr_name, index) + + +class MyFeatureStoreNoGroupName(MyFeatureStore): + def __init__(self): + super().__init__() + self._attr_cls = MyTensorAttrNoGroupName + + @staticmethod + def key(attr: TensorAttr) -> str: + return attr.attr_name + + def __len__(self): + raise NotImplementedError + + +def test_feature_store(): + r"""Tests basic API and indexing functionality of a feature store.""" + store = MyFeatureStore() + tensor = torch.Tensor([[0, 0, 0], [1, 1, 1], [2, 2, 2]]) + + group_name = 'A' + attr_name = 'feat' + index = torch.tensor([0, 1, 2]) + attr = TensorAttr(group_name, attr_name, index) + + # Normal API: + store.put_tensor(tensor, attr) + assert torch.equal(store.get_tensor(attr), tensor) + assert torch.equal( + store.get_tensor(group_name, attr_name, index=torch.tensor([0, 2])), + tensor[torch.tensor([0, 2])], + ) + assert store.get_tensor(None, None, index) is None + store.remove_tensor(group_name, attr_name, None) + assert store.get_tensor(attr) is None + + # Views: + view = store.view(group_name=group_name) + view.attr_name = attr_name + view['index'] = index + assert view == AttrView(store, TensorAttr(group_name, attr_name, index)) + + # Indexing: + store[group_name, attr_name, index] = tensor + + # Fully-specified forms, all of which produce a tensor output + assert torch.equal(store[group_name, attr_name, index], tensor) + assert torch.equal(store[group_name, attr_name, None], tensor) + assert torch.equal(store[group_name, attr_name, :], tensor) + assert torch.equal(store[group_name][attr_name][:], tensor) + assert torch.equal(store[group_name].feat[:], tensor) + assert torch.equal(store.view().A.feat[:], tensor) + + with pytest.raises(AttributeError) as exc_info: + _ = store.view(group_name=group_name, index=None).feat.A + print(exc_info) + + # Partially-specified forms, which produce an AttrView object + assert store[group_name] == store.view(TensorAttr(group_name=group_name)) + assert store[group_name].feat == store.view( + TensorAttr(group_name=group_name, attr_name=attr_name)) + + # Partially-specified forms, when called, produce a Tensor output + # from the `TensorAttr` that has been partially specified. 
+ store[group_name] = tensor + assert isinstance(store[group_name], AttrView) + assert torch.equal(store[group_name](), tensor) + + # Deletion: + del store[group_name, attr_name, index] + assert store[group_name, attr_name, index] is None + del store[group_name] + assert store[group_name]() is None + + +def test_feature_store_override(): + store = MyFeatureStoreNoGroupName() + tensor = torch.Tensor([[0, 0, 0], [1, 1, 1], [2, 2, 2]]) + + attr_name = 'feat' + index = torch.tensor([0, 1, 2]) + + # Only use attr_name and index, in that order: + store[attr_name, index] = tensor + + # A few assertions to ensure group_name is not needed: + assert isinstance(store[attr_name], AttrView) + assert torch.equal(store[attr_name, index], tensor) + assert torch.equal(store[attr_name][index], tensor) + assert torch.equal(store[attr_name][:], tensor) + assert torch.equal(store[attr_name, :], tensor) diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py new file mode 100644 index 000000000000..7e3f8a82612f --- /dev/null +++ b/torch_geometric/data/feature_store.py @@ -0,0 +1,430 @@ +r""" +This class defines the abstraction for a backend-agnostic feature store. The +goal of the feature store is to abstract away all node and edge feature memory +management so that varying implementations can allow for independent scale-out. + +This particular feature store abstraction makes a few key assumptions: +* The features we care about storing are node and edge features of a graph. + To this end, the attributes that the feature store supports include a + `group_name` (e.g. a heterogeneous node name or a heterogeneous edge type), + an `attr_name` (e.g. `x` or `edge_attr`), and an index. +* A feature can be uniquely identified from any associated attributes specified + in `TensorAttr`. + +It is the job of a feature store implementor class to handle these assumptions +properly. For example, a simple in-memory feature store implementation may +concatenate all metadata values with a feature index and use this as a unique +index in a KV store. More complicated implementations may choose to partition +features in interesting manners based on the provided metadata. + +Major TODOs for future implementation: +* Async `put` and `get` functionality +""" +import copy +from abc import abstractmethod +from collections.abc import MutableMapping +from dataclasses import dataclass +from enum import Enum +from typing import Any, Optional, Union + +import numpy as np +import torch + +from torch_geometric.typing import FeatureTensorType +from torch_geometric.utils.mixin import CastMixin + +_field_status = Enum("FieldStatus", "UNSET") + +# We allow indexing with a tensor, numpy array, Python slicing, or a single +# integer index. +IndexType = Union[torch.Tensor, np.ndarray, slice, int] + + +@dataclass +class TensorAttr(CastMixin): + r"""Defines the attributes of a class:`FeatureStore` tensor; in particular, + all the parameters necessary to uniquely identify a tensor from the feature + store. + + Note that the order of the attributes is important; this is the order in + which attributes must be provided for indexing calls. Feature store + implementor classes can define a different ordering by overriding + :meth:`TensorAttr.__init__`. + """ + + # The group name that the tensor corresponds to. Defaults to None. + group_name: Optional[str] = _field_status.UNSET + + # The name of the tensor within its group. Defaults to None. 
+ attr_name: Optional[str] = _field_status.UNSET + + # The node indices the rows of the tensor correspond to. Defaults to UNSET. + index: Optional[IndexType] = _field_status.UNSET + + # Convenience methods ##################################################### + + def is_set(self, key: str) -> bool: + r"""Whether an attribute is set in :obj:`TensorAttr`.""" + assert key in self.__dataclass_fields__ + return getattr(self, key) != _field_status.UNSET + + def is_fully_specified(self) -> bool: + r"""Whether the :obj:`TensorAttr` has no unset fields.""" + return all([self.is_set(key) for key in self.__dataclass_fields__]) + + def fully_specify(self): + r"""Sets all :obj:`UNSET` fields to :obj:`None`.""" + for key in self.__dataclass_fields__: + if not self.is_set(key): + setattr(self, key, None) + return self + + def update(self, attr: 'TensorAttr'): + r"""Updates an :class:`TensorAttr` with set attributes from another + :class:`TensorAttr`.""" + for key in self.__dataclass_fields__: + if attr.is_set(key): + setattr(self, key, getattr(attr, key)) + + +class AttrView(CastMixin): + r"""Defines a view of a :class:`FeatureStore` that is obtained from a + specification of attributes on the feature store. The view stores a + reference to the backing feature store as well as a :class:`TensorAttr` + object that represents the view's state. + + Users can create views either using the :class:`AttrView` constructor, + :meth:`FeatureStore.view`, or by incompletely indexing a feature store. + For example, the following calls all create views: + + .. code-block:: python + + store[group_name] + store[group_name].feat + store[group_name, feat] + + While the following calls all materialize those views and produce tensors + by either calling the view or fully-specifying the view: + + .. code-block:: python + + store[group_name]() + store[group_name].feat[index] + store[group_name, feat][index] + """ + def __init__(self, store: 'FeatureStore', attr: TensorAttr): + self.__dict__['_store'] = store + self.__dict__['_attr'] = attr + + # Advanced indexing ####################################################### + + def __getattr__(self, key: Any) -> Union['AttrView', FeatureTensorType]: + r"""Sets the first unset field of the backing :class:`TensorAttr` object + to the attribute. This allows for :class:`AttrView` to be indexed by + different values of attributes, in order. In particular, for a feature + store that we want to index by :obj:`group_name` and :obj:`attr_name`, + the following code will do so: + + .. code-block:: python + + store[group, attr] + store[group].attr + store.group.attr + """ + out = copy.copy(self) + + # Find the first attribute name that is UNSET: + attr_name: Optional[str] = None + for field in out._attr.__dataclass_fields__: + if getattr(out._attr, field) == _field_status.UNSET: + attr_name = field + break + + if attr_name is None: + raise AttributeError(f"Cannot access attribute '{key}' on view " + f"'{out}' as all attributes have already " + f"been set in this view") + + setattr(out._attr, attr_name, key) + + if out._attr.is_fully_specified(): + return out._store.get_tensor(out._attr) + + return out + + def __getitem__(self, key: Any) -> Union['AttrView', FeatureTensorType]: + r"""Sets the first unset field of the backing :class:`TensorAttr` object + to the attribute via indexing. This allows for :class:`AttrView` to be + indexed by different values of attributes, in order. 
In particular, for + a feature store that we want to index by :obj:`group_name` and + :obj:`attr_name`, the following code will do so: + + .. code-block:: python + + store[group, attr] + store[group][attr] + + """ + return self.__getattr__(key) + + # Setting attributes ###################################################### + + def __setattr__(self, key: str, value: Any): + r"""Supports attribute assignment to the backing :class:`TensorAttr` of + an :class:`AttrView`. This allows for :class:`AttrView` objects to set + their backing attribute values. In particular, the following operation + sets the :obj:`index` of an :class:`AttrView`: + + .. code-block:: python + + view = store.view(group_name) + view.index = torch.tensor([1, 2, 3]) + """ + if key not in self._attr.__dataclass_fields__: + raise ValueError(f"Attempted to set nonexistent attribute '{key}' " + f"(acceptable attributes are " + f"{self._attr.__dataclass_fields__})") + + setattr(self._attr, key, value) + + def __setitem__(self, key: str, value: Any): + r"""Supports attribute assignment to the backing :class:`TensorAttr` of + an :class:`AttrView` via indexing. This allows for :class:`AttrView` + objects to set their backing attribute values. In particular, the + following operation sets the `index` of an :class:`AttrView`: + + .. code-block:: python + + view = store.view(TensorAttr(group_name)) + view['index'] = torch.Tensor([1, 2, 3]) + """ + self.__setattr__(key, value) + + # Miscellaneous built-ins ################################################# + + def __call__(self) -> FeatureTensorType: + r"""Supports :class:`AttrView` as a callable to force retrieval from + the currently specified attributes. In particular, this passes the + current :class:`TensorAttr` object to a GET call, regardless of whether + all attributes have been specified. It returns the result of this call. + In particular, the following operation returns a tensor by performing a + GET operation on the backing feature store: + + .. code-block:: python + + store[group_name, attr_name]() + """ + # Set all UNSET values to None: + out = copy.copy(self) + out._attr.fully_specify() + return out._store.get_tensor(out._attr) + + def __copy__(self) -> 'AttrView': + out = self.__class__.__new__(self.__class__) + for key, value in self.__dict__.items(): + out.__dict__[key] = value + out.__dict__['_attr'] = copy.copy(out.__dict__['_attr']) + return out + + def __eq__(self, obj: Any) -> bool: + r"""Compares two :class:`AttrView` objects by checking equality of their + :class:`FeatureStore` references and :class:`TensorAttr` attributes.""" + if not isinstance(obj, AttrView): + return False + return self._store == obj._store and self._attr == obj._attr + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}(store={self._store}, ' + f'attr={self._attr})') + + +class FeatureStore(MutableMapping): + def __init__(self, attr_cls: Any = TensorAttr): + r"""Initializes the feature store. 
Implementor classes can customize + the ordering and required nature of their :class:`TensorAttr` tensor + attributes by subclassing :class:`TensorAttr` and passing the subclass + as :obj:`attr_cls`.""" + super().__init__() + self._attr_cls = attr_cls + + # Core (CRUD) ############################################################# + + @abstractmethod + def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: + r"""To be implemented by :class:`FeatureStore` subclasses.""" + pass + + def put_tensor(self, tensor: FeatureTensorType, *args, **kwargs) -> bool: + r"""Synchronously adds a :class:`FeatureTensorType` object to the + feature store. + + Args: + tensor (FeatureTensorType): The feature tensor to be added. + **attr (TensorAttr): Any relevant tensor attributes that correspond + to the feature tensor. See the :class:`TensorAttr` + documentation for required and optional attributes. It is the + job of implementations of a :class:`FeatureStore` to store this + metadata in a meaningful way that allows for tensor retrieval + from a :class:`TensorAttr` object. + + Returns: + bool: Whether insertion was successful. + """ + attr = self._attr_cls.cast(*args, **kwargs) + if not attr.is_fully_specified(): + raise ValueError(f"The input TensorAttr '{attr}' is not fully " + f"specified. Please fully specify the input by " + f"specifying all 'UNSET' fields") + return self._put_tensor(tensor, attr) + + @abstractmethod + def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: + r"""To be implemented by :class:`FeatureStore` subclasses.""" + pass + + def get_tensor(self, *args, **kwargs) -> Optional[FeatureTensorType]: + r"""Synchronously obtains a :class:`FeatureTensorType` object from the + feature store. Feature store implementors guarantee that the call + :obj:`get_tensor(put_tensor(tensor, attr), attr) = tensor` holds. + + Args: + **attr (TensorAttr): Any relevant tensor attributes that correspond + to the feature tensor. See the :class:`TensorAttr` + documentation for required and optional attributes. It is the + job of implementations of a :class:`FeatureStore` to store this + metadata in a meaningful way that allows for tensor retrieval + from a :class:`TensorAttr` object. + + Returns: + FeatureTensorType, optional: a Tensor of the same type as the + index, or :obj:`None` if no tensor was found. + """ + def to_type(tensor: FeatureTensorType) -> FeatureTensorType: + if tensor is None: + return None + if (isinstance(attr.index, torch.Tensor) + and isinstance(tensor, np.ndarray)): + return torch.from_numpy(tensor) + if (isinstance(attr.index, np.ndarray) + and isinstance(tensor, torch.Tensor)): + return tensor.numpy() + return tensor + + attr = self._attr_cls.cast(*args, **kwargs) + if isinstance(attr.index, slice): + if attr.index.start == attr.index.stop == attr.index.step is None: + attr.index = None + + if not attr.is_fully_specified(): + raise ValueError(f"The input TensorAttr '{attr}' is not fully " + f"specified. Please fully specify the input by " + f"specifying all 'UNSET' fields.") + + return to_type(self._get_tensor(attr)) + + @abstractmethod + def _remove_tensor(self, attr: TensorAttr) -> bool: + r"""To be implemented by :obj:`FeatureStore` subclasses.""" + pass + + def remove_tensor(self, *args, **kwargs) -> bool: + r"""Removes a :obj:`FeatureTensorType` object from the feature store. + + Args: + **attr (TensorAttr): Any relevant tensor attributes that correspond + to the feature tensor. 
See the :class:`TensorAttr` + documentation for required and optional attributes. It is the + job of implementations of a :class:`FeatureStore` to store this + metadata in a meaningful way that allows for tensor retrieval + from a :class:`TensorAttr` object. + + Returns: + bool: Whether deletion was succesful. + """ + attr = self._attr_cls.cast(*args, **kwargs) + if not attr.is_fully_specified(): + raise ValueError(f"The input TensorAttr '{attr}' is not fully " + f"specified. Please fully specify the input by " + f"specifying all 'UNSET' fields.") + self._remove_tensor(attr) + + def update_tensor(self, tensor: FeatureTensorType, *args, + **kwargs) -> bool: + r"""Updates a :class:`FeatureTensorType` object with a new value. + implementor classes can choose to define more efficient update methods; + the default performs a removal and insertion. + + Args: + tensor (FeatureTensorType): The feature tensor to be updated. + **attr (TensorAttr): Any relevant tensor attributes that correspond + to the feature tensor. See the :class:`TensorAttr` + documentation for required and optional attributes. It is the + job of implementations of a :class:`FeatureStore` to store this + metadata in a meaningful way that allows for tensor retrieval + from a :class:`TensorAttr` object. + + Returns: + bool: Whether the update was succesful. + """ + attr = self._attr_cls.cast(*args, **kwargs) + self.remove_tensor(attr) + return self.put_tensor(tensor, attr) + + # :obj:`AttrView` methods ################################################# + + def view(self, *args, **kwargs) -> AttrView: + r"""Returns an :class:`AttrView` of the feature store, with the defined + attributes set.""" + attr = self._attr_cls.cast(*args, **kwargs) + return AttrView(self, attr) + + # Python built-ins ######################################################## + + def __setitem__(self, key: TensorAttr, value: FeatureTensorType): + r"""Supports store[tensor_attr] = tensor.""" + # CastMixin will handle the case of key being a tuple or TensorAttr + # object: + key = self._attr_cls.cast(key) + # We need to fully specify the key for __setitem__ as it does not make + # sense to work with a view here: + key.fully_specify() + self.put_tensor(value, key) + + def __getitem__(self, key: TensorAttr) -> Any: + r"""Supports pythonic indexing into the feature store. In particular, + the following rules are followed for indexing: + + * A fully-specified :obj:`key` will produce a tensor output. + + * A partially-specified :obj:`key` will produce an :class:`AttrView` + output, which is a view on the :class:`FeatureStore`. If a view is + called, it will produce a tensor output from the corresponding + (partially specified) attributes. 
+ """ + # CastMixin will handle the case of key being a tuple or TensorAttr + # object: + attr = self._attr_cls.cast(key) + if attr.is_fully_specified(): + return self.get_tensor(attr) + return self.view(attr) + + def __delitem__(self, key: TensorAttr): + r"""Supports del store[tensor_attr].""" + # CastMixin will handle the case of key being a tuple or TensorAttr + # object: + key = self._attr_cls.cast(key) + key.fully_specify() + self.remove_tensor(key) + + def __iter__(self): + raise NotImplementedError + + def __eq__(self, obj: object) -> bool: + return id(self) == id(obj) + + @abstractmethod + def __len__(self): + pass + + def __repr__(self) -> str: + return f'{self.__class__.__name__}()' diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index cb672e6aef51..60bce93dfd59 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -1,5 +1,7 @@ from typing import Dict, List, Optional, Tuple, Union +import numpy as np +import torch from torch import Tensor from torch_sparse import SparseTensor @@ -20,6 +22,9 @@ Metadata = Tuple[List[NodeType], List[EdgeType]] +# A representation of a feature tensor +FeatureTensorType = Union[torch.Tensor, np.ndarray] + # Types for message passing ################################################### Adj = Union[Tensor, SparseTensor] diff --git a/torch_geometric/utils/mixin.py b/torch_geometric/utils/mixin.py new file mode 100644 index 000000000000..7f14a10a2dad --- /dev/null +++ b/torch_geometric/utils/mixin.py @@ -0,0 +1,14 @@ +class CastMixin: + @classmethod + def cast(cls, *args, **kwargs): # TODO Can we apply this recursively? + if len(args) == 1 and len(kwargs) == 0: + elem = args[0] + if elem is None: + return None + if isinstance(elem, CastMixin): + return elem + if isinstance(elem, (tuple, list)): + return cls(*elem) + if isinstance(elem, dict): + return cls(**elem) + return cls(*args, **kwargs) From 31d2479a366be5aec50416e7e790fa3c0f28e865 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Fri, 29 Apr 2022 12:00:26 -0700 Subject: [PATCH 0025/2432] Minor feature_store fixes (#4568) --- test/data/test_feature_store.py | 6 +++--- torch_geometric/data/feature_store.py | 9 +++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py index c7c77b6c985a..de3249bf78bf 100644 --- a/test/data/test_feature_store.py +++ b/test/data/test_feature_store.py @@ -25,11 +25,11 @@ def key(attr: TensorAttr) -> str: def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: index = attr.index - # Not set or None indices define the obvious index: + # None indices define the obvious index: if index is None: index = torch.arange(0, tensor.shape[0]) - # Store the index as a column: + # Store the index: self.store[MyFeatureStore.key(attr)] = (index, tensor) return True @@ -40,7 +40,7 @@ def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: if tensor is None: return None - # Not set or None indices return the whole tensor: + # None indices return the whole tensor: if attr.index is None: return tensor diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index 7e3f8a82612f..bc7d10322497 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -52,10 +52,10 @@ class TensorAttr(CastMixin): :meth:`TensorAttr.__init__`. """ - # The group name that the tensor corresponds to. Defaults to None. + # The group name that the tensor corresponds to. Defaults to UNSET. 
group_name: Optional[str] = _field_status.UNSET - # The name of the tensor within its group. Defaults to None. + # The name of the tensor within its group. Defaults to UNSET. attr_name: Optional[str] = _field_status.UNSET # The node indices the rows of the tensor correspond to. Defaults to UNSET. @@ -72,7 +72,7 @@ def is_fully_specified(self) -> bool: r"""Whether the :obj:`TensorAttr` has no unset fields.""" return all([self.is_set(key) for key in self.__dataclass_fields__]) - def fully_specify(self): + def fully_specify(self) -> 'TensorAttr': r"""Sets all :obj:`UNSET` fields to :obj:`None`.""" for key in self.__dataclass_fields__: if not self.is_set(key): @@ -346,7 +346,7 @@ def remove_tensor(self, *args, **kwargs) -> bool: raise ValueError(f"The input TensorAttr '{attr}' is not fully " f"specified. Please fully specify the input by " f"specifying all 'UNSET' fields.") - self._remove_tensor(attr) + return self._remove_tensor(attr) def update_tensor(self, tensor: FeatureTensorType, *args, **kwargs) -> bool: @@ -406,6 +406,7 @@ def __getitem__(self, key: TensorAttr) -> Any: attr = self._attr_cls.cast(key) if attr.is_fully_specified(): return self.get_tensor(attr) + # If the view is not fully specified, return a :class:`AttrView`: return self.view(attr) def __delitem__(self, key: TensorAttr): From e53affc44ef8f2b8060e603e645b9fa29e752291 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 2 May 2022 09:51:20 +0200 Subject: [PATCH 0026/2432] replace Theta symbols (#4495) --- torch_geometric/nn/conv/eg_conv.py | 4 ++-- torch_geometric/nn/conv/graph_conv.py | 5 ++--- torch_geometric/nn/conv/tag_conv.py | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/torch_geometric/nn/conv/eg_conv.py b/torch_geometric/nn/conv/eg_conv.py index 5cd72951b2f2..c55174082540 100644 --- a/torch_geometric/nn/conv/eg_conv.py +++ b/torch_geometric/nn/conv/eg_conv.py @@ -25,9 +25,9 @@ class EGConv(MessagePassing): \mathbf{x}_i^{\prime} = {\LARGE ||}_{h=1}^H \sum_{\oplus \in \mathcal{A}} \sum_{b = 1}^B w_{i, h, \oplus, b} \; \underset{j \in \mathcal{N}(i) \cup \{i\}}{\bigoplus} - \mathbf{\Theta}_b \mathbf{x}_{j} + \mathbf{W}_b \mathbf{x}_{j} - with :math:`\mathbf{\Theta}_b` denoting a basis weight, + with :math:`\mathbf{W}_b` denoting a basis weight, :math:`\oplus` denoting an aggregator, and :math:`w` denoting per-vertex weighting coefficients across different heads, bases and aggregators. diff --git a/torch_geometric/nn/conv/graph_conv.py b/torch_geometric/nn/conv/graph_conv.py index 417723004cb4..06f57061d2c1 100644 --- a/torch_geometric/nn/conv/graph_conv.py +++ b/torch_geometric/nn/conv/graph_conv.py @@ -14,9 +14,8 @@ class GraphConv(MessagePassing): `_ paper .. math:: - \mathbf{x}^{\prime}_i = \mathbf{\Theta}_1 \mathbf{x}_i + - \mathbf{\Theta}_2 \sum_{j \in \mathcal{N}(i)} e_{j,i} \cdot - \mathbf{x}_j + \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + \mathbf{W}_2 + \sum_{j \in \mathcal{N}(i)} e_{j,i} \cdot \mathbf{x}_j where :math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to target node :obj:`i` (default: :obj:`1`) diff --git a/torch_geometric/nn/conv/tag_conv.py b/torch_geometric/nn/conv/tag_conv.py index 5d34c1d9b334..0bc24f1b9ec1 100644 --- a/torch_geometric/nn/conv/tag_conv.py +++ b/torch_geometric/nn/conv/tag_conv.py @@ -15,7 +15,7 @@ class TAGConv(MessagePassing): .. 
math:: \mathbf{X}^{\prime} = \sum_{k=0}^K \left( \mathbf{D}^{-1/2} \mathbf{A} - \mathbf{D}^{-1/2} \right)^k \mathbf{X} \mathbf{\Theta}_{k}, + \mathbf{D}^{-1/2} \right)^k \mathbf{X} \mathbf{W}_{k}, where :math:`\mathbf{A}` denotes the adjacency matrix and :math:`D_{ii} = \sum_{j=0} A_{ij}` its diagonal degree matrix. From d268e8608ca64bd285640fc9480e6c346d0baf80 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 2 May 2022 10:41:12 +0200 Subject: [PATCH 0027/2432] Added `CHANGELOG.md` (#4581) * update * update * add name * update --- .github/workflows/changelog.yml | 13 +++++++++++++ CHANGELOG.md | 10 ++++++++++ 2 files changed, 23 insertions(+) create mode 100644 .github/workflows/changelog.yml create mode 100644 CHANGELOG.md diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 000000000000..a968e49f88d6 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,13 @@ +name: Changelog Enforcer + +on: # yamllint disable-line rule:truthy + pull_request: + +jobs: + + changelog: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - uses: dangoslen/changelog-enforcer@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000000..5f4346bb66ce --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). + +## [2.0.5] - 2022-MM-DD +### Added +- Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) +### Changed +### Removed From be97c5bb8c7485fbedc8bad9607aa601694ce0d2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 2 May 2022 17:31:15 +0200 Subject: [PATCH 0028/2432] `GlobalPooling` and graph-level `to_hetero` support (#4582) * update * update call * update * update * update * update * typo * changelog --- CHANGELOG.md | 2 + test/nn/test_to_hetero_transformer.py | 67 ++++++++++++++++----- torch_geometric/nn/fx.py | 57 ++++++++++++++---- torch_geometric/nn/glob/__init__.py | 2 + torch_geometric/nn/glob/glob.py | 43 ++++++++++++- torch_geometric/nn/to_hetero_transformer.py | 64 ++++++++++++++++++-- 6 files changed, 204 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f4346bb66ce..c58fb878ea91 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added `nn.glob.GlobalPooling` module with support for multiple aggregations ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) +- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed ### Removed diff --git a/test/nn/test_to_hetero_transformer.py b/test/nn/test_to_hetero_transformer.py index d5458f6bd2ab..95bb0e7ab426 100644 --- a/test/nn/test_to_hetero_transformer.py +++ b/test/nn/test_to_hetero_transformer.py @@ -5,7 +5,7 @@ from torch.nn import Linear, ReLU, Sequential from torch_sparse import SparseTensor -from torch_geometric.nn import BatchNorm, GCNConv, GINEConv +from torch_geometric.nn import BatchNorm, GCNConv, GINEConv, GlobalPooling from torch_geometric.nn import Linear as LazyLinear from torch_geometric.nn import MessagePassing, RGCNConv, SAGEConv, to_hetero @@ -123,11 +123,10 @@ def forward(self, x: Tensor) -> Tensor: def test_to_hetero(): - metadata = (['paper', 'author'], [('paper', 'cites', 'paper'), - ('paper', 'written_by', 'author'), - ('author', 'writes', 'paper')]) - - x_dict = {'paper': torch.randn(100, 16), 'author': torch.randn(100, 16)} + x_dict = { + 'paper': torch.randn(100, 16), + 'author': torch.randn(100, 16), + } edge_index_dict = { ('paper', 'cites', 'paper'): torch.randint(100, (2, 200), dtype=torch.long), @@ -142,6 +141,8 @@ def test_to_hetero(): ('author', 'writes', 'paper'): torch.randn(200, 8), } + metadata = list(x_dict.keys()), list(edge_index_dict.keys()) + model = Net1() model = to_hetero(model, metadata, debug=False) out = model(x_dict, edge_attr_dict) @@ -225,13 +226,16 @@ def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: def test_to_hetero_with_gcn(): - metadata = (['paper'], [('paper', '0', 'paper'), ('paper', '1', 'paper')]) - x_dict = {'paper': torch.randn(100, 16)} + x_dict = { + 'paper': torch.randn(100, 16), + } edge_index_dict = { ('paper', '0', 'paper'): torch.randint(100, (2, 200)), ('paper', '1', 'paper'): torch.randint(100, (2, 200)), } + metadata = list(x_dict.keys()), list(edge_index_dict.keys()) + model = GCN() model = to_hetero(model, metadata, debug=False) out = model(x_dict, edge_index_dict) @@ -284,10 +288,6 @@ def test_to_hetero_and_rgcn_equal_output(): out1 = conv(x, edge_index, edge_type) # Run `to_hetero`: - node_types = ['paper', 'author'] - edge_types = [('paper', '_', 'paper'), ('paper', '_', 'author'), - ('author', '_', 'paper')] - x_dict = { 'paper': x[:6], 'author': x[6:], @@ -301,13 +301,14 @@ def test_to_hetero_and_rgcn_equal_output(): edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]), } + node_types, edge_types = list(x_dict.keys()), list(edge_index_dict.keys()) + adj_t_dict = { key: SparseTensor.from_edge_index(edge_index).t() for key, edge_index in edge_index_dict.items() } - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - model = to_hetero(RGCN(16, 32), metadata) + model = to_hetero(RGCN(16, 32), (node_types, edge_types)) # Set model weights: for i, edge_type in enumerate(edge_types): @@ -324,3 +325,41 @@ def test_to_hetero_and_rgcn_equal_output(): out3 = model(x_dict, adj_t_dict) out3 = torch.cat([out3['paper'], out3['author']], dim=0) assert torch.allclose(out1, out3, atol=1e-6) + + +class GraphLevelGNN(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = SAGEConv(16, 32) + self.pool = GlobalPooling(aggr='mean') + self.lin = Linear(32, 64) 
+ + def forward(self, x: Tensor, edge_index: Tensor, batch: Tensor) -> Tensor: + x = self.conv(x, edge_index) + x = self.pool(x, batch) + x = self.lin(x) + return x + + +def test_graph_level_to_hetero(): + x_dict = { + 'paper': torch.randn(100, 16), + 'author': torch.randn(100, 16), + } + edge_index_dict = { + ('paper', 'written_by', 'author'): + torch.randint(100, (2, 200), dtype=torch.long), + ('author', 'writes', 'paper'): + torch.randint(100, (2, 200), dtype=torch.long), + } + batch_dict = { + 'paper': torch.zeros(100, dtype=torch.long), + 'author': torch.zeros(100, dtype=torch.long), + } + + metadata = list(x_dict.keys()), list(edge_index_dict.keys()) + + model = GraphLevelGNN() + model = to_hetero(model, metadata, aggr='mean', debug=False) + out = model(x_dict, edge_index_dict, batch_dict) + assert out.size() == (1, 64) diff --git a/torch_geometric/nn/fx.py b/torch_geometric/nn/fx.py index 94f1afb6463a..10d3663df019 100644 --- a/torch_geometric/nn/fx.py +++ b/torch_geometric/nn/fx.py @@ -4,8 +4,6 @@ import torch from torch.nn import Module, ModuleDict, ModuleList, Sequential -from torch_geometric.nn.conv import MessagePassing - try: from torch.fx import Graph, GraphModule, Node except (ImportError, ModuleNotFoundError, AttributeError): @@ -32,6 +30,7 @@ class Transformer(object): +-- call_method() +-- call_module() +-- call_message_passing_module() + +-- call_global_pooling_module() +-- output() +-- Erase unused nodes in the graph +-- Iterate over each children module @@ -41,8 +40,9 @@ class Transformer(object): :class:`Transformer` exposes additional functionality: #. It subdivides :func:`call_module` into nodes that call a regular - :class:`torch.nn.Module` (:func:`call_module`) or a - :class:`MessagePassing` module (:func:`call_message_passing_module`). + :class:`torch.nn.Module` (:func:`call_module`), a + :class:`MessagePassing` module (:func:`call_message_passing_module`), + or a :class:`GlobalPooling` module (:func:`call_global_pooling_module`). #. 
It allows to customize or initialize new children modules via :func:`init_submodule` @@ -85,6 +85,9 @@ def get_attr(self, node: Node, target: Any, name: str): def call_message_passing_module(self, node: Node, target: Any, name: str): pass + def call_global_pooling_module(self, node: Node, target: Any, name: str): + pass + def call_module(self, node: Node, target: Any, name: str): pass @@ -132,11 +135,15 @@ def transform(self) -> GraphModule: self._state[node.name] = 'node' elif is_message_passing_op(self.module, node.op, node.target): self._state[node.name] = 'node' + elif is_global_pooling_op(self.module, node.op, node.target): + self._state[node.name] = 'graph' elif node.op in ['call_module', 'call_method', 'call_function']: if self.has_edge_level_arg(node): self._state[node.name] = 'edge' - else: + elif self.has_node_level_arg(node): self._state[node.name] = 'node' + else: + self._state[node.name] = 'graph' # We iterate over each node and may transform it: for node in list(self.graph.nodes): @@ -145,6 +152,9 @@ def transform(self) -> GraphModule: op = node.op if is_message_passing_op(self.module, op, node.target): op = 'call_message_passing_module' + elif is_global_pooling_op(self.module, op, node.target): + op = 'call_global_pooling_module' + getattr(self, op)(node, node.target, node.name) # Remove all unused nodes in the computation graph, i.e., all nodes @@ -190,13 +200,13 @@ def _init_submodule(self, module: Module, target: str) -> Module: else: return self.init_submodule(module, target) - def is_edge_level(self, node: Node) -> bool: - return self._state[node.name] == 'edge' + def _is_level(self, node: Node, name: str) -> bool: + return self._state[node.name] == name - def has_edge_level_arg(self, node: Node) -> bool: + def _has_level_arg(self, node: Node, name: str) -> bool: def _recurse(value: Any) -> bool: if isinstance(value, Node): - return self.is_edge_level(value) + return getattr(self, f'is_{name}_level')(value) elif isinstance(value, dict): return any([_recurse(v) for v in value.values()]) elif isinstance(value, (list, tuple)): @@ -207,6 +217,24 @@ def _recurse(value: Any) -> bool: return (any([_recurse(value) for value in node.args]) or any([_recurse(value) for value in node.kwargs.values()])) + def is_node_level(self, node: Node) -> bool: + return self._is_level(node, name='node') + + def is_edge_level(self, node: Node) -> bool: + return self._is_level(node, name='edge') + + def is_graph_level(self, node: Node) -> bool: + return self._is_level(node, name='graph') + + def has_node_level_arg(self, node: Node) -> bool: + return self._has_level_arg(node, name='node') + + def has_edge_level_arg(self, node: Node) -> bool: + return self._has_level_arg(node, name='edge') + + def has_graph_level_arg(self, node: Node) -> bool: + return self._has_level_arg(node, name='graph') + def find_by_name(self, name: str) -> Optional[Node]: for node in self.graph.nodes: if node.name == name: @@ -249,7 +277,14 @@ def get_submodule(module: Module, target: str) -> Module: def is_message_passing_op(module: Module, op: str, target: str) -> bool: + from torch_geometric.nn import MessagePassing + if op == 'call_module': + return isinstance(get_submodule(module, target), MessagePassing) + return False + + +def is_global_pooling_op(module: Module, op: str, target: str) -> bool: + from torch_geometric.nn import GlobalPooling if op == 'call_module': - if isinstance(get_submodule(module, target), MessagePassing): - return True + return isinstance(get_submodule(module, target), GlobalPooling) return False 
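A minimal usage sketch of the `GlobalPooling` module introduced in the next hunks (assuming the patch is applied; shapes and values are illustrative):

    import torch
    from torch_geometric.nn import GlobalPooling

    x = torch.randn(6, 16)                    # 6 nodes with 16 features
    batch = torch.tensor([0, 0, 0, 1, 1, 1])  # two graphs of three nodes each

    pool = GlobalPooling(aggr='mean')
    assert pool(x, batch).size() == (2, 16)

    # Passing a list of aggregations concatenates their outputs:
    pool = GlobalPooling(aggr=['mean', 'max'])
    assert pool(x, batch).size() == (2, 32)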
diff --git a/torch_geometric/nn/glob/__init__.py b/torch_geometric/nn/glob/__init__.py index 5921d5a86206..8b911ccf859e 100644 --- a/torch_geometric/nn/glob/__init__.py +++ b/torch_geometric/nn/glob/__init__.py @@ -1,4 +1,5 @@ from .glob import global_add_pool, global_mean_pool, global_max_pool +from .glob import GlobalPooling from .sort import global_sort_pool from .attention import GlobalAttention from .set2set import Set2Set @@ -8,6 +9,7 @@ 'global_add_pool', 'global_mean_pool', 'global_max_pool', + 'GlobalPooling', 'global_sort_pool', 'GlobalAttention', 'Set2Set', diff --git a/torch_geometric/nn/glob/glob.py b/torch_geometric/nn/glob/glob.py index 2eb3bfafed66..c4eb0b541297 100644 --- a/torch_geometric/nn/glob/glob.py +++ b/torch_geometric/nn/glob/glob.py @@ -1,5 +1,6 @@ -from typing import Optional +from typing import List, Optional, Union +import torch from torch import Tensor from torch_scatter import scatter @@ -74,3 +75,43 @@ def global_max_pool(x: Tensor, batch: Optional[Tensor], return x.max(dim=0, keepdim=True)[0] size = int(batch.max().item() + 1) if size is None else size return scatter(x, batch, dim=0, dim_size=size, reduce='max') + + +class GlobalPooling(torch.nn.Module): + r"""A global pooling module that wraps the usage of + :meth:`~torch_geometric.nn.glob.global_add_pool`, + :meth:`~torch_geometric.nn.glob.global_mean_pool` and + :meth:`~torch_geometric.nn.glob.global_max_pool` into a single module. + + Args: + aggr (string or List[str]): The aggregation scheme to use + (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`). + If given as a list, will make use of multiple aggregations in which + different outputs will get concatenated in the last dimension. + """ + def __init__(self, aggr: Union[str, List[str]]): + super().__init__() + + self.aggrs = [aggr] if isinstance(aggr, str) else aggr + + assert len(self.aggrs) > 0 + assert len(set(self.aggrs) | {'sum', 'add', 'mean', 'max'}) == 4 + + def forward(self, x: Tensor, batch: Optional[Tensor], + size: Optional[int] = None) -> Tensor: + """""" + xs: List[Tensor] = [] + + for aggr in self.aggrs: + if aggr == 'sum' or aggr == 'add': + xs.append(global_add_pool(x, batch, size)) + elif aggr == 'mean': + xs.append(global_mean_pool(x, batch, size)) + elif aggr == 'max': + xs.append(global_max_pool(x, batch, size)) + + return xs[0] if len(xs) == 1 else torch.cat(xs, dim=-1) + + def __repr__(self) -> str: + aggr = self.aggrs[0] if len(self.aggrs) == 1 else self.aggrs + return f'{self.__class__.__name__}(aggr={aggr})' diff --git a/torch_geometric/nn/to_hetero_transformer.py b/torch_geometric/nn/to_hetero_transformer.py index cedd26f4d8e2..710ce8e3e05b 100644 --- a/torch_geometric/nn/to_hetero_transformer.py +++ b/torch_geometric/nn/to_hetero_transformer.py @@ -150,7 +150,6 @@ def __init__( def placeholder(self, node: Node, target: Any, name: str): # Adds a `get` call to the input dictionary for every node-type or # edge-type. - if node.type is not None: Type = EdgeType if self.is_edge_level(node) else NodeType node.type = Dict[Type, node.type] @@ -197,14 +196,14 @@ def call_message_passing_module(self, node: Node, target: Any, name: str): # `keys_per_dst` and append the result to the list. 
for dst, keys in keys_per_dst.items(): queue = deque([key_name[key] for key in keys]) - i = len(queue) + 1 + i = 1 while len(queue) >= 2: key1, key2 = queue.popleft(), queue.popleft() args = (self.find_by_name(key1), self.find_by_name(key2)) new_name = f'{name}__{dst}' if self.aggr == 'mean' or len(queue) > 0: - new_name += f'{i}' + new_name = f'{new_name}_{i}' out = self.graph.create_node('call_function', target=self.aggrs[self.aggr], @@ -221,7 +220,46 @@ def call_message_passing_module(self, node: Node, target: Any, name: str): name=f'{name}__{dst}') self.graph.inserting_after(out) + def call_global_pooling_module(self, node: Node, target: Any, name: str): + # Add calls to node type-wise `GlobalPooling` modules and aggregate + # the outputs to graph type-wise embeddings afterwards. + self.graph.inserting_after(node) + for key in self.metadata[0]: + args, kwargs = self.map_args_kwargs(node, key) + out = self.graph.create_node('call_module', + target=f'{target}.{key2str(key)}', + args=args, kwargs=kwargs, + name=f'{node.name}__{key2str(key)}') + self.graph.inserting_after(out) + + # Perform node-wise aggregation. + queue = deque( + [f'{node.name}__{key2str(key)}' for key in self.metadata[0]]) + i = 1 + while len(queue) >= 2: + key1, key2 = queue.popleft(), queue.popleft() + args = (self.find_by_name(key1), self.find_by_name(key2)) + out = self.graph.create_node('call_function', + target=self.aggrs[self.aggr], + args=args, name=f'{name}_{i}') + self.graph.inserting_after(out) + queue.append(f'{name}_{i}') + i += 1 + + if self.aggr == 'mean': + key = queue.popleft() + out = self.graph.create_node( + 'call_function', target=torch.div, + args=(self.find_by_name(key), len(self.metadata[0])), + name=f'{name}_{i}') + self.graph.inserting_after(out) + + self.replace_all_uses_with(node, out) + def call_module(self, node: Node, target: Any, name: str): + if self.is_graph_level(node): + return + # Add calls to node type-wise or edge type-wise modules. self.graph.inserting_after(node) for key in self.metadata[int(self.is_edge_level(node))]: @@ -233,6 +271,9 @@ def call_module(self, node: Node, target: Any, name: str): self.graph.inserting_after(out) def call_method(self, node: Node, target: Any, name: str): + if self.is_graph_level(node): + return + # Add calls to node type-wise or edge type-wise methods. self.graph.inserting_after(node) for key in self.metadata[int(self.is_edge_level(node))]: @@ -243,6 +284,9 @@ def call_method(self, node: Node, target: Any, name: str): self.graph.inserting_after(out) def call_function(self, node: Node, target: Any, name: str): + if self.is_graph_level(node): + return + # Add calls to node type-wise or edge type-wise functions. self.graph.inserting_after(node) for key in self.metadata[int(self.is_edge_level(node))]: @@ -257,6 +301,8 @@ def output(self, node: Node, target: Any, name: str): # edge type-wise data. 
def _recurse(value: Any) -> Any: if isinstance(value, Node): + if self.is_graph_level(value): + return value return { key: self.find_by_name(f'{value.name}__{key2str(key)}') for key in self.metadata[int(self.is_edge_level(value))] @@ -272,8 +318,10 @@ def _recurse(value: Any) -> Any: if node.type is not None and isinstance(node.args[0], Node): output = node.args[0] - Type = EdgeType if self.is_edge_level(output) else NodeType - node.type = Dict[Type, node.type] + if self.is_node_level(output): + node.type = Dict[NodeType, node.type] + elif self.is_edge_level(output): + node.type = Dict[EdgeType, node.type] else: node.type = None @@ -281,9 +329,14 @@ def _recurse(value: Any) -> Any: def init_submodule(self, module: Module, target: str) -> Module: # Replicate each module for each node type or edge type. + has_node_level_target = bool( + self.find_by_target(f'{target}.{key2str(self.metadata[0][0])}')) has_edge_level_target = bool( self.find_by_target(f'{target}.{key2str(self.metadata[1][0])}')) + if not has_node_level_target and not has_edge_level_target: + return module + module_dict = torch.nn.ModuleDict() for key in self.metadata[int(has_edge_level_target)]: module_dict[key2str(key)] = copy.deepcopy(module) @@ -296,6 +349,7 @@ def init_submodule(self, module: Module, target: str) -> Module: f"'{target}' will be duplicated, but its parameters " f"cannot be reset. To suppress this warning, add a " f"'reset_parameters()' method to '{target}'") + return module_dict # Helper methods ########################################################## From 5ed4b38f37d794248fdd956c5d01f10c0eba202c Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Wed, 4 May 2022 22:29:04 +0800 Subject: [PATCH 0029/2432] change dataset behaviour when dataset is overridden (#4586) * change dataset behaviour when dataset is overridden * changelog * remove not implemented in derived class * merge Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_dataset.py | 61 ++++++++++++++++++++++- torch_geometric/data/dataset.py | 4 +- torch_geometric/data/in_memory_dataset.py | 6 --- 4 files changed, 63 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c58fb878ea91..ca1339590768 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,4 +9,5 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) ### Removed diff --git a/test/data/test_dataset.py b/test/data/test_dataset.py index eb3a532da3c2..e3214fac22e9 100644 --- a/test/data/test_dataset.py +++ b/test/data/test_dataset.py @@ -1,6 +1,6 @@ import torch -from torch_geometric.data import Data, HeteroData, InMemoryDataset +from torch_geometric.data import Data, Dataset, HeteroData, InMemoryDataset class MyTestDataset(InMemoryDataset): @@ -99,3 +99,62 @@ def test_hetero_in_memory_dataset(): assert dataset[1]['paper'].x.tolist() == data2['paper'].x.tolist() assert (dataset[1]['paper', 'paper'].edge_index.tolist() == data2[ 'paper', 'paper'].edge_index.tolist()) + + +def test_override_behaviour(): + class DS(Dataset): + def __init__(self): + self.enter_download = False + self.enter_process = False + super().__init__() + + def _download(self): + self.enter_download = True + + def _process(self): + self.enter_process = True + + def download(self): + pass + + def process(self): + pass + + class DS2(Dataset): + def __init__(self): + self.enter_download = False + self.enter_process = False + super().__init__() + + def _download(self): + self.enter_download = True + + def _process(self): + self.enter_process = True + + def process(self): + pass + + class DS3(Dataset): + def __init__(self): + self.enter_download = False + self.enter_process = False + super().__init__() + + def _download(self): + self.enter_download = True + + def _process(self): + self.enter_process = True + + ds = DS() + assert ds.enter_download + assert ds.enter_process + + ds = DS2() + assert not ds.enter_download + assert ds.enter_process + + ds = DS3() + assert not ds.enter_download + assert not ds.enter_process diff --git a/torch_geometric/data/dataset.py b/torch_geometric/data/dataset.py index 25d85859a6c8..cf006b5f1054 100644 --- a/torch_geometric/data/dataset.py +++ b/torch_geometric/data/dataset.py @@ -80,10 +80,10 @@ def __init__(self, root: Optional[str] = None, self.pre_filter = pre_filter self._indices: Optional[Sequence] = None - if 'download' in self.__class__.__dict__: + if self.download.__qualname__.split('.')[0] != 'Dataset': self._download() - if 'process' in self.__class__.__dict__: + if self.process.__qualname__.split('.')[0] != 'Dataset': self._process() def indices(self) -> Sequence: diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index 2aaa3e5028f4..33929726f672 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -43,12 +43,6 @@ def raw_file_names(self) -> Union[str, List[str], Tuple]: def processed_file_names(self) -> Union[str, List[str], Tuple]: raise NotImplementedError - def download(self): - raise NotImplementedError - - def process(self): - raise NotImplementedError - def __init__(self, root: Optional[str] = None, transform: Optional[Callable] = None, pre_transform: Optional[Callable] = None, From 926b5dc4d6515ee40e5774ed68dde1e6ff9ae4df Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 5 May 2022 15:32:31 +0200 Subject: [PATCH 0030/2432] Add `bias` to `TAGConv` (#4597) * add bias to TAGConv * changelog --- CHANGELOG.md | 1 + torch_geometric/nn/conv/tag_conv.py | 16 ++++++++++++++-- 
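The `__qualname__`-based check that `Dataset` now performs above (#4586) can be reproduced in isolation. A minimal sketch with made-up class names, showing how the defining class of a bound method reveals whether a subclass actually overrides the hook:

class Base:
    def process(self):
        pass

class WithProcess(Base):
    def process(self):
        pass

class WithoutProcess(Base):
    pass

# The first component of `__qualname__` is the class that defines the method:
print(WithProcess().process.__qualname__.split('.')[0])     # 'WithProcess' -> run the hook
print(WithoutProcess().process.__qualname__.split('.')[0])  # 'Base' -> skip the hook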
2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca1339590768..f87161a30313 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,5 +9,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- The `bias` argument in `TAGConv` is now actually apllied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) ### Removed diff --git a/torch_geometric/nn/conv/tag_conv.py b/torch_geometric/nn/conv/tag_conv.py index 0bc24f1b9ec1..c0d18e6c4d44 100644 --- a/torch_geometric/nn/conv/tag_conv.py +++ b/torch_geometric/nn/conv/tag_conv.py @@ -5,6 +5,7 @@ from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.nn.dense.linear import Linear +from torch_geometric.nn.inits import zeros from torch_geometric.typing import Adj, OptTensor @@ -51,14 +52,21 @@ def __init__(self, in_channels: int, out_channels: int, K: int = 3, self.K = K self.normalize = normalize - self.lins = torch.nn.ModuleList( - [Linear(in_channels, out_channels) for _ in range(K + 1)]) + self.lins = torch.nn.ModuleList([ + Linear(in_channels, out_channels, bias=False) for _ in range(K + 1) + ]) + + if bias: + self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): for lin in self.lins: lin.reset_parameters() + zeros(self.bias) def forward(self, x: Tensor, edge_index: Adj, edge_weight: OptTensor = None) -> Tensor: @@ -80,6 +88,10 @@ def forward(self, x: Tensor, edge_index: Adj, x = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=None) out += lin.forward(x) + + if self.bias is not None: + out += self.bias + return out def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor: From 9a16a0eb841e1efaedc06efb5b6b5bcf407bf588 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Fri, 6 May 2022 18:51:12 +0800 Subject: [PATCH 0031/2432] Add missing Genius dataset (#4570) * add linkx datasets * remove added datasets not needed * restrict to genius dataset * add changelog note * revert to using Linkx dataset * fix changelog * merge Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/linkx_dataset.py | 33 +++++++++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f87161a30313..79d8a8f17bbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
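With the `TAGConv` change above, the `bias` argument is honored: the per-hop linear layers drop their own bias and a single shared bias is added to the output. A small usage sketch on a toy graph (shapes are arbitrary):

import torch
from torch_geometric.nn import TAGConv

x = torch.randn(4, 3)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])

conv = TAGConv(3, 8, K=2, bias=True)
out = conv(x, edge_index)  # [4, 8], shifted by the learnable bias
assert conv.bias is not None
assert TAGConv(3, 8, bias=False).bias is None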
## [2.0.5] - 2022-MM-DD ### Added +- Added the `Genius` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570)) - Added `nn.glob.GlobalPooling` module with support for multiple aggregations ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) diff --git a/torch_geometric/datasets/linkx_dataset.py b/torch_geometric/datasets/linkx_dataset.py index b372c4674c15..84ae20f477ca 100644 --- a/torch_geometric/datasets/linkx_dataset.py +++ b/torch_geometric/datasets/linkx_dataset.py @@ -13,11 +13,15 @@ class LINKXDataset(InMemoryDataset): Learning on Non-Homophilous Graphs: New Benchmarks and Strong Simple Methods" `_ paper. + .. note:: + Some of the datasets provided in :class:`LINKXDataset` are from other + sources, but have been updated with new features and/or labels. + Args: root (string): Root directory where the dataset should be saved. name (string): The name of the dataset (:obj:`"penn94"`, :obj:`"reed98"`, :obj:`"amherst41"`, :obj:`"cornell5"`, - :obj:`"johnshopkins55"`). + :obj:`"johnshopkins55"`, :obj:`"genius"`). transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before every access. @@ -30,12 +34,17 @@ class LINKXDataset(InMemoryDataset): url = 'https://github.com/CUAI/Non-Homophily-Large-Scale/raw/master/data' + facebook_datasets = [ + 'penn94', 'reed98', 'amherst41', 'cornell5', 'johnshopkins55' + ] + datasets = { 'penn94': f'{url}/facebook100/Penn94.mat', 'reed98': f'{url}/facebook100/Reed98.mat', 'amherst41': f'{url}/facebook100/Amherst41.mat', 'cornell5': f'{url}/facebook100/Cornell5.mat', 'johnshopkins55': f'{url}/facebook100/Johns%20Hopkins55.mat', + 'genius': f'{url}/genius.mat' } splits = { @@ -74,7 +83,7 @@ def download(self): if self.name in self.splits: download_url(http://github.com/self.splits[self.name],%20self.raw_dir) - def process(self): + def _process_facebook(self): from scipy.io import loadmat mat = loadmat(self.raw_paths[0]) @@ -108,6 +117,26 @@ def process(self): data.val_mask[:, i][torch.tensor(split['valid'])] = True data.test_mask[:, i][torch.tensor(split['test'])] = True + return data + + def _process_genius(self): + from scipy.io import loadmat + + mat = loadmat(self.raw_paths[0]) + edge_index = torch.from_numpy(mat['edge_index']).to(torch.long) + x = torch.from_numpy(mat['node_feat']).to(torch.float) + y = torch.from_numpy(mat['label']).squeeze().to(torch.long) + + return Data(x=x, edge_index=edge_index, y=y) + + def process(self): + if self.name in self.facebook_datasets: + data = self._process_facebook() + elif self.name == 'genius': + data = self._process_genius() + else: + raise NotImplementedError + if self.pre_transform is not None: data = self.pre_transform(data) From cd6c1f7179fc28e953e98be0eaf8aa5f201ca94d Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Mon, 9 May 2022 18:52:52 +0800 Subject: [PATCH 0032/2432] Adding `'wiki'` dataset to `LINKXDataset` (#4600) * adding wiki linkx dataset * adding wiki linkx dataset * add changelog * increase chunk size * Update torch_geometric/datasets/linkx_dataset.py Co-authored-by: Matthias Fey * Update torch_geometric/datasets/linkx_dataset.py Co-authored-by: Matthias Fey * Update 
torch_geometric/datasets/linkx_dataset.py Co-authored-by: Matthias Fey * Update torch_geometric/datasets/linkx_dataset.py Co-authored-by: Matthias Fey * Update torch_geometric/datasets/linkx_dataset.py Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- torch_geometric/data/download.py | 17 +++++-- torch_geometric/datasets/linkx_dataset.py | 57 ++++++++++++++++++----- 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79d8a8f17bbb..aa11b32c5ffe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added -- Added the `Genius` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570)) +- Added the `Genius` and `Wiki` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) - Added `nn.glob.GlobalPooling` module with support for multiple aggregations ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) diff --git a/torch_geometric/data/download.py b/torch_geometric/data/download.py index 6a4f524e54c5..256610bab8a9 100644 --- a/torch_geometric/data/download.py +++ b/torch_geometric/data/download.py @@ -2,11 +2,13 @@ import ssl import sys import urllib +from typing import Optional from .makedirs import makedirs -def download_url(/service/url: str, folder: str, log: bool = True): +def download_url(url: str, folder: str, log: bool = True, + filename: Optional[str] = None): r"""Downloads the content of an URL to a specific folder. Args: @@ -16,8 +18,10 @@ def download_url(/service/url: str, folder: str, log: bool = True): console. (default: :obj:`True`) """ - filename = url.rpartition('/')[2] - filename = filename if filename[0] == '?' else filename.split('?')[0] + if filename is None: + filename = url.rpartition('/')[2] + filename = filename if filename[0] == '?' else filename.split('?')[0] + path = osp.join(folder, filename) if osp.exists(path): # pragma: no cover @@ -34,6 +38,11 @@ def download_url(/service/url: str, folder: str, log: bool = True): data = urllib.request.urlopen(url, context=context) with open(path, 'wb') as f: - f.write(data.read()) + # workaround for https://bugs.python.org/issue42853 + while True: + chunk = data.read(10 * 1024 * 1024) + if not chunk: + break + f.write(chunk) return path diff --git a/torch_geometric/datasets/linkx_dataset.py b/torch_geometric/datasets/linkx_dataset.py index 84ae20f477ca..e0c7038f4d6b 100644 --- a/torch_geometric/datasets/linkx_dataset.py +++ b/torch_geometric/datasets/linkx_dataset.py @@ -32,23 +32,45 @@ class LINKXDataset(InMemoryDataset): being saved to disk. 
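# A usage sketch, not part of the patch: the newly wired-up matrices load like any other
# `LINKXDataset` entry; the root path below is a placeholder.
from torch_geometric.datasets import LINKXDataset

dataset = LINKXDataset(root='/tmp/LINKX', name='genius')
data = dataset[0]
print(data.num_nodes, data.num_features)  # labels live in `data.y`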
(default: :obj:`None`) """ - url = 'https://github.com/CUAI/Non-Homophily-Large-Scale/raw/master/data' + github_url = ('https://github.com/CUAI/Non-Homophily-Large-Scale/' + 'raw/master/data') + gdrive_url = 'https://drive.google.com/uc?confirm=t&' facebook_datasets = [ 'penn94', 'reed98', 'amherst41', 'cornell5', 'johnshopkins55' ] datasets = { - 'penn94': f'{url}/facebook100/Penn94.mat', - 'reed98': f'{url}/facebook100/Reed98.mat', - 'amherst41': f'{url}/facebook100/Amherst41.mat', - 'cornell5': f'{url}/facebook100/Cornell5.mat', - 'johnshopkins55': f'{url}/facebook100/Johns%20Hopkins55.mat', - 'genius': f'{url}/genius.mat' + 'penn94': { + 'data.mat': f'{github_url}/facebook100/Penn94.mat' + }, + 'reed98': { + 'data.mat': f'{github_url}/facebook100/Reed98.mat' + }, + 'amherst41': { + 'data.mat': f'{github_url}/facebook100/Amherst41.mat', + }, + 'cornell5': { + 'data.mat': f'{github_url}/facebook100/Cornell5.mat' + }, + 'johnshopkins55': { + 'data.mat': f'{github_url}/facebook100/Johns%20Hopkins55.mat' + }, + 'genius': { + 'data.mat': f'{github_url}/genius.mat' + }, + 'wiki': { + 'wiki_views2M.pt': + f'{gdrive_url}id=1p5DlVHrnFgYm3VsNIzahSsvCD424AyvP', + 'wiki_edges2M.pt': + f'{gdrive_url}id=14X7FlkjrlUgmnsYtPwdh-gGuFla4yb5u', + 'wiki_features2M.pt': + f'{gdrive_url}id=1ySNspxbK-snNoAZM7oxiWGvOnTRdSyEK' + } } splits = { - 'penn94': f'{url}/splits/fb100-Penn94-splits.npy', + 'penn94': f'{github_url}/splits/fb100-Penn94-splits.npy', } def __init__(self, root: str, name: str, @@ -69,7 +91,7 @@ def processed_dir(self) -> str: @property def raw_file_names(self) -> List[str]: - names = [self.datasets[self.name].split('/')[-1]] + names = list(self.datasets[self.name].keys()) if self.name in self.splits: names += [self.splits[self.name].split('/')[-1]] return names @@ -79,10 +101,20 @@ def processed_file_names(self) -> str: return 'data.pt' def download(self): - download_url(http://github.com/self.datasets[self.name],%20self.raw_dir) + for filename, path in self.datasets[self.name].items(): + download_url(http://github.com/path,%20self.raw_dir,%20filename=filename) if self.name in self.splits: download_url(http://github.com/self.splits[self.name],%20self.raw_dir) + def _process_wiki(self): + + paths = {x.split('/')[-1]: x for x in self.raw_paths} + x = torch.load(paths['wiki_features2M.pt']) + edge_index = torch.load(paths['wiki_edges2M.pt']).t().contiguous() + y = torch.load(paths['wiki_views2M.pt']) + + return Data(x=x, edge_index=edge_index, y=y) + def _process_facebook(self): from scipy.io import loadmat @@ -134,8 +166,11 @@ def process(self): data = self._process_facebook() elif self.name == 'genius': data = self._process_genius() + elif self.name == 'wiki': + data = self._process_wiki() else: - raise NotImplementedError + raise NotImplementedError( + f"chosen dataset '{self.name}' is not implemented") if self.pre_transform is not None: data = self.pre_transform(data) From 8fdf8957d002ebe576ec89111ee7833db955541d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 10 May 2022 04:01:40 -0700 Subject: [PATCH 0033/2432] `HeteroData.is_undirected()` (#4604) * initial commit * changelog * update --- CHANGELOG.md | 3 +- test/transforms/test_to_undirected.py | 4 + torch_geometric/data/hetero_data.py | 116 ++++++++++++++------------ torch_geometric/data/storage.py | 2 +- 4 files changed, 71 insertions(+), 54 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa11b32c5ffe..877a367b28b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-5,11 +5,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) - Added the `Genius` and `Wiki` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) - Added `nn.glob.GlobalPooling` module with support for multiple aggregations ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed -- The `bias` argument in `TAGConv` is now actually apllied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) +- The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) ### Removed diff --git a/test/transforms/test_to_undirected.py b/test/transforms/test_to_undirected.py index 167f6535bf8c..3e7070b5c3c1 100644 --- a/test/transforms/test_to_undirected.py +++ b/test/transforms/test_to_undirected.py @@ -53,7 +53,11 @@ def test_hetero_to_undirected(): data['v', 'w'].edge_attr = edge_attr from torch_geometric.transforms import ToUndirected + + assert not data.is_undirected() data = ToUndirected()(data) + assert data.is_undirected() + assert data['v', 'v'].edge_index.tolist() == [[0, 1, 2, 3], [1, 0, 3, 2]] assert data['v', 'v'].edge_weight.tolist() == edge_weight[perm].tolist() assert data['v', 'v'].edge_attr.tolist() == edge_attr[perm].tolist() diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 41d011248840..bf52ad2b2c1b 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -12,6 +12,7 @@ from torch_geometric.data.data import BaseData, Data, size_repr from torch_geometric.data.storage import BaseStorage, EdgeStorage, NodeStorage from torch_geometric.typing import EdgeType, NodeType, QueryType +from torch_geometric.utils import is_undirected NodeOrEdgeType = Union[NodeType, EdgeType] NodeOrEdgeStorage = Union[NodeStorage, EdgeStorage] @@ -295,6 +296,11 @@ def num_edge_features(self) -> Dict[EdgeType, int]: for key, store in self._edge_store_dict.items() } + def is_undirected(self) -> bool: + r"""Returns :obj:`True` if graph edges are undirected.""" + edge_index, _, _ = to_homogeneous_edge_index(self) + return is_undirected(edge_index, num_nodes=self.num_nodes) + def debug(self): pass # TODO @@ -481,26 +487,14 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: if len(sizes) == len(stores) and len(set(sizes)) == 1 ] - data = Data(**self._global_store.to_dict()) + edge_index, node_slices, edge_slices = to_homogeneous_edge_index(self) + device = edge_index.device if edge_index is not None else None - # Iterate over all node stores and record the slice information: - node_slices, cumsum = {}, 0 - node_type_names, node_types = [], [] - for i, (node_type, store) in enumerate(self._node_store_dict.items()): - num_nodes = store.num_nodes - node_slices[node_type] = (cumsum, cumsum + num_nodes) - node_type_names.append(node_type) - cumsum += num_nodes - - if add_node_type: - kwargs = {'dtype': 
torch.long} - node_types.append(torch.full((num_nodes, ), i, **kwargs)) - data._node_type_names = node_type_names - - if len(node_types) > 1: - data.node_type = torch.cat(node_types, dim=0) - elif len(node_types) == 1: - data.node_type = node_types[0] + data = Data(**self._global_store.to_dict()) + if edge_index is not None: + data.edge_index = edge_index + data._node_type_names = list(node_slices.keys()) + data._edge_type_names = list(edge_slices.keys()) # Combine node attributes into a single tensor: if node_attrs is None: @@ -511,39 +505,8 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: value = torch.cat(values, dim) if len(values) > 1 else values[0] data[key] = value - if len([ - key for key in node_attrs - if (key in {'x', 'pos', 'batch'} or 'node' in key) - ]) == 0 and not add_node_type: - data.num_nodes = cumsum - - # Iterate over all edge stores and record the slice information: - edge_slices, cumsum = {}, 0 - edge_indices, edge_type_names, edge_types = [], [], [] - for i, (edge_type, store) in enumerate(self._edge_store_dict.items()): - src, _, dst = edge_type - num_edges = store.num_edges - edge_slices[edge_type] = (cumsum, cumsum + num_edges) - edge_type_names.append(edge_type) - cumsum += num_edges - - kwargs = {'dtype': torch.long, 'device': store.edge_index.device} - offset = [[node_slices[src][0]], [node_slices[dst][0]]] - offset = torch.tensor(offset, **kwargs) - edge_indices.append(store.edge_index + offset) - if add_edge_type: - edge_types.append(torch.full((num_edges, ), i, **kwargs)) - data._edge_type_names = edge_type_names - - if len(edge_indices) > 1: - data.edge_index = torch.cat(edge_indices, dim=-1) - elif len(edge_indices) == 1: - data.edge_index = edge_indices[0] - - if len(edge_types) > 1: - data.edge_type = torch.cat(edge_types, dim=0) - elif len(edge_types) == 1: - data.edge_type = edge_types[0] + if not data.can_infer_num_nodes: + data.num_nodes = list(node_slices.values())[-1][1] # Combine edge attributes into a single tensor: if edge_attrs is None: @@ -554,4 +517,53 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: value = torch.cat(values, dim) if len(values) > 1 else values[0] data[key] = value + if add_node_type: + sizes = [offset[1] - offset[0] for offset in node_slices.values()] + sizes = torch.tensor(sizes, dtype=torch.long, device=device) + node_type = torch.arange(len(sizes), device=device) + data.node_type = node_type.repeat_interleave(sizes) + + if add_edge_type and edge_index is not None: + sizes = [offset[1] - offset[0] for offset in edge_slices.values()] + sizes = torch.tensor(sizes, dtype=torch.long, device=device) + edge_type = torch.arange(len(sizes), device=device) + data.edge_type = edge_type.repeat_interleave(sizes) + return data + + +# Helper functions ############################################################ + + +def to_homogeneous_edge_index( + data: HeteroData, +) -> Tuple[Optional[Tensor], Dict[NodeType, Any], Dict[EdgeType, Any]]: + # Record slice information per node type: + cumsum = 0 + node_slices: Dict[NodeType, Tuple[int, int]] = {} + for node_type, store in data._node_store_dict.items(): + num_nodes = store.num_nodes + node_slices[node_type] = (cumsum, cumsum + num_nodes) + cumsum += num_nodes + + # Record edge indices and slice information per edge type: + cumsum = 0 + edge_indices: List[Tensor] = [] + edge_slices: Dict[EdgeType, Tuple[int, int]] = {} + for edge_type, store in data._edge_store_dict.items(): + src, _, dst = edge_type + offset = [[node_slices[src][0]], [node_slices[dst][0]]] 
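# A usage sketch, not part of the patch: what `is_undirected()` reports once every edge type is
# collapsed into one homogeneous `edge_index`; node counts below are made up.
import torch
from torch_geometric.data import HeteroData

data = HeteroData()
data['v'].num_nodes = 2
data['w'].num_nodes = 2
data['v', 'to', 'w'].edge_index = torch.tensor([[0, 1], [0, 1]])
print(data.is_undirected())  # False: no reverse ('w' -> 'v') edges exist yet

data['w', 'rev_to', 'v'].edge_index = torch.tensor([[0, 1], [0, 1]])
print(data.is_undirected())  # True: every edge now has a matching reverse edge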
+ offset = torch.tensor(offset, device=store.edge_index.device) + edge_indices.append(store.edge_index + offset) + + num_edges = store.num_edges + edge_slices[edge_type] = (cumsum, cumsum + num_edges) + cumsum += num_edges + + edge_index = None + if len(edge_indices) == 1: # Memory-efficient `torch.cat`: + edge_index = edge_indices[0] + elif len(edge_indices) > 0: + edge_index = torch.cat(edge_indices, dim=-1) + + return edge_index, node_slices, edge_slices diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index b3469eeab3a3..1b51571f277b 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -427,7 +427,7 @@ def has_self_loops(self) -> bool: return int((edge_index[0] == edge_index[1]).sum()) > 0 def is_undirected(self) -> bool: - if self.is_bipartite(): # TODO check for inverse storage. + if self.is_bipartite(): return False for value in self.values('adj', 'adj_t'): From f35c85fca7a464bfd8e0ffe63819b8cba25b515e Mon Sep 17 00:00:00 2001 From: Dongkwan Kim Date: Tue, 10 May 2022 21:34:25 +0900 Subject: [PATCH 0034/2432] Implement `AddPositionalEncoding` transform (#4521) * Add skeleton of AddPositionalEncoding * Implement laplacian_eigenvector_pe * Add random sign flip for laplacian_eigenvector_pe * Implement random_walk_pe * Update docs & linting * Add __init__ & tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix indentation errors in docs. * Move diagonal_weight to out of the class * Handle Data where 'x' is None. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Separate PEs to different clases. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update test/transforms/test_add_positional_encoding.py Co-authored-by: Matthias Fey * Update test/transforms/test_add_positional_encoding.py Co-authored-by: Matthias Fey * Update tests & remove unnecessary lines * Update torch_geometric/transforms/add_positional_encoding.py Co-authored-by: Matthias Fey * Remove unnecessary ':' * Add full_self_loop_attr * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Improve docs of full_self_loop_attr * Make add_node_attr (add_pe) outside the class. 
* Add value tests for AddRandomWalkPE * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix an error in 'which' of eig_fn * Add kwargs for AddLaplacianEigenvectorPE * Add output tests for AddLaplacianEigenvectorPE * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changelog * update * typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + .../test_add_positional_encoding.py | 93 ++++++++++++ test/utils/test_loop.py | 19 +++ torch_geometric/transforms/__init__.py | 3 + .../transforms/add_positional_encoding.py | 139 ++++++++++++++++++ torch_geometric/utils/__init__.py | 3 +- torch_geometric/utils/loop.py | 36 +++++ 7 files changed, 293 insertions(+), 1 deletion(-) create mode 100644 test/transforms/test_add_positional_encoding.py create mode 100644 torch_geometric/transforms/add_positional_encoding.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 877a367b28b2..1be0426da651 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) - Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) - Added the `Genius` and `Wiki` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) - Added `nn.glob.GlobalPooling` module with support for multiple aggregations ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) diff --git a/test/transforms/test_add_positional_encoding.py b/test/transforms/test_add_positional_encoding.py new file mode 100644 index 000000000000..ab64a3490c54 --- /dev/null +++ b/test/transforms/test_add_positional_encoding.py @@ -0,0 +1,93 @@ +import copy + +import torch + +from torch_geometric.data import Data +from torch_geometric.transforms import ( + AddLaplacianEigenvectorPE, + AddRandomWalkPE, +) + + +def test_add_laplacian_eigenvector_pe(): + x = torch.randn(6, 4) + edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5], + [1, 0, 4, 0, 4, 1, 3, 2, 5, 3]]) + data = Data(x=x, edge_index=edge_index) + + transform = AddLaplacianEigenvectorPE(k=3) + assert str(transform) == 'AddLaplacianEigenvectorPE()' + out = transform(copy.copy(data)) + assert out.laplacian_eigenvector_pe.size() == (6, 3) + + transform = AddLaplacianEigenvectorPE(k=3, attr_name=None) + out = transform(copy.copy(data)) + assert out.x.size() == (6, 4 + 3) + + transform = AddLaplacianEigenvectorPE(k=3, attr_name='x') + out = transform(copy.copy(data)) + assert out.x.size() == (6, 3) + + # Output tests: + edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5, 2, 5], + [1, 0, 4, 0, 4, 1, 3, 2, 5, 3, 5, 2]]) + data = Data(x=x, edge_index=edge_index) + + transform1 = AddLaplacianEigenvectorPE(k=1, is_undirected=True) + transform2 = AddLaplacianEigenvectorPE(k=1, is_undirected=False) + + # Clustering test with first non-trivial eigenvector (Fiedler vector) + pe = 
transform1(copy.copy(data)).laplacian_eigenvector_pe + pe_cluster_1 = pe[[0, 1, 4]] + pe_cluster_2 = pe[[2, 3, 5]] + assert not torch.allclose(pe_cluster_1, pe_cluster_2) + assert torch.allclose(pe_cluster_1, pe_cluster_1.mean()) + assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) + + pe = transform2(copy.copy(data)).laplacian_eigenvector_pe + pe_cluster_1 = pe[[0, 1, 4]] + pe_cluster_2 = pe[[2, 3, 5]] + assert not torch.allclose(pe_cluster_1, pe_cluster_2) + assert torch.allclose(pe_cluster_1, pe_cluster_1.mean()) + assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) + + +def test_add_random_walk_pe(): + x = torch.randn(6, 4) + edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5], + [1, 0, 4, 0, 4, 1, 3, 2, 5, 3]]) + data = Data(x=x, edge_index=edge_index) + + transform = AddRandomWalkPE(walk_length=3) + assert str(transform) == 'AddRandomWalkPE()' + out = transform(copy.copy(data)) + assert out.random_walk_pe.size() == (6, 3) + + transform = AddRandomWalkPE(walk_length=3, attr_name=None) + out = transform(copy.copy(data)) + assert out.x.size() == (6, 4 + 3) + + transform = AddRandomWalkPE(walk_length=3, attr_name='x') + out = transform(copy.copy(data)) + assert out.x.size() == (6, 3) + + # Output tests: + assert out.x.tolist() == [ + [0.0, 0.5, 0.25], + [0.0, 0.5, 0.25], + [0.0, 0.5, 0.00], + [0.0, 1.0, 0.00], + [0.0, 0.5, 0.25], + [0.0, 0.5, 0.00], + ] + + edge_index = torch.tensor([[0, 1, 2], [0, 1, 2]]) + data = Data(edge_index=edge_index, num_nodes=4) + out = transform(copy.copy(data)) + + assert out.x.tolist() == [ + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [0.0, 0.0, 0.0], + ] diff --git a/test/utils/test_loop.py b/test/utils/test_loop.py index cf245befcd51..ded7a69cb091 100644 --- a/test/utils/test_loop.py +++ b/test/utils/test_loop.py @@ -4,6 +4,7 @@ add_remaining_self_loops, add_self_loops, contains_self_loops, + get_self_loop_attr, remove_self_loops, segregate_self_loops, ) @@ -103,3 +104,21 @@ def test_add_remaining_self_loops_without_initial_loops(): edge_index, edge_weight = add_remaining_self_loops(edge_index, edge_weight) assert edge_index.tolist() == [[0, 1, 0, 1], [1, 0, 0, 1]] assert edge_weight.tolist() == [0.5, 0.5, 1, 1] + + +def test_get_self_loop_attr(): + edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) + edge_weight = torch.tensor([0.2, 0.3, 0.5]) + + full_loop_weight = get_self_loop_attr(edge_index, edge_weight) + assert full_loop_weight.tolist() == [0.5, 0.0] + + full_loop_weight = get_self_loop_attr(edge_index, edge_weight, num_nodes=4) + assert full_loop_weight.tolist() == [0.5, 0.0, 0.0, 0.0] + + full_loop_weight = get_self_loop_attr(edge_index) + assert full_loop_weight.tolist() == [1.0, 0.0] + + edge_attr = torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.5, 1.0]]) + full_loop_attr = get_self_loop_attr(edge_index, edge_attr) + assert full_loop_attr.tolist() == [[0.5, 1.0], [0.0, 0.0]] diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index deb6fe11c28b..fbd52180fdb8 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -48,6 +48,7 @@ from .add_metapaths import AddMetaPaths from .largest_connected_components import LargestConnectedComponents from .virtual_node import VirtualNode +from .add_positional_encoding import AddLaplacianEigenvectorPE, AddRandomWalkPE __all__ = [ 'BaseTransform', @@ -100,6 +101,8 @@ 'AddMetaPaths', 'LargestConnectedComponents', 'VirtualNode', + 'AddLaplacianEigenvectorPE', + 'AddRandomWalkPE', ] classes = __all__ diff 
--git a/torch_geometric/transforms/add_positional_encoding.py b/torch_geometric/transforms/add_positional_encoding.py new file mode 100644 index 000000000000..05aed9387dc0 --- /dev/null +++ b/torch_geometric/transforms/add_positional_encoding.py @@ -0,0 +1,139 @@ +from typing import Any, Optional + +import numpy as np +import torch +from torch_sparse import SparseTensor + +from torch_geometric.data import Data +from torch_geometric.data.datapipes import functional_transform +from torch_geometric.transforms import BaseTransform +from torch_geometric.utils import ( + get_laplacian, + get_self_loop_attr, + to_scipy_sparse_matrix, +) + + +def add_node_attr(data: Data, value: Any, + attr_name: Optional[str] = None) -> Data: + # TODO Move to `BaseTransform`. + if attr_name is None: + if 'x' in data: + x = data.x.view(-1, 1) if data.x.dim() == 1 else data.x + data.x = torch.cat([x, value.to(x.device, x.dtype)], dim=-1) + else: + data.x = value + else: + data[attr_name] = value + + return data + + +@functional_transform('add_laplacian_eigenvector_pe') +class AddLaplacianEigenvectorPE(BaseTransform): + r"""Adds the Laplacian eigenvector positional encoding from the + `"Benchmarking Graph Neural Networks" `_ + paper to the given graph + (functional name: :obj:`add_laplacian_eigenvector_pe`). + + Args: + k (int): The number of non-trivial eigenvectors to consider. + attr_name (str, optional): The attribute name of the data object to add + positional encodings to. If set to :obj:`None`, will be + concatenated to :obj:`data.x`. + (default: :obj:`"laplacian_eigenvector_pe"`) + is_undirected (bool, optional): If set to :obj:`True`, this transform + expects undirected graphs as input, and can hence speed up the + computation of eigenvectors. (default: :obj:`False`) + **kwargs (optional): Additional arguments of + :meth:`scipy.sparse.linalg.eigs` (when :attr:`is_undirected` is + :obj:`False`) or :meth:`scipy.sparse.linalg.eigsh` (when + :attr:`is_undirected` is :obj:`True`). + """ + def __init__( + self, + k: int, + attr_name: Optional[str] = 'laplacian_eigenvector_pe', + is_undirected: bool = False, + **kwargs, + ): + self.k = k + self.attr_name = attr_name + self.is_undirected = is_undirected + self.kwargs = kwargs + + def __call__(self, data: Data) -> Data: + from scipy.sparse.linalg import eigs, eigsh + eig_fn = eigs if not self.is_undirected else eigsh + + num_nodes = data.num_nodes + edge_index, edge_weight = get_laplacian( + data.edge_index, + normalization='sym', + num_nodes=num_nodes, + ) + + L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes) + + eig_vals, eig_vecs = eig_fn( + L, + k=self.k + 1, + which='SR' if not self.is_undirected else 'SA', + return_eigenvectors=True, + **self.kwargs, + ) + + eig_vecs = np.real(eig_vecs[:, eig_vals.argsort()]) + pe = torch.from_numpy(eig_vecs[:, 1:self.k + 1]) + sign = -1 + 2 * torch.randint(0, 2, (self.k, )) + pe *= sign + + data = add_node_attr(data, pe, attr_name=self.attr_name) + return data + + +@functional_transform('add_random_walk_pe') +class AddRandomWalkPE(BaseTransform): + r"""Adds the random walk positional encoding from the `"Graph Neural + Networks with Learnable Structural and Positional Representations" + `_ paper to the given graph + (functional name: :obj:`add_random_walk_pe`). + + Args: + walk_length (int): The number of random walk steps. + attr_name (str, optional): The attribute name of the data object to add + positional encodings to. If set to :obj:`None`, will be + concatenated to :obj:`data.x`. 
+ (default: :obj:`"laplacian_eigenvector_pe"`) + """ + def __init__( + self, + walk_length: int, + attr_name: Optional[str] = 'random_walk_pe', + ): + self.walk_length = walk_length + self.attr_name = attr_name + + def __call__(self, data: Data) -> Data: + num_nodes = data.num_nodes + edge_index, edge_weight = data.edge_index, data.edge_weight + + adj = SparseTensor.from_edge_index(edge_index, edge_weight, + sparse_sizes=(num_nodes, num_nodes)) + + # Compute D^{-1} A: + deg_inv = 1.0 / adj.sum(dim=1) + deg_inv[deg_inv == float('inf')] = 0 + adj = adj * deg_inv.view(-1, 1) + + out = adj + row, col, value = out.coo() + pe_list = [get_self_loop_attr((row, col), value, num_nodes)] + for _ in range(self.walk_length - 1): + out = out @ adj + row, col, value = out.coo() + pe_list.append(get_self_loop_attr((row, col), value, num_nodes)) + pe = torch.stack(pe_list, dim=-1) + + data = add_node_attr(data, pe, attr_name=self.attr_name) + return data diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 64c77fbda34e..a9c09230d6c0 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -6,7 +6,7 @@ from .undirected import is_undirected, to_undirected from .loop import (contains_self_loops, remove_self_loops, segregate_self_loops, add_self_loops, - add_remaining_self_loops) + add_remaining_self_loops, get_self_loop_attr) from .isolated import contains_isolated_nodes, remove_isolated_nodes from .subgraph import (get_num_hops, subgraph, k_hop_subgraph, bipartite_subgraph) @@ -46,6 +46,7 @@ 'segregate_self_loops', 'add_self_loops', 'add_remaining_self_loops', + 'get_self_loop_attr', 'contains_isolated_nodes', 'remove_isolated_nodes', 'get_num_hops', diff --git a/torch_geometric/utils/loop.py b/torch_geometric/utils/loop.py index 2d50d6004867..7c08b322d112 100644 --- a/torch_geometric/utils/loop.py +++ b/torch_geometric/utils/loop.py @@ -227,3 +227,39 @@ def add_remaining_self_loops( edge_index = torch.cat([edge_index[:, mask], loop_index], dim=1) return edge_index, edge_attr + + +def get_self_loop_attr(edge_index: Tensor, edge_attr: OptTensor = None, + num_nodes: Optional[int] = None) -> Tensor: + r"""Returns the edge features or weights of self-loops + :math:`(i, i)` of every node :math:`i \in \mathcal{V}` in the + graph given by :attr:`edge_index`. Edge features of missing self-loops not + present in :attr:`edge_index` will be filled with zeros. If + :attr:`edge_attr` is not given, it will be the vector of ones. + + .. note:: + This operation is analogous to getting the diagonal elements of the + dense adjacency matrix. + + Args: + edge_index (LongTensor): The edge indices. + edge_attr (Tensor, optional): Edge weights or multi-dimensional edge + features. (default: :obj:`None`) + num_nodes (int, optional): The number of nodes, *i.e.* + :obj:`max_val + 1` of :attr:`edge_index`. 
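# A usage sketch, not part of the patch: both new transforms applied to a 6-node cycle graph
# (internally, the random-walk variant reads diagonal entries via the `get_self_loop_attr`
# helper documented here); output shapes follow from `walk_length` and `k`.
import torch
from torch_geometric.data import Data
from torch_geometric.transforms import AddLaplacianEigenvectorPE, AddRandomWalkPE

row = torch.tensor([0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 0])
col = torch.tensor([1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 0, 5])
data = Data(x=torch.randn(6, 4), edge_index=torch.stack([row, col]))

data = AddRandomWalkPE(walk_length=4)(data)
data = AddLaplacianEigenvectorPE(k=2, is_undirected=True, attr_name=None)(data)
print(data.random_walk_pe.shape)  # torch.Size([6, 4])
print(data.x.shape)               # torch.Size([6, 6]): two eigenvector columns appended to x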
(default: :obj:`None`) + + :rtype: :class:`Tensor` + """ + loop_mask = edge_index[0] == edge_index[1] + loop_index = edge_index[0][loop_mask] + + if edge_attr is not None: + loop_attr = edge_attr[loop_mask] + else: # A vector of ones: + loop_attr = torch.ones_like(loop_index, dtype=torch.float) + + num_nodes = maybe_num_nodes(edge_index, num_nodes) + full_loop_attr = loop_attr.new_zeros((num_nodes, ) + loop_attr.size()[1:]) + full_loop_attr[loop_index] = loop_attr + + return full_loop_attr From 405b2ba87658f8eeb4c4ef8b97509bd485cae5dd Mon Sep 17 00:00:00 2001 From: Dongkwan Kim Date: Wed, 11 May 2022 20:14:03 +0900 Subject: [PATCH 0035/2432] Removed unnecessary colons and fixed typos in the documentation (#4616) * Fix typos including unnecessary commas. * Update Changelog * Add PR number and link * Make verbs be past tense * Update torch_geometric/data/lightning_datamodule.py * Update torch_geometric/data/lightning_datamodule.py * Update torch_geometric/data/lightning_datamodule.py Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/data/lightning_datamodule.py | 6 +++--- torch_geometric/datasets/tu_dataset.py | 2 +- torch_geometric/nn/conv/cluster_gcn_conv.py | 2 +- torch_geometric/nn/conv/han_conv.py | 4 ++-- torch_geometric/nn/conv/hgt_conv.py | 4 ++-- torch_geometric/nn/conv/pna_conv.py | 2 +- torch_geometric/nn/conv/point_transformer_conv.py | 4 ++-- torch_geometric/nn/fx.py | 2 +- torch_geometric/nn/models/deepgcn.py | 2 +- torch_geometric/nn/models/dimenet.py | 10 +++++----- torch_geometric/nn/to_hetero_transformer.py | 2 +- torch_geometric/nn/to_hetero_with_bases_transformer.py | 2 +- torch_geometric/transforms/add_self_loops.py | 2 +- torch_geometric/transforms/to_sparse_tensor.py | 2 +- torch_geometric/utils/subgraph.py | 2 +- 16 files changed, 25 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1be0426da651..55fdb1d0a3aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) ### Removed diff --git a/torch_geometric/data/lightning_datamodule.py b/torch_geometric/data/lightning_datamodule.py index 2ac988543177..550784a57762 100644 --- a/torch_geometric/data/lightning_datamodule.py +++ b/torch_geometric/data/lightning_datamodule.py @@ -103,10 +103,10 @@ class LightningDataset(LightningDataModule): trainer.fit(model, datamodule) Args: - train_dataset: (Dataset) The training dataset. - val_dataset: (Dataset, optional) The validation dataset. + train_dataset (Dataset): The training dataset. + val_dataset (Dataset, optional): The validation dataset. (default: :obj:`None`) - test_dataset: (Dataset, optional) The test dataset. + test_dataset (Dataset, optional): The test dataset. (default: :obj:`None`) batch_size (int, optional): How many samples per batch to load. 
(default: :obj:`1`) diff --git a/torch_geometric/datasets/tu_dataset.py b/torch_geometric/datasets/tu_dataset.py index 0accb0931e3e..4f948f207cdb 100644 --- a/torch_geometric/datasets/tu_dataset.py +++ b/torch_geometric/datasets/tu_dataset.py @@ -50,7 +50,7 @@ class TUDataset(InMemoryDataset): use_edge_attr (bool, optional): If :obj:`True`, the dataset will contain additional continuous edge attributes (if present). (default: :obj:`False`) - cleaned: (bool, optional): If :obj:`True`, the dataset will + cleaned (bool, optional): If :obj:`True`, the dataset will contain only non-isomorphic graphs. (default: :obj:`False`) Stats: diff --git a/torch_geometric/nn/conv/cluster_gcn_conv.py b/torch_geometric/nn/conv/cluster_gcn_conv.py index d18f3ca69911..2e03efaf3ca5 100644 --- a/torch_geometric/nn/conv/cluster_gcn_conv.py +++ b/torch_geometric/nn/conv/cluster_gcn_conv.py @@ -25,7 +25,7 @@ class ClusterGCNConv(MessagePassing): in_channels (int): Size of each input sample, or :obj:`-1` to derive the size from the first input(s) to the forward method. out_channels (int): Size of each output sample. - diag_lambda: (float, optional): Diagonal enhancement value + diag_lambda (float, optional): Diagonal enhancement value :math:`\lambda`. (default: :obj:`0.`) add_self_loops (bool, optional): If set to :obj:`False`, will not add self-loops to the input graph. (default: :obj:`True`) diff --git a/torch_geometric/nn/conv/han_conv.py b/torch_geometric/nn/conv/han_conv.py index 4ff294e3434d..b3144bb22965 100644 --- a/torch_geometric/nn/conv/han_conv.py +++ b/torch_geometric/nn/conv/han_conv.py @@ -109,13 +109,13 @@ def forward( Args: x_dict (Dict[str, Tensor]): A dictionary holding input node features for each individual node type. - edge_index_dict: (Dict[str, Union[Tensor, SparseTensor]]): A + edge_index_dict (Dict[str, Union[Tensor, SparseTensor]]): A dictionary holding graph connectivity information for each individual edge type, either as a :obj:`torch.LongTensor` of shape :obj:`[2, num_edges]` or a :obj:`torch_sparse.SparseTensor`. - :rtype: :obj:`Dict[str, Optional[Tensor]]` - The ouput node embeddings + :rtype: :obj:`Dict[str, Optional[Tensor]]` - The output node embeddings for each node type. In case a node type does not receive any message, its output will be set to :obj:`None`. diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index 03ca21331b99..39bde0c06266 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ b/torch_geometric/nn/conv/hgt_conv.py @@ -121,13 +121,13 @@ def forward( Args: x_dict (Dict[str, Tensor]): A dictionary holding input node features for each individual node type. - edge_index_dict: (Dict[str, Union[Tensor, SparseTensor]]): A + edge_index_dict (Dict[str, Union[Tensor, SparseTensor]]): A dictionary holding graph connectivity information for each individual edge type, either as a :obj:`torch.LongTensor` of shape :obj:`[2, num_edges]` or a :obj:`torch_sparse.SparseTensor`. - :rtype: :obj:`Dict[str, Optional[Tensor]]` - The ouput node embeddings + :rtype: :obj:`Dict[str, Optional[Tensor]]` - The output node embeddings for each node type. In case a node type does not receive any message, its output will be set to :obj:`None`. 
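The `forward` signature documented above can be exercised end-to-end. A hedged sketch (node counts and channel sizes are arbitrary) that also shows the `None` output for a node type receiving no messages:

import torch
from torch_geometric.data import HeteroData
from torch_geometric.nn import HGTConv

data = HeteroData()
data['paper'].x = torch.randn(6, 16)
data['author'].x = torch.randn(4, 16)
data['author', 'writes', 'paper'].edge_index = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 3]])

conv = HGTConv(16, 32, data.metadata(), heads=2)
out_dict = conv(data.x_dict, data.edge_index_dict)
print(out_dict['paper'].shape)  # torch.Size([6, 32])
print(out_dict['author'])       # None: 'author' nodes receive no messages in this graph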
diff --git a/torch_geometric/nn/conv/pna_conv.py b/torch_geometric/nn/conv/pna_conv.py index ae7cd5178fec..3eee2f43adf8 100644 --- a/torch_geometric/nn/conv/pna_conv.py +++ b/torch_geometric/nn/conv/pna_conv.py @@ -55,7 +55,7 @@ class PNAConv(MessagePassing): aggregators (list of str): Set of aggregation function identifiers, namely :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`, :obj:`"var"` and :obj:`"std"`. - scalers: (list of str): Set of scaling function identifiers, namely + scalers (list of str): Set of scaling function identifiers, namely :obj:`"identity"`, :obj:`"amplification"`, :obj:`"attenuation"`, :obj:`"linear"` and :obj:`"inverse_linear"`. diff --git a/torch_geometric/nn/conv/point_transformer_conv.py b/torch_geometric/nn/conv/point_transformer_conv.py index 91aa1252a443..aed732b4769d 100644 --- a/torch_geometric/nn/conv/point_transformer_conv.py +++ b/torch_geometric/nn/conv/point_transformer_conv.py @@ -44,13 +44,13 @@ class PointTransformerConv(MessagePassing): A tuple corresponds to the sizes of source and target dimensionalities. out_channels (int): Size of each output sample. - pos_nn : (torch.nn.Module, optional): A neural network + pos_nn (torch.nn.Module, optional): A neural network :math:`h_\mathbf{\Theta}` which maps relative spatial coordinates :obj:`pos_j - pos_i` of shape :obj:`[-1, 3]` to shape :obj:`[-1, out_channels]`. Will default to a :class:`torch.nn.Linear` transformation if not further specified. (default: :obj:`None`) - attn_nn : (torch.nn.Module, optional): A neural network + attn_nn (torch.nn.Module, optional): A neural network :math:`\gamma_\mathbf{\Theta}` which maps transformed node features of shape :obj:`[-1, out_channels]` to shape :obj:`[-1, out_channels]`. (default: :obj:`None`) diff --git a/torch_geometric/nn/fx.py b/torch_geometric/nn/fx.py index 10d3663df019..88bfe684bbce 100644 --- a/torch_geometric/nn/fx.py +++ b/torch_geometric/nn/fx.py @@ -60,7 +60,7 @@ class Transformer(object): In case :obj:`input_map` is not further specified, will try to automatically determine the correct type of input arguments. (default: :obj:`None`) - debug: (bool, optional): If set to :obj:`True`, will perform + debug (bool, optional): If set to :obj:`True`, will perform transformation in debug mode. (default: :obj:`False`) """ def __init__( diff --git a/torch_geometric/nn/models/deepgcn.py b/torch_geometric/nn/models/deepgcn.py index 25d06377d46e..bf1f7971e7cd 100644 --- a/torch_geometric/nn/models/deepgcn.py +++ b/torch_geometric/nn/models/deepgcn.py @@ -40,7 +40,7 @@ class DeepGCNLayer(torch.nn.Module): block (string, optional): The skip connection operation to use (:obj:`"res+"`, :obj:`"res"`, :obj:`"dense"` or :obj:`"plain"`). (default: :obj:`"res+"`) - dropout: (float, optional): Whether to apply or dropout. + dropout (float, optional): Whether to apply or dropout. (default: :obj:`0.`) ckpt_grad (bool, optional): If set to :obj:`True`, will checkpoint this part of the model. Checkpointing works by trading compute for diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index dd594050dd5d..fcf93fd681cc 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -269,20 +269,20 @@ class DimeNet(torch.nn.Module): num_bilinear (int): Size of the bilinear layer tensor. num_spherical (int): Number of spherical harmonics. num_radial (int): Number of radial basis functions. 
- cutoff: (float, optional): Cutoff distance for interatomic + cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`) max_num_neighbors (int, optional): The maximum number of neighbors to collect for each node within the :attr:`cutoff` distance. (default: :obj:`32`) envelope_exponent (int, optional): Shape of the smooth cutoff. (default: :obj:`5`) - num_before_skip: (int, optional): Number of residual layers in the + num_before_skip (int, optional): Number of residual layers in the interaction blocks before the skip connection. (default: :obj:`1`) - num_after_skip: (int, optional): Number of residual layers in the + num_after_skip (int, optional): Number of residual layers in the interaction blocks after the skip connection. (default: :obj:`2`) - num_output_layers: (int, optional): Number of linear layers for the + num_output_layers (int, optional): Number of linear layers for the output blocks. (default: :obj:`3`) - act: (Callable, optional): The activation funtion. + act (Callable, optional): The activation function. (default: :obj:`swish`) """ diff --git a/torch_geometric/nn/to_hetero_transformer.py b/torch_geometric/nn/to_hetero_transformer.py index 710ce8e3e05b..6f1af095e147 100644 --- a/torch_geometric/nn/to_hetero_transformer.py +++ b/torch_geometric/nn/to_hetero_transformer.py @@ -104,7 +104,7 @@ def forward(self, x, edge_index): In case :obj:`input_map` is not further specified, will try to automatically determine the correct type of input arguments. (default: :obj:`None`) - debug: (bool, optional): If set to :obj:`True`, will perform + debug (bool, optional): If set to :obj:`True`, will perform transformation in debug mode. (default: :obj:`False`) """ transformer = ToHeteroTransformer(module, metadata, aggr, input_map, debug) diff --git a/torch_geometric/nn/to_hetero_with_bases_transformer.py b/torch_geometric/nn/to_hetero_with_bases_transformer.py index 10e5d30b31b5..8c0be95d5d2b 100644 --- a/torch_geometric/nn/to_hetero_with_bases_transformer.py +++ b/torch_geometric/nn/to_hetero_with_bases_transformer.py @@ -125,7 +125,7 @@ def forward(self, x, edge_index): In case :obj:`input_map` is not further specified, will try to automatically determine the correct type of input arguments. (default: :obj:`None`) - debug: (bool, optional): If set to :obj:`True`, will perform + debug (bool, optional): If set to :obj:`True`, will perform transformation in debug mode. (default: :obj:`False`) """ transformer = ToHeteroWithBasesTransformer(module, metadata, num_bases, diff --git a/torch_geometric/transforms/add_self_loops.py b/torch_geometric/transforms/add_self_loops.py index bb0a117fd28d..c540f5887890 100644 --- a/torch_geometric/transforms/add_self_loops.py +++ b/torch_geometric/transforms/add_self_loops.py @@ -14,7 +14,7 @@ class AddSelfLoops(BaseTransform): (functional name: :obj:`add_self_loops`). Args: - attr: (str, optional): The name of the attribute of edge weights + attr (str, optional): The name of the attribute of edge weights or multi-dimensional edge features to pass to :meth:`torch_geometric.utils.add_self_loops`. (default: :obj:`"edge_weight"`) diff --git a/torch_geometric/transforms/to_sparse_tensor.py b/torch_geometric/transforms/to_sparse_tensor.py index 02c46130b28c..7abe38cd7877 100644 --- a/torch_geometric/transforms/to_sparse_tensor.py +++ b/torch_geometric/transforms/to_sparse_tensor.py @@ -23,7 +23,7 @@ class ToSparseTensor(BaseTransform): :obj:`data.edge_index` for now. 
Args: - attr: (str, optional): The name of the attribute to add as a value to + attr (str, optional): The name of the attribute to add as a value to the :class:`~torch_sparse.SparseTensor` object (if present). (default: :obj:`edge_weight`) remove_edge_index (bool, optional): If set to :obj:`False`, the diff --git a/torch_geometric/utils/subgraph.py b/torch_geometric/utils/subgraph.py index 4d29474acd19..54eb51c7485c 100644 --- a/torch_geometric/utils/subgraph.py +++ b/torch_geometric/utils/subgraph.py @@ -147,7 +147,7 @@ def k_hop_subgraph(node_idx, num_hops, edge_index, relabel_nodes=False, Args: node_idx (int, list, tuple or :obj:`torch.Tensor`): The central node(s). - num_hops: (int): The number of hops :math:`k`. + num_hops (int): The number of hops :math:`k`. edge_index (LongTensor): The edge indices. relabel_nodes (bool, optional): If set to :obj:`True`, the resulting :obj:`edge_index` will be relabeled to hold consecutive indices From 6f120ff8e19b39a26b14cb9dbe0ce271c1a2093d Mon Sep 17 00:00:00 2001 From: Dongkwan Kim Date: Wed, 11 May 2022 20:15:59 +0900 Subject: [PATCH 0036/2432] Fixed an error in generated node features in `StochasticBlockModelDataset` (#4617) * Fix an error in sbm node features (x) - If n_clusters_per_class in make_classification is bigger than 1, its output is not sorted with respect to the labels (y). * Updated changelog Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/datasets/sbm_dataset.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55fdb1d0a3aa..e9424a4624fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) - Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) diff --git a/torch_geometric/datasets/sbm_dataset.py b/torch_geometric/datasets/sbm_dataset.py index 49d1cb23d972..44af3b39f131 100644 --- a/torch_geometric/datasets/sbm_dataset.py +++ b/torch_geometric/datasets/sbm_dataset.py @@ -1,6 +1,7 @@ import os from typing import Callable, List, Optional, Union +import numpy as np import torch from torch import Tensor @@ -94,13 +95,14 @@ def process(self): x = None if self.num_channels is not None: - x, _ = make_classification( + x, y_not_sorted = make_classification( n_samples=num_samples, n_features=self.num_channels, n_classes=num_classes, weights=self.block_sizes / num_samples, **self.kwargs, ) + x = x[np.argsort(y_not_sorted)] x = torch.from_numpy(x).to(torch.float) y = torch.arange(num_classes).repeat_interleave(self.block_sizes) From e3ba9d37ca275e5c14f28c981e7ebe1d5e27e879 Mon Sep 17 00:00:00 2001 From: Zeyuan Tan <41138939+ZenoTan@users.noreply.github.com> Date: Wed, 11 May 2022 22:39:39 +0100 Subject: [PATCH 0037/2432] `NeighborLoader`: Optionally use 
argument data without `to_csc` (#4620) * update * update * update * changelog Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/data/lightning_datamodule.py | 1 + .../loader/link_neighbor_loader.py | 23 +++++++++++++----- torch_geometric/loader/neighbor_loader.py | 24 +++++++++++++++---- torch_geometric/loader/utils.py | 13 ++++++---- 5 files changed, 46 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9424a4624fb..3a06934bed42 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620)) - Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) - Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) - Added the `Genius` and `Wiki` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) diff --git a/torch_geometric/data/lightning_datamodule.py b/torch_geometric/data/lightning_datamodule.py index 550784a57762..cafe28d4c77a 100644 --- a/torch_geometric/data/lightning_datamodule.py +++ b/torch_geometric/data/lightning_datamodule.py @@ -274,6 +274,7 @@ def __init__( directed=kwargs.get('directed', True), input_type=get_input_nodes(data, input_train_nodes)[0], time_attr=kwargs.get('time_attr', None), + is_sorted=kwargs.get('is_sorted', False), ) self.input_train_nodes = input_train_nodes self.input_val_nodes = input_val_nodes diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 08390100e2f0..e8162ea66fd9 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -201,9 +201,6 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): replacement. (default: :obj:`False`) directed (bool, optional): If set to :obj:`False`, will include all edges between all sampled nodes. (default: :obj:`True`) - transform (Callable, optional): A function/transform that takes in - a sampled mini-batch and returns a transformed version. - (default: :obj:`None`) neg_sampling_ratio (float, optional): The ratio of sampled negative edges to the number of positive edges. If :obj:`edge_label` does not exist, it will be automatically @@ -219,6 +216,13 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): :meth:`F.binary_cross_entropy`) and of type :obj:`torch.long` for multi-class classification (to facilitate the ease-of-use of :meth:`F.cross_entropy`). (default: :obj:`0.0`). + transform (Callable, optional): A function/transform that takes in + a sampled mini-batch and returns a transformed version. + (default: :obj:`None`) + is_sorted (bool, optional): If set to :obj:`True`, assumes that + :obj:`edge_index` is sorted by column. This avoids internal + re-sorting of the data and can improve runtime and memory + efficiency. (default: :obj:`False`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
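A hedged sketch of the keywords documented above on a tiny graph; the `edge_index` is already sorted by destination column, which is the contract behind `is_sorted=True`:

import torch
from torch_geometric.data import Data
from torch_geometric.loader import LinkNeighborLoader

edge_index = torch.tensor([[1, 2, 0, 3], [0, 0, 1, 2]])  # column-sorted
data = Data(x=torch.randn(4, 8), edge_index=edge_index)

loader = LinkNeighborLoader(data, num_neighbors=[2], batch_size=2,
                            neg_sampling_ratio=1.0, is_sorted=True, shuffle=True)
batch = next(iter(loader))
print(batch.edge_label_index.shape, batch.edge_label.shape)  # e.g. [2, 4] and [4]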
@@ -231,9 +235,10 @@ def __init__( edge_label: OptTensor = None, replace: bool = False, directed: bool = True, + neg_sampling_ratio: float = 0.0, transform: Callable = None, + is_sorted: bool = False, neighbor_sampler: Optional[LinkNeighborSampler] = None, - neg_sampling_ratio: float = 0.0, **kwargs, ): # Remove for PyTorch Lightning: @@ -259,9 +264,15 @@ def __init__( if neighbor_sampler is None: self.neighbor_sampler = LinkNeighborSampler( - data, num_neighbors, replace, directed, edge_type, + data, + num_neighbors, + replace, + directed, + input_type=edge_type, + is_sorted=is_sorted, + neg_sampling_ratio=self.neg_sampling_ratio, share_memory=kwargs.get('num_workers', 0) > 0, - neg_sampling_ratio=self.neg_sampling_ratio) + ) super().__init__(Dataset(edge_label_index, edge_label), collate_fn=self.neighbor_sampler, **kwargs) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index b7bdf2c505cb..744b6e4d06e5 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -24,8 +24,9 @@ def __init__( replace: bool = False, directed: bool = True, input_type: Optional[Any] = None, - share_memory: bool = False, time_attr: Optional[str] = None, + is_sorted: bool = False, + share_memory: bool = False, ): self.data_cls = data.__class__ self.num_neighbors = num_neighbors @@ -41,7 +42,8 @@ def __init__( f"'{data.__class__.__name__}' object") # Convert the graph data into a suitable format for sampling. - out = to_csc(data, device='cpu', share_memory=share_memory) + out = to_csc(data, device='cpu', share_memory=share_memory, + is_sorted=is_sorted) self.colptr, self.row, self.perm = out assert isinstance(num_neighbors, (list, tuple)) @@ -54,7 +56,8 @@ def __init__( # Convert the graph data into a suitable format for sampling. # NOTE: Since C++ cannot take dictionaries with tuples as key as # input, edge type triplets are converted into single strings. - out = to_hetero_csc(data, device='cpu', share_memory=share_memory) + out = to_hetero_csc(data, device='cpu', share_memory=share_memory, + is_sorted=is_sorted) self.colptr_dict, self.row_dict, self.perm_dict = out self.node_types, self.edge_types = data.metadata() @@ -245,6 +248,10 @@ class NeighborLoader(torch.utils.data.DataLoader): transform (Callable, optional): A function/transform that takes in a sampled mini-batch and returns a transformed version. (default: :obj:`None`) + is_sorted (bool, optional): If set to :obj:`True`, assumes that + :obj:`edge_index` is sorted by column. This avoids internal + re-sorting of the data and can improve runtime and memory + efficiency. (default: :obj:`False`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
@@ -258,6 +265,7 @@ def __init__( directed: bool = True, time_attr: Optional[str] = None, transform: Callable = None, + is_sorted: bool = False, neighbor_sampler: Optional[NeighborSampler] = None, **kwargs, ): @@ -281,9 +289,15 @@ def __init__( if neighbor_sampler is None: self.neighbor_sampler = NeighborSampler( - data, num_neighbors, replace, directed, node_type, + data, + num_neighbors, + replace, + directed, + input_type=node_type, time_attr=time_attr, - share_memory=kwargs.get('num_workers', 0) > 0) + is_sorted=is_sorted, + share_memory=kwargs.get('num_workers', 0) > 0, + ) super().__init__(input_nodes, collate_fn=self.neighbor_sampler, **kwargs) diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index a7b783727477..e69ae0c0c9d3 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -34,6 +34,7 @@ def to_csc( data: Union[Data, EdgeStorage], device: Optional[torch.device] = None, share_memory: bool = False, + is_sorted: bool = False, ) -> Tuple[Tensor, Tensor, OptTensor]: # Convert the graph data into a suitable format for sampling (CSC format). # Returns the `colptr` and `row` indices of the graph, as well as an @@ -47,17 +48,18 @@ def to_csc( elif hasattr(data, 'edge_index'): (row, col) = data.edge_index - size = data.size() - perm = (col * size[0]).add_(row).argsort() + if not is_sorted: + size = data.size() + perm = (col * size[0]).add_(row).argsort() + row = row[perm] colptr = torch.ops.torch_sparse.ind2ptr(col[perm], size[1]) - row = row[perm] else: raise AttributeError("Data object does not contain attributes " "'adj_t' or 'edge_index'") colptr = colptr.to(device) row = row.to(device) - perm = perm if perm is not None else perm.to(device) + perm = perm.to(device) if perm is not None else None if not colptr.is_cuda and share_memory: colptr.share_memory_() @@ -72,6 +74,7 @@ def to_hetero_csc( data: HeteroData, device: Optional[torch.device] = None, share_memory: bool = False, + is_sorted: bool = False, ) -> Tuple[Dict[str, Tensor], Dict[str, Tensor], Dict[str, OptTensor]]: # Convert the heterogeneous graph data into a suitable format for sampling # (CSC format). @@ -83,7 +86,7 @@ def to_hetero_csc( for store in data.edge_stores: key = edge_type_to_str(store._key) - out = to_csc(store, device, share_memory) + out = to_csc(store, device, share_memory, is_sorted) colptr_dict[key], row_dict[key], perm_dict[key] = out return colptr_dict, row_dict, perm_dict From 363a4ebf44f1c881b030d7f917189b58d48f871a Mon Sep 17 00:00:00 2001 From: Jiaxuan Date: Wed, 11 May 2022 16:50:28 -0700 Subject: [PATCH 0038/2432] Support return embedding for MLP layer (#4625) * support return embedding for MLP layer * CHANGELOG.md * update * typo * fix Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/models/mlp.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a06934bed42..8fb505544488 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) - Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620)) - Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) - Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 0f8e3fce676e..6a18d794d72a 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -137,10 +137,12 @@ def reset_parameters(self): if hasattr(norm, 'reset_parameters'): norm.reset_parameters() - def forward(self, x: Tensor) -> Tensor: + def forward(self, x: Tensor, return_emb: bool = False) -> Tensor: """""" x = self.lins[0](x) + emb = x for lin, norm in zip(self.lins[1:], self.norms): + emb = x if self.act is not None and self.act_first: x = self.act(x) x = norm(x) @@ -148,7 +150,8 @@ def forward(self, x: Tensor) -> Tensor: x = self.act(x) x = F.dropout(x, p=self.dropout, training=self.training) x = lin.forward(x) - return x + + return (x, emb) if return_emb else x def __repr__(self) -> str: return f'{self.__class__.__name__}({str(self.channel_list)[1:-1]})' From bbff5b7327205a55b3d577ee76cd859e017e64ba Mon Sep 17 00:00:00 2001 From: rusty1s Date: Thu, 12 May 2022 12:36:45 -0700 Subject: [PATCH 0039/2432] add tutorials --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 5e132037d380..419c3e407844 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,10 @@ It consists of various methods for deep learning on graphs and other irregular s In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, [multi GPU-support](https://github.com/pyg-team/pytorch_geometric/tree/master/examples/multi_gpu), [`DataPipe` support](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/datapipe.py), distributed graph learning via [Quiver](https://github.com/pyg-team/pytorch_geometric/tree/master/examples/quiver), a large number of common benchmark datasets (based on simple interfaces to create your own), the [GraphGym](https://pytorch-geometric.readthedocs.io/en/latest/notes/graphgym.html) experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. [Click here to join our Slack community!][slack-url] +
+ -------------------------------------------------------------------------------- * [Library Highlights](#library-highlights) From 6fd6f5b936ef85cf5f40d5551d61a2640e5bb7f9 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Fri, 13 May 2022 03:51:56 +0800 Subject: [PATCH 0040/2432] Fix dimension in edge filter selection (#4629) * fix dimension in edge filter * update changelog * Update CHANGELOG.md * update Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/loader/utils.py | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fb505544488..4ca2eda4295d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,4 +18,5 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) +- Fixed filtering of attributes for loaders in case `__cat_dim__ != 0` ([#4629](https://github.com/pyg-team/pytorch_geometric/pull/4629)) ### Removed diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index e69ae0c0c9d3..86fc2bf4186d 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -21,7 +21,7 @@ def index_select(value: Tensor, index: Tensor, dim: int = 0) -> Tensor: numel = math.prod(size) storage = value.storage()._new_shared(numel) out = value.new(storage).view(size) - return torch.index_select(value, 0, index, out=out) + return torch.index_select(value, dim, index, out=out) def edge_type_to_str(edge_type: Union[EdgeType, str]) -> str: @@ -101,7 +101,8 @@ def filter_node_store_(store: NodeStorage, out_store: NodeStorage, elif store.is_node_attr(key): index = index.to(value.device) - out_store[key] = index_select(value, index, dim=0) + dim = store._parent().__cat_dim__(key, value, store) + out_store[key] = index_select(value, index, dim=dim) return store @@ -132,13 +133,14 @@ def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor, is_sorted=False, trust_data=True) elif store.is_edge_attr(key): + dim = store._parent().__cat_dim__(key, value, store) if perm is None: index = index.to(value.device) - out_store[key] = index_select(value, index, dim=0) + out_store[key] = index_select(value, index, dim=dim) else: perm = perm.to(value.device) index = index.to(value.device) - out_store[key] = index_select(value, perm[index], dim=0) + out_store[key] = index_select(value, perm[index], dim=dim) return store From c55729ba2227aebd8fe0f59f79a80d0edc788d66 Mon Sep 17 00:00:00 2001 From: Aniket Maurya Date: Fri, 13 May 2022 18:23:44 +0530 Subject: [PATCH 0041/2432] Implement `LightningModule` & `LoggerCallback` in GraphGym (#4531) * implement model steps and configure_optimizer * fix imports * dummy pr: logger pl callback (#3) implement logger pl callback * update logger * add test * fix test * fix * fixes * add test * update test * test configure optimizer * test configure optimizer * commit suggested change Co-authored-by: Jirka Borovec * remove redundant parameters * add typing * apply pr suggestions * apply suggestions * apply suggestions * apply suggestions * test logger * remove graphgym from minimal installation * fix * fix minimal test * changelog * update * 
update * update * lint Co-authored-by: Jirka Borovec Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/graphgym/test_graphgym.py | 93 ++++++++++++++- test/graphgym/test_logger.py | 11 ++ torch_geometric/graphgym/logger.py | 136 +++++++++++++++++++++- torch_geometric/graphgym/model_builder.py | 30 ++++- 5 files changed, 262 insertions(+), 9 deletions(-) create mode 100644 test/graphgym/test_logger.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ca2eda4295d..e280e5e0909b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531)) - Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) - Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620)) - Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py index 57258d070115..e783ff9ff977 100644 --- a/test/graphgym/test_graphgym.py +++ b/test/graphgym/test_graphgym.py @@ -18,7 +18,11 @@ set_run_dir, ) from torch_geometric.graphgym.loader import create_loader -from torch_geometric.graphgym.logger import create_logger, set_printing +from torch_geometric.graphgym.logger import ( + LoggerCallback, + create_logger, + set_printing, +) from torch_geometric.graphgym.model_builder import create_model from torch_geometric.graphgym.models.gnn import FeatureEncoder, GNNStackStage from torch_geometric.graphgym.models.head import GNNNodeHead @@ -33,6 +37,10 @@ num_trivial_metric_calls = 0 +Args = namedtuple('Args', ['cfg_file', 'opts']) +root = osp.join(osp.dirname(osp.realpath(__file__))) +args = Args(osp.join(root, 'example_node.yml'), []) + def trivial_metric(true, pred, task_type): global num_trivial_metric_calls @@ -110,3 +118,86 @@ def test_run_single_graphgym(auto_resume, skip_train_eval, use_trivial_metric): agg_runs(cfg.out_dir, cfg.metric_best) shutil.rmtree(cfg.out_dir) + + +@withPackage('yacs') +@withPackage('pytorch_lightning') +def test_graphgym_module(tmpdir): + import pytorch_lightning as pl + + load_cfg(cfg, args) + cfg.out_dir = osp.join(tmpdir, str(random.randrange(sys.maxsize))) + cfg.run_dir = osp.join(tmpdir, str(random.randrange(sys.maxsize))) + cfg.dataset.dir = osp.join(tmpdir, 'pyg_test_datasets', 'Planetoid') + + set_out_dir(cfg.out_dir, args.cfg_file) + dump_cfg(cfg) + set_printing() + + seed_everything(cfg.seed) + auto_select_device() + set_run_dir(cfg.out_dir) + + loaders = create_loader() + assert len(loaders) == 3 + + model = create_model() + assert isinstance(model, pl.LightningModule) + + optimizer, scheduler = model.configure_optimizers() + assert isinstance(optimizer[0], torch.optim.Adam) + assert isinstance(scheduler[0], torch.optim.lr_scheduler.CosineAnnealingLR) + + cfg.params = params_count(model) + assert cfg.params == 23880 + + keys = {"loss", "true", "pred_score", "step_end_time"} + # test training step + batch = next(iter(loaders[0])) + outputs = model.training_step(batch) + assert keys == set(outputs.keys()) + assert isinstance(outputs["loss"], torch.Tensor) + + # test validation step + batch = next(iter(loaders[1])) + outputs = model.validation_step(batch) + assert keys 
== set(outputs.keys()) + assert isinstance(outputs["loss"], torch.Tensor) + + # test test step + batch = next(iter(loaders[2])) + outputs = model.test_step(batch) + assert keys == set(outputs.keys()) + assert isinstance(outputs["loss"], torch.Tensor) + + shutil.rmtree(cfg.out_dir) + + +@withPackage('yacs') +@withPackage('pytorch_lightning') +def test_train(tmpdir): + import pytorch_lightning as pl + + load_cfg(cfg, args) + cfg.out_dir = osp.join(tmpdir, str(random.randrange(sys.maxsize))) + cfg.run_dir = osp.join(tmpdir, str(random.randrange(sys.maxsize))) + cfg.dataset.dir = osp.join(tmpdir, 'pyg_test_datasets', 'Planetoid') + + set_out_dir(cfg.out_dir, args.cfg_file) + dump_cfg(cfg) + set_printing() + + seed_everything(cfg.seed) + auto_select_device() + set_run_dir(cfg.out_dir) + + loaders = create_loader() + model = create_model() + cfg.params = params_count(model) + logger = LoggerCallback() + trainer = pl.Trainer(max_epochs=1, max_steps=4, callbacks=logger, + log_every_n_steps=1) + train_loader, val_loader = loaders[0], loaders[1] + trainer.fit(model, train_loader, val_loader) + + shutil.rmtree(cfg.out_dir) diff --git a/test/graphgym/test_logger.py b/test/graphgym/test_logger.py new file mode 100644 index 000000000000..fa1c005d4caf --- /dev/null +++ b/test/graphgym/test_logger.py @@ -0,0 +1,11 @@ +from torch_geometric.graphgym.logger import Logger, LoggerCallback +from torch_geometric.testing import withPackage + + +@withPackage('yacs') +@withPackage('pytorch_lightning') +def test_logger_callback(): + logger = LoggerCallback() + assert isinstance(logger.train_logger, Logger) + assert isinstance(logger.val_logger, Logger) + assert isinstance(logger.test_logger, Logger) diff --git a/torch_geometric/graphgym/logger.py b/torch_geometric/graphgym/logger.py index 055c2711288d..d50f459a8ef7 100644 --- a/torch_geometric/graphgym/logger.py +++ b/torch_geometric/graphgym/logger.py @@ -1,6 +1,9 @@ import logging import math import sys +import time +import warnings +from typing import Any, Dict, Optional import torch @@ -10,6 +13,16 @@ from torch_geometric.graphgym.utils.device import get_current_gpu_usage from torch_geometric.graphgym.utils.io import dict_to_json, dict_to_tb +try: + import pytorch_lightning as pl + from pytorch_lightning import Callback + +except ImportError: + pl = None + Callback = object + warnings.warn("Please install 'pytorch_lightning' for using the GraphGym " + "experiment manager via 'pip install pytorch_lightning'") + def set_printing(): """ @@ -236,14 +249,125 @@ def infer_task(): def create_logger(): - """ - Create logger for the experiment - - Returns: List of logger objects - - """ + r"""Create logger for the experiment.""" loggers = [] names = ['train', 'val', 'test'] for i, dataset in enumerate(range(cfg.share.num_splits)): loggers.append(Logger(name=names[i], task_type=infer_task())) return loggers + + +class LoggerCallback(Callback): + def __init__(self): + self._logger = create_logger() + self._train_epoch_start_time = None + self._val_epoch_start_time = None + self._test_epoch_start_time = None + + @property + def train_logger(self) -> Any: + return self._logger[0] + + @property + def val_logger(self) -> Any: + return self._logger[1] + + @property + def test_logger(self) -> Any: + return self._logger[2] + + def _get_stats( + self, + epoch_start_time: int, + outputs: Dict[str, Any], + trainer: 'pl.Trainer', + ) -> Dict: + return dict( + true=outputs['true'].detach().cpu(), + pred=outputs['pred_score'].detach().cpu(), + loss=float(outputs['loss']), + 
lr=trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0], + time_used=time.time() - epoch_start_time, + params=cfg.params, + ) + + def on_train_epoch_start( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + ): + self._train_epoch_start_time = time.time() + + def on_validation_epoch_start( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + ): + self._val_epoch_start_time = time.time() + + def on_test_epoch_start( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + ): + self._test_epoch_start_time = time.time() + + def on_train_batch_end( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + outputs: Dict[str, Any], + batch: Any, + batch_idx: int, + unused: int = 0, + ) -> None: + stats = self._get_stats(self._train_epoch_start_time, outputs, trainer) + self.train_logger.update_stats(**stats) + + def on_validation_batch_end( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + outputs: Optional[Dict[str, Any]], + batch: Any, + batch_idx: int, + dataloader_idx: int, + ) -> None: + stats = self._get_stats(self._val_epoch_start_time, outputs, trainer) + self.val_logger.update_stats(**stats) + + def on_test_batch_end( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + outputs: Optional[Dict[str, Any]], + batch: Any, + batch_idx: int, + dataloader_idx: int, + ) -> None: + stats = self._get_stats(self._test_epoch_start_time, outputs, trainer) + self.test_logger.update_stats(**stats) + + def on_train_epoch_end( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + ): + self.train_logger.write_epoch(trainer.current_epoch) + self.train_logger.close() + + def on_validation_epoch_end( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + ): + self.val_logger.write_epoch(trainer.current_epoch) + self.val_logger.close() + + def on_test_epoch_end( + self, + trainer: 'pl.Trainer', + pl_module: 'pl.LightningModule', + ): + self.test_logger.write_epoch(trainer.current_epoch) + self.test_logger.close() diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index 2d22a7c6aa75..96c97d5df720 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -1,9 +1,13 @@ +import time import warnings +from typing import Any, Dict, Tuple import torch from torch_geometric.graphgym.config import cfg +from torch_geometric.graphgym.loss import compute_loss from torch_geometric.graphgym.models.gnn import GNN +from torch_geometric.graphgym.optim import create_optimizer, create_scheduler from torch_geometric.graphgym.register import network_dict, register_network try: @@ -19,12 +23,35 @@ class GraphGymModule(LightningModule): def __init__(self, dim_in, dim_out, cfg): super().__init__() + self.cfg = cfg self.model = network_dict[cfg.model.type](dim_in=dim_in, dim_out=dim_out) def forward(self, *args, **kwargs): return self.model(*args, **kwargs) + def configure_optimizers(self) -> Tuple[Any, Any]: + optimizer = create_optimizer(self.model.parameters(), self.cfg.optim) + scheduler = create_scheduler(optimizer, self.cfg.optim) + return [optimizer], [scheduler] + + def _shared_step(self, batch, split: str) -> Dict: + batch.split = split + pred, true = self(batch) + loss, pred_score = compute_loss(pred, true) + step_end_time = time.time() + return dict(loss=loss, true=true, pred_score=pred_score, + step_end_time=step_end_time) + + def training_step(self, batch, *args, **kwargs): + return 
self._shared_step(batch, split="train") + + def validation_step(self, batch, *args, **kwargs): + return self._shared_step(batch, split="val") + + def test_step(self, batch, *args, **kwargs): + return self._shared_step(batch, split="test") + @property def encoder(self) -> torch.nn.Module: return self.model.encoder @@ -43,8 +70,7 @@ def pre_mp(self) -> torch.nn.Module: def create_model(to_device=True, dim_in=None, dim_out=None): - r""" - Create model for graph machine learning + r"""Create model for graph machine learning. Args: to_device (string): The devide that the model will be transferred to From cfaea95da5404d4390614419df86415f6c393ac4 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 14 May 2022 17:20:24 -0700 Subject: [PATCH 0042/2432] `HeteroData.node_items()` and `HeteroData.edge_items()` functionality (#4644) * update * changelog --- CHANGELOG.md | 1 + test/data/test_hetero_data.py | 21 ++++++++++++++++++--- torch_geometric/data/hetero_data.py | 8 ++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e280e5e0909b..8aeea2418fc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) - Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531)) - Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) - Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620)) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index b8b5c5acdb7f..69cfa25a580d 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -31,9 +31,14 @@ def test_init_hetero_data(): data['paper', 'paper'].edge_index = edge_index_paper_paper data['paper', 'author'].edge_index = edge_index_paper_author data['author', 'paper'].edge_index = edge_index_author_paper + assert len(data) == 2 - assert len(data.edge_types) == 3 assert data.node_types == ['v1', 'paper', 'author'] + assert len(data.node_stores) == 3 + assert len(data.node_items()) == 3 + assert len(data.edge_types) == 3 + assert len(data.edge_stores) == 3 + assert len(data.edge_items()) == 3 data = HeteroData( v1={'x': 1}, @@ -43,9 +48,14 @@ def test_init_hetero_data(): paper__author={'edge_index': edge_index_paper_author}, author__paper={'edge_index': edge_index_author_paper}, ) + assert len(data) == 2 - assert len(data.edge_types) == 3 assert data.node_types == ['v1', 'paper', 'author'] + assert len(data.node_stores) == 3 + assert len(data.node_items()) == 3 + assert len(data.edge_types) == 3 + assert len(data.edge_stores) == 3 + assert len(data.edge_items()) == 3 data = HeteroData({ 'v1': { @@ -67,9 +77,14 @@ def test_init_hetero_data(): 'edge_index': edge_index_author_paper }, }) + assert len(data) == 2 - assert len(data.edge_types) == 3 assert data.node_types == ['v1', 'paper', 'author'] + assert len(data.node_stores) == 3 + assert len(data.node_items()) == 3 + assert len(data.edge_types) == 3 + assert len(data.edge_stores) == 3 + assert len(data.edge_items()) == 3 def test_hetero_data_functions(): diff --git a/torch_geometric/data/hetero_data.py 
b/torch_geometric/data/hetero_data.py index bf52ad2b2c1b..e1035d769afe 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -229,6 +229,14 @@ def edge_stores(self) -> List[EdgeStorage]: r"""Returns a list of all edge storages of the graph.""" return list(self._edge_store_dict.values()) + def node_items(self) -> List[Tuple[NodeType, NodeStorage]]: + r"""Returns a list of node type and node storage pairs.""" + return list(self._node_store_dict.items()) + + def edge_items(self) -> List[Tuple[EdgeType, EdgeStorage]]: + r"""Returns a list of edge type and edge storage pairs.""" + return list(self._edge_store_dict.items()) + def to_dict(self) -> Dict[str, Any]: out = self._global_store.to_dict() for key, store in chain(self._node_store_dict.items(), From c20f8dfcd16fa8f0b6b6e87945c8daa05bf50a2e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 14 May 2022 17:54:24 -0700 Subject: [PATCH 0043/2432] Fix `MLP.jittable()` bug in case `return_emb=True` (#4645) * fix MLP * changelog --- CHANGELOG.md | 1 + torch_geometric/nn/models/mlp.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8aeea2418fc8..2c0cf8a02b2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) - Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 6a18d794d72a..64f8cc79870e 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -137,7 +137,7 @@ def reset_parameters(self): if hasattr(norm, 'reset_parameters'): norm.reset_parameters() - def forward(self, x: Tensor, return_emb: bool = False) -> Tensor: + def forward(self, x: Tensor, return_emb=None) -> Tensor: """""" x = self.lins[0](x) emb = x @@ -151,7 +151,7 @@ def forward(self, x: Tensor, return_emb: bool = False) -> Tensor: x = F.dropout(x, p=self.dropout, training=self.training) x = lin.forward(x) - return (x, emb) if return_emb else x + return (x, emb) if isinstance(return_emb, bool) else x def __repr__(self) -> str: return f'{self.__class__.__name__}({str(self.channel_list)[1:-1]})' From 90fa81de6b6e63781ca305ebc35a18878179fc39 Mon Sep 17 00:00:00 2001 From: Vijay Prakash Dwivedi Date: Sun, 15 May 2022 13:07:01 +0800 Subject: [PATCH 0044/2432] AQSOL dataset (#4626) * AQSOL dataset * minor edit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changelog * formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * minor doc formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doc 
formatting * update * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/aqsol.py | 122 +++++++++++++++++++++++++++ 3 files changed, 125 insertions(+) create mode 100644 torch_geometric/datasets/aqsol.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c0cf8a02b2a..71bca589cd20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) - Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) - Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531)) - Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index c5c1c29a86e5..393d6f3db772 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -17,6 +17,7 @@ from .qm9 import QM9 from .md17 import MD17 from .zinc import ZINC +from .aqsol import AQSOL from .molecule_net import MoleculeNet from .entities import Entities from .rel_link_pred_dataset import RelLinkPredDataset @@ -99,6 +100,7 @@ 'QM9', 'MD17', 'ZINC', + 'AQSOL', 'MoleculeNet', 'Entities', 'RelLinkPredDataset', diff --git a/torch_geometric/datasets/aqsol.py b/torch_geometric/datasets/aqsol.py new file mode 100644 index 000000000000..1aa4a11f6ed9 --- /dev/null +++ b/torch_geometric/datasets/aqsol.py @@ -0,0 +1,122 @@ +import os +import os.path as osp +import pickle +import shutil +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + Data, + InMemoryDataset, + download_url, + extract_zip, +) + + +class AQSOL(InMemoryDataset): + r"""The AQSOL dataset from the `Benchmarking Graph Neural Networks + `_ paper based on + `AqSolDB `_, a + standardized database of 9,982 molecular graphs with their aqueous + solubility values, collected from 9 different data sources. + + The aqueous solubility targets are collected from experimental measurements + and standardized to LogS units in AqSolDB. These final values denote the + property to regress in the :class:`AQSOL` dataset. After filtering out few + graphs with no bonds/edges, the total number of molecular graphs is 9,833. + For each molecular graph, the node features are the types of heavy atoms + and the edge features are the types of bonds between them, similar as in + the :class:`~torch_geometric.datasets.ZINC` dataset. + + Args: + root (string): Root directory where the dataset should be saved. + split (string, optional): If :obj:`"train"`, loads the training + dataset. + If :obj:`"val"`, loads the validation dataset. + If :obj:`"test"`, loads the test dataset. + (default: :obj:`"train"`) + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. 
+ (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + pre_filter (callable, optional): A function that takes in an + :obj:`torch_geometric.data.Data` object and returns a boolean + value, indicating whether the data object should be included in + the final dataset. (default: :obj:`None`) + """ + url = 'https://www.dropbox.com/s/lzu9lmukwov12kt/aqsol_graph_raw.zip?dl=1' + + def __init__(self, root: str, split: str = 'train', + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + pre_filter: Optional[Callable] = None): + assert split in ['train', 'val', 'test'] + super().__init__(root, transform, pre_transform, pre_filter) + path = osp.join(self.processed_dir, f'{split}.pt') + self.data, self.slices = torch.load(path) + + @property + def raw_file_names(self) -> List[str]: + return [ + 'train.pickle', 'val.pickle', 'test.pickle', 'atom_dict.pickle', + 'bond_dict.pickle' + ] + + @property + def processed_file_names(self) -> List[str]: + return ['train.pt', 'val.pt', 'test.pt'] + + def download(self): + shutil.rmtree(self.raw_dir) + path = download_url(http://github.com/self.url,%20self.root) + extract_zip(path, self.root) + os.rename(osp.join(self.root, 'asqol_graph_raw'), self.raw_dir) + os.unlink(path) + + def process(self): + for raw_path, path in zip(self.raw_paths, self.processed_paths): + with open(raw_path, 'rb') as f: + graphs = pickle.load(f) + + data_list: List[Data] = [] + for graph in graphs: + x, edge_attr, edge_index, y = graph + + x = torch.from_numpy(x) + edge_attr = torch.from_numpy(edge_attr) + edge_index = torch.from_numpy(edge_index) + y = torch.tensor([y]).float() + + if edge_index.numel() == 0: + continue # Skipping for graphs with no bonds/edges. 
+ + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, + y=y) + + if self.pre_filter is not None and not self.pre_filter(data): + continue + + if self.pre_transform is not None: + data = self.pre_transform(data) + + data_list.append(data) + + torch.save(self.collate(data_list), path) + + def atoms(self) -> List[str]: + return [ + 'Br', 'C', 'N', 'O', 'Cl', 'Zn', 'F', 'P', 'S', 'Na', 'Al', 'Si', + 'Mo', 'Ca', 'W', 'Pb', 'B', 'V', 'Co', 'Mg', 'Bi', 'Fe', 'Ba', 'K', + 'Ti', 'Sn', 'Cd', 'I', 'Re', 'Sr', 'H', 'Cu', 'Ni', 'Lu', 'Pr', + 'Te', 'Ce', 'Nd', 'Gd', 'Zr', 'Mn', 'As', 'Hg', 'Sb', 'Cr', 'Se', + 'La', 'Dy', 'Y', 'Pd', 'Ag', 'In', 'Li', 'Rh', 'Nb', 'Hf', 'Cs', + 'Ru', 'Au', 'Sm', 'Ta', 'Pt', 'Ir', 'Be', 'Ge' + ] + + def bonds(self) -> List[str]: + return ['NONE', 'SINGLE', 'DOUBLE', 'AROMATIC', 'TRIPLE'] From 9c5cbb2e31d6bafe9a05da1a471cab2f20ffbad3 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Sun, 15 May 2022 11:33:13 +0530 Subject: [PATCH 0045/2432] `HeteroData.subgraph()` (#4635) * added test for HeteroData subgraph function * added comments to HeteroData.subgraph * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * modified changelog * update * update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_hetero_data.py | 58 ++++++++++++++++++++- torch_geometric/data/hetero_data.py | 79 ++++++++++++++++++++++++++++- 3 files changed, 136 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71bca589cd20..3989cff8f5f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `HeteroData.subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) - Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) - Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) - Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531)) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 69cfa25a580d..bc3a28078d8e 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -6,15 +6,20 @@ x_paper = torch.randn(10, 16) x_author = torch.randn(5, 32) +x_conference = torch.randn(5, 8) idx_paper = torch.randint(x_paper.size(0), (100, ), dtype=torch.long) idx_author = torch.randint(x_author.size(0), (100, ), dtype=torch.long) +idx_conference = torch.randint(x_conference.size(0), (100, ), dtype=torch.long) edge_index_paper_paper = torch.stack([idx_paper[:50], idx_paper[:50]], dim=0) edge_index_paper_author = torch.stack([idx_paper[:30], idx_author[:30]], dim=0) -edge_index_author_paper = torch.stack([idx_paper[:30], idx_author[:30]], dim=0) +edge_index_author_paper = torch.stack([idx_author[:30], idx_paper[:30]], dim=0) +edge_index_paper_conference = torch.stack( + [idx_paper[:25], idx_conference[:25]], dim=0) edge_attr_paper_paper = torch.randn(edge_index_paper_paper.size(1), 8) +edge_attr_author_paper = torch.randn(edge_index_author_paper.size(1), 8) def get_edge_index(num_src_nodes, num_dst_nodes, num_edges): @@ -159,6 +164,57 @@ def test_hetero_data_rename(): assert edge_index.tolist() == edge_index_paper_paper.tolist() +def test_hetero_data_subgraph(): + 
data = HeteroData() + data.num_node_types = 3 + data['paper'].x = x_paper + data['paper'].name = 'paper' + data['paper'].num_nodes = x_paper.size(0) + data['author'].x = x_author + data['author'].num_nodes = x_author.size(0) + data['conference'].x = x_conference + data['conference'].num_nodes = x_conference.size(0) + data['paper', 'paper'].edge_index = edge_index_paper_paper + data['paper', 'paper'].edge_attr = edge_attr_paper_paper + data['paper', 'paper'].name = 'cites' + data['author', 'paper'].edge_index = edge_index_author_paper + data['paper', 'author'].edge_index = edge_index_paper_author + data['paper', 'conference'].edge_index = edge_index_paper_conference + + subset = { + 'paper': torch.randperm(x_paper.size(0))[:4], + 'author': torch.randperm(x_author.size(0))[:2] + } + + out = data.subgraph(subset) + + assert out.num_node_types == data.num_node_types + assert out.node_types == ['paper', 'author'] + + assert len(out['paper']) == 3 + assert torch.allclose(out['paper'].x, data['paper'].x[subset['paper']]) + assert out['paper'].name == 'paper' + assert out['paper'].num_nodes == 4 + assert len(out['author']) == 2 + assert torch.allclose(out['author'].x, data['author'].x[subset['author']]) + assert out['author'].num_nodes == 2 + + assert out.edge_types == [ + ('paper', 'to', 'paper'), + ('author', 'to', 'paper'), + ('paper', 'to', 'author'), + ] + + assert len(out['paper', 'paper']) == 3 + assert out['paper', 'paper'].edge_index is not None + assert out['paper', 'paper'].edge_attr is not None + assert out['paper', 'paper'].name == 'cites' + assert len(out['paper', 'author']) == 1 + assert out['paper', 'author'].edge_index is not None + assert len(out['author', 'paper']) == 1 + assert out['author', 'paper'].edge_index is not None + + def test_copy_hetero_data(): data = HeteroData() data['paper'].x = x_paper diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index e1035d769afe..18aa970eb64e 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -12,7 +12,7 @@ from torch_geometric.data.data import BaseData, Data, size_repr from torch_geometric.data.storage import BaseStorage, EdgeStorage, NodeStorage from torch_geometric.typing import EdgeType, NodeType, QueryType -from torch_geometric.utils import is_undirected +from torch_geometric.utils import bipartite_subgraph, is_undirected NodeOrEdgeType = Union[NodeType, EdgeType] NodeOrEdgeStorage = Union[NodeStorage, EdgeStorage] @@ -445,6 +445,83 @@ def rename(self, name: NodeType, new_name: NodeType) -> 'HeteroData': return self + def subgraph(self, subset_dict: Dict[NodeType, Tensor]) -> 'HeteroData': + r"""Returns the induced subgraph containing the node types and + corresponding nodes in :obj:`subset_dict`. + + .. code-block:: python + + data = HeteroData() + data['paper'].x = ... + data['author'].x = ... + data['conference'].x = ... + data['paper', 'cites', 'paper'].edge_index = ... + data['author', 'paper'].edge_index = ... + data['paper', 'conference'].edge_index = ... 
+ print(data) + >>> HeteroData( + paper={ x=[10, 16] }, + author={ x=[5, 32] }, + conference={ x=[5, 8] }, + (paper, cites, paper)={ edge_index=[2, 50] }, + (author, to, paper)={ edge_index=[2, 30] }, + (paper, to, conference)={ edge_index=[2, 25] } + ) + + subset_dict = { + 'paper': torch.tensor([3, 4, 5, 6]), + 'author': torch.tensor([0, 2]), + } + + print(data.subgraph(subset_dict)) + >>> HeteroData( + paper={ x=[4, 16] }, + author={ x=[2, 32] }, + (paper, cites, paper)={ edge_index=[2, 24] }, + (author, to, paper)={ edge_index=[2, 5] } + ) + + Args: + subset_dict (Dict[str, LongTensor or BoolTensor]): A dictonary + holding the nodes to keep for each node type. + """ + data = self.__class__(self._global_store) + + for node_type, subset in subset_dict.items(): + for key, value in self[node_type].items(): + if key == 'num_nodes': + if subset.dtype == torch.bool: + data[node_type].num_nodes = int(subset.sum()) + else: + data[node_type].num_nodes = subset.size(0) + elif self[node_type].is_node_attr(key): + data[node_type][key] = value[subset] + else: + data[node_type][key] = value + + for edge_type in self.edge_types: + src, _, dst = edge_type + if src not in subset_dict or dst not in subset_dict: + continue + + edge_index, _, edge_mask = bipartite_subgraph( + (subset_dict[src], subset_dict[dst]), + self[edge_type].edge_index, + relabel_nodes=True, + size=(self[src].num_nodes, self[dst].num_nodes), + return_edge_mask=True, + ) + + for key, value in self[edge_type].items(): + if key == 'edge_index': + data[edge_type].edge_index = edge_index + elif self[edge_type].is_edge_attr(key): + data[edge_type][key] = value[edge_mask] + else: + data[edge_type][key] = value + + return data + def to_homogeneous(self, node_attrs: Optional[List[str]] = None, edge_attrs: Optional[List[str]] = None, add_node_type: bool = True, From db40aa6d403e7088e2eb312a03d9a5181edb0cc6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 15 May 2022 00:42:43 -0700 Subject: [PATCH 0046/2432] Hotfix: `MLP.jittable()` for PyTorch 1.10 (#4648) * update * changelog --- CHANGELOG.md | 2 +- torch_geometric/nn/models/mlp.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3989cff8f5f0..28752c36e2bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed -- Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645)) +- Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) - Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 64f8cc79870e..571f704de5b2 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -7,6 +7,7 @@ from torch_geometric.nn.dense.linear import Linear from torch_geometric.nn.resolver import activation_resolver +from torch_geometric.typing import NoneType class MLP(torch.nn.Module): @@ -137,7 +138,7 @@ def reset_parameters(self): if hasattr(norm, 'reset_parameters'): norm.reset_parameters() - def forward(self, x: Tensor, return_emb=None) -> Tensor: + def forward(self, x: Tensor, return_emb: NoneType = None) -> Tensor: """""" x = self.lins[0](x) emb = x From 0ded02b75431c45e686062c0926d165b421b708a Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Sun, 15 May 2022 22:48:28 +0800 Subject: [PATCH 0047/2432] Add check on `add_self_loops` in `HeteroConv` and `to_hetero` (#4647) * add check on self-loops in hetero conv * add check on self-loops in to_hetero * fix old tets * remove pytest import * update Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/conv/test_hetero_conv.py | 15 +++++++++++- test/nn/test_to_hetero_transformer.py | 26 +++++++++++++++++++++ torch_geometric/nn/conv/hetero_conv.py | 4 ++++ torch_geometric/nn/to_hetero_transformer.py | 10 ++++++-- torch_geometric/utils/hetero.py | 9 +++++++ 6 files changed, 62 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 28752c36e2bc..f90896d596b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added a check in `HeteroConv` and `to_hetero()` to ensure that `MessagePassing.add_self_loops` is disabled ([4647](https://github.com/pyg-team/pytorch_geometric/pull/4647)) - Added `HeteroData.subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) - Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) - Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) diff --git a/test/nn/conv/test_hetero_conv.py b/test/nn/conv/test_hetero_conv.py index 286a2e2f561b..36d93a7ebf7e 100644 --- a/test/nn/conv/test_hetero_conv.py +++ b/test/nn/conv/test_hetero_conv.py @@ -32,7 +32,8 @@ def test_hetero_conv(aggr): { ('paper', 'to', 'paper'): GCNConv(-1, 64), ('author', 'to', 'paper'): SAGEConv((-1, -1), 64), - ('paper', 'to', 'author'): GATConv((-1, -1), 64), + ('paper', 'to', 'author'): GATConv( + (-1, -1), 64, add_self_loops=False), }, aggr=aggr) assert len(list(conv.parameters())) > 0 @@ -77,3 +78,15 @@ def test_hetero_conv_with_custom_conv(): assert len(out) == 2 assert out['paper'].size() == (50, 64) assert out['author'].size() == (30, 64) + + +class MessagePassingLoops(MessagePassing): + def __init__(self): + super().__init__() + self.add_self_loops = True + + +def test_hetero_conv_self_loop_error(): + HeteroConv({('a', 'to', 'a'): MessagePassingLoops()}) + with pytest.raises(ValueError, match="incorrect message passing"): + HeteroConv({('a', 'to', 'b'): MessagePassingLoops()}) diff --git a/test/nn/test_to_hetero_transformer.py b/test/nn/test_to_hetero_transformer.py index 95bb0e7ab426..917502f87e4c 100644 --- a/test/nn/test_to_hetero_transformer.py +++ b/test/nn/test_to_hetero_transformer.py @@ -1,5 +1,6 @@ from typing import Tuple +import pytest import torch from torch import Tensor from torch.nn import Linear, ReLU, Sequential @@ -363,3 +364,28 @@ def test_graph_level_to_hetero(): model = to_hetero(model, metadata, aggr='mean', debug=False) out = model(x_dict, edge_index_dict, batch_dict) assert out.size() == (1, 64) + + +class MessagePassingLoops(MessagePassing): + def __init__(self): + super().__init__() + self.add_self_loops = True + + def forward(self, x): + return x + + +class ModelLoops(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = MessagePassingLoops() + + def forward(self, x): + return self.conv(x) + + +def test_hetero_transformer_self_loop_error(): + to_hetero(ModelLoops(), metadata=(['a'], [('a', 'to', 'a')])) + with pytest.raises(ValueError, match="incorrect message passing"): + to_hetero(ModelLoops(), metadata=(['a', 'b'], [('a', 'to', 'b'), + ('b', 'to', 'a')])) diff --git a/torch_geometric/nn/conv/hetero_conv.py b/torch_geometric/nn/conv/hetero_conv.py index 738eb440b28b..95253039534a 100644 --- a/torch_geometric/nn/conv/hetero_conv.py +++ b/torch_geometric/nn/conv/hetero_conv.py @@ -7,6 +7,7 @@ from torch_geometric.nn.conv.hgt_conv import group from torch_geometric.typing import Adj, EdgeType, NodeType +from torch_geometric.utils.hetero import check_add_self_loops class HeteroConv(Module): @@ -47,6 +48,9 @@ def __init__(self, convs: Dict[EdgeType, Module], aggr: Optional[str] = "sum"): super().__init__() + for edge_type, module in convs.items(): + check_add_self_loops(module, [edge_type]) + src_node_types = set([key[0] for key in convs.keys()]) dst_node_types = set([key[-1] for key in convs.keys()]) if len(src_node_types - dst_node_types) > 0: diff --git 
a/torch_geometric/nn/to_hetero_transformer.py b/torch_geometric/nn/to_hetero_transformer.py index 6f1af095e147..233c3c8bc8ee 100644 --- a/torch_geometric/nn/to_hetero_transformer.py +++ b/torch_geometric/nn/to_hetero_transformer.py @@ -6,9 +6,12 @@ import torch from torch.nn import Module -from torch_geometric.nn.fx import Transformer +from torch_geometric.nn.fx import Transformer, get_submodule from torch_geometric.typing import EdgeType, Metadata, NodeType -from torch_geometric.utils.hetero import get_unused_node_types +from torch_geometric.utils.hetero import ( + check_add_self_loops, + get_unused_node_types, +) try: from torch.fx import Graph, GraphModule, Node @@ -168,6 +171,9 @@ def call_message_passing_module(self, node: Node, target: Any, name: str): # Add calls to edge type-wise `MessagePassing` modules and aggregate # the outputs to node type-wise embeddings afterwards. + module = get_submodule(self.module, target) + check_add_self_loops(module, self.metadata[1]) + # Group edge-wise keys per destination: key_name, keys_per_dst = {}, defaultdict(list) for key in self.metadata[1]: diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index 5102ce3fb9b0..b34ab47c4d5a 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -47,3 +47,12 @@ def get_unused_node_types(node_types: List[NodeType], edge_types: List[EdgeType]) -> Set[NodeType]: dst_node_types = set(edge_type[-1] for edge_type in edge_types) return set(node_types) - set(dst_node_types) + + +def check_add_self_loops(module: torch.nn.Module, edge_types: List[EdgeType]): + is_bipartite = any([key[0] != key[-1] for key in edge_types]) + if is_bipartite and getattr(module, 'add_self_loops', False): + raise ValueError( + f"'add_self_loops' attribute set to 'True' on module '{module}' " + f"for use with edge type(s) '{edge_types}'. This will lead to " + f"incorrect message passing results.") From ced3886bdc54ee3e798f2ec33cbf585aa94d75ba Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 15 May 2022 13:51:07 -0700 Subject: [PATCH 0048/2432] `MLP.plain_last` option (#4652) * plain_last * changelog --- CHANGELOG.md | 1 + test/nn/models/test_mlp.py | 24 ++++++++++++++++++------ torch_geometric/nn/models/mlp.py | 23 +++++++++++++++-------- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f90896d596b5..e6223f2fdc21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) - Added a check in `HeteroConv` and `to_hetero()` to ensure that `MessagePassing.add_self_loops` is disabled ([4647](https://github.com/pyg-team/pytorch_geometric/pull/4647)) - Added `HeteroData.subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) - Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) diff --git a/test/nn/models/test_mlp.py b/test/nn/models/test_mlp.py index 6fb6ceebc750..92d94e46783b 100644 --- a/test/nn/models/test_mlp.py +++ b/test/nn/models/test_mlp.py @@ -7,13 +7,18 @@ from torch_geometric.testing import is_full_test -@pytest.mark.parametrize('batch_norm,act_first', - product([False, True], [False, True])) -def test_mlp(batch_norm, act_first): +@pytest.mark.parametrize('batch_norm,act_first,plain_last', + product([False, True], [False, True], [False, True])) +def test_mlp(batch_norm, act_first, plain_last): x = torch.randn(4, 16) torch.manual_seed(12345) - mlp = MLP([16, 32, 32, 64], batch_norm=batch_norm, act_first=act_first) + mlp = MLP( + [16, 32, 32, 64], + batch_norm=batch_norm, + act_first=act_first, + plain_last=plain_last, + ) assert str(mlp) == 'MLP(16, 32, 32, 64)' out = mlp(x) assert out.size() == (4, 64) @@ -23,6 +28,13 @@ def test_mlp(batch_norm, act_first): assert torch.allclose(jit(x), out) torch.manual_seed(12345) - mlp = MLP(16, hidden_channels=32, out_channels=64, num_layers=3, - batch_norm=batch_norm, act_first=act_first) + mlp = MLP( + 16, + hidden_channels=32, + out_channels=64, + num_layers=3, + batch_norm=batch_norm, + act_first=act_first, + plain_last=plain_last, + ) assert torch.allclose(mlp(x), out) diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 571f704de5b2..908f7c1621e2 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -58,6 +58,9 @@ class MLP(torch.nn.Module): batch_norm_kwargs (Dict[str, Any], optional): Arguments passed to :class:`torch.nn.BatchNorm1d` in case :obj:`batch_norm == True`. (default: :obj:`None`) + plain_last (bool, optional): If set to :obj:`False`, will apply + non-linearity, batch normalization and dropout to the last layer as + well. (default: :obj:`True`) bias (bool, optional): If set to :obj:`False`, the module will not learn additive biases. (default: :obj:`True`) relu_first (bool, optional): Deprecated in favor of :obj:`act_first`. 
@@ -77,6 +80,7 @@ def __init__( act_first: bool = False, act_kwargs: Optional[Dict[str, Any]] = None, batch_norm_kwargs: Optional[Dict[str, Any]] = None, + plain_last: bool = True, bias: bool = True, relu_first: bool = False, ): @@ -100,14 +104,16 @@ def __init__( self.dropout = dropout self.act = activation_resolver(act, **(act_kwargs or {})) self.act_first = act_first + self.plain_last = plain_last self.lins = torch.nn.ModuleList() - pairwise = zip(channel_list[:-1], channel_list[1:]) - for in_channels, out_channels in pairwise: + iterator = zip(channel_list[:-1], channel_list[1:]) + for in_channels, out_channels in iterator: self.lins.append(Linear(in_channels, out_channels, bias=bias)) self.norms = torch.nn.ModuleList() - for hidden_channels in channel_list[1:-1]: + iterator = channel_list[1:-1] if plain_last else channel_list[1:] + for hidden_channels in iterator: if batch_norm: norm = BatchNorm1d(hidden_channels, **batch_norm_kwargs) else: @@ -140,17 +146,18 @@ def reset_parameters(self): def forward(self, x: Tensor, return_emb: NoneType = None) -> Tensor: """""" - x = self.lins[0](x) - emb = x - for lin, norm in zip(self.lins[1:], self.norms): - emb = x + for lin, norm in zip(self.lins, self.norms): + x = lin(x) if self.act is not None and self.act_first: x = self.act(x) x = norm(x) if self.act is not None and not self.act_first: x = self.act(x) x = F.dropout(x, p=self.dropout, training=self.training) - x = lin.forward(x) + emb = x + + if self.plain_last: + x = self.lins[-1](x) return (x, emb) if isinstance(return_emb, bool) else x From f045451a45f3e3906654c913b1d8fc24dfba962f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 15 May 2022 14:17:55 -0700 Subject: [PATCH 0049/2432] Confirm that `to_hetero()` works with custom functions (`dropout_adj`) (#4653) * to_hetero_dropout * changelog * update --- CHANGELOG.md | 1 + test/nn/test_to_hetero_transformer.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6223f2fdc21..397c50d84b4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) - Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) - Added a check in `HeteroConv` and `to_hetero()` to ensure that `MessagePassing.add_self_loops` is disabled ([4647](https://github.com/pyg-team/pytorch_geometric/pull/4647)) - Added `HeteroData.subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) diff --git a/test/nn/test_to_hetero_transformer.py b/test/nn/test_to_hetero_transformer.py index 917502f87e4c..6d2ee1f2ceae 100644 --- a/test/nn/test_to_hetero_transformer.py +++ b/test/nn/test_to_hetero_transformer.py @@ -2,6 +2,7 @@ import pytest import torch +import torch.nn.functional as F from torch import Tensor from torch.nn import Linear, ReLU, Sequential from torch_sparse import SparseTensor @@ -9,6 +10,9 @@ from torch_geometric.nn import BatchNorm, GCNConv, GINEConv, GlobalPooling from torch_geometric.nn import Linear as LazyLinear from torch_geometric.nn import MessagePassing, RGCNConv, SAGEConv, to_hetero +from torch_geometric.utils import dropout_adj + +torch.fx.wrap('dropout_adj') class Net1(torch.nn.Module): @@ -123,6 +127,17 @@ def forward(self, x: Tensor) -> Tensor: return self.batch_norm(x) +class Net10(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = SAGEConv(16, 32) + + def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: + x = F.dropout(x, p=0.5, training=self.training) + edge_index, _ = dropout_adj(edge_index, p=0.5, training=self.training) + return self.conv(x, edge_index) + + def test_to_hetero(): x_dict = { 'paper': torch.randn(100, 16), @@ -213,6 +228,13 @@ def test_to_hetero(): assert out['paper'].size() == (4, 16) assert out['author'].size() == (8, 16) + model = Net10() + model = to_hetero(model, metadata, debug=False) + out = model(x_dict, edge_index_dict) + assert isinstance(out, dict) and len(out) == 2 + assert out['paper'].size() == (100, 32) + assert out['author'].size() == (100, 32) + class GCN(torch.nn.Module): def __init__(self): From da78713adde41f091eef95aa19ecc312a68c7dd9 Mon Sep 17 00:00:00 2001 From: Arun Date: Mon, 16 May 2022 18:38:57 +0530 Subject: [PATCH 0050/2432] Updates to extracting positions from sdf files (#4654) * extracting positions from sdf files * changelog --- CHANGELOG.md | 1 + torch_geometric/datasets/qm9.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 397c50d84b4f..c7c2b065c7a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) - Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) - Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) diff --git a/torch_geometric/datasets/qm9.py b/torch_geometric/datasets/qm9.py index 4dbc0f0b68f8..542a4b568959 100644 --- a/torch_geometric/datasets/qm9.py +++ b/torch_geometric/datasets/qm9.py @@ -233,8 +233,8 @@ def process(self): N = mol.GetNumAtoms() - pos = suppl.GetItemText(i).split('\n')[4:4 + N] - pos = [[float(x) for x in line.split()[:3]] for line in pos] + conf = mol.GetConformer() + pos = conf.GetPositions() pos = torch.tensor(pos, dtype=torch.float) type_idx = [] From 3b503fa0c7688e49449aa35ce4ddab9fca84ef72 Mon Sep 17 00:00:00 2001 From: Tzu-Han Chang <57966875+tzuhanchang@users.noreply.github.com> Date: Mon, 16 May 2022 16:49:30 +0100 Subject: [PATCH 0051/2432] Add `unbatch` functionality (#4628) * Add unbatching tool Unbatching data from DataLoader batch to a list. * Add an unbatching tool Unbatching data from DataLoader batch to a list. * Add an unbatching tool Unbatching data from DataLoader batch to a list. This is useful for GNN edge classifier, in which case, graphs are required to be reconstructed with edge predictions. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add unbatch to utils For unbatching node features, for example, use ```src=data.x``` and ```index =data.x_batch``` (assume ```follow_batch``` is set to ```x```). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Revert changes to loader/utils.py * Update unbatch.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update unbatch.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create test_unbatch.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add unbatch * Update test_unbatch.py * Update unbatch.py * Update unbatch.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merge * update * changelog * typo Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/utils/test_unbatch.py | 13 +++++++++++++ torch_geometric/utils/__init__.py | 2 ++ torch_geometric/utils/unbatch.py | 24 ++++++++++++++++++++++++ 4 files changed, 40 insertions(+) create mode 100644 test/utils/test_unbatch.py create mode 100644 torch_geometric/utils/unbatch.py diff --git a/CHANGELOG.md b/CHANGELOG.md index c7c2b065c7a1..39cd3f45f54d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) - Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) - Added a check in `HeteroConv` and `to_hetero()` to ensure that `MessagePassing.add_self_loops` is disabled ([4647](https://github.com/pyg-team/pytorch_geometric/pull/4647)) diff --git a/test/utils/test_unbatch.py b/test/utils/test_unbatch.py new file mode 100644 index 000000000000..1d72da5a565a --- /dev/null +++ b/test/utils/test_unbatch.py @@ -0,0 +1,13 @@ +import torch + +from torch_geometric.utils import unbatch + + +def test_unbatch(): + src = torch.arange(10) + batch = torch.tensor([0, 0, 0, 1, 1, 2, 2, 3, 4, 4]) + + out = unbatch(src, batch) + assert len(out) == 5 + for i in range(len(out)): + assert torch.equal(out[i], src[batch == i]) diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index a9c09230d6c0..8b7e89e99947 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -17,6 +17,7 @@ from .to_dense_batch import to_dense_batch from .to_dense_adj import to_dense_adj from .sparse import dense_to_sparse +from .unbatch import unbatch from .normalized_cut import normalized_cut from .grid import grid from .geodesic import geodesic_distance @@ -61,6 +62,7 @@ 'to_dense_batch', 'to_dense_adj', 'dense_to_sparse', + 'unbatch', 'normalized_cut', 'grid', 'geodesic_distance', diff --git a/torch_geometric/utils/unbatch.py b/torch_geometric/utils/unbatch.py new file mode 100644 index 000000000000..61ae14b73b88 --- /dev/null +++ b/torch_geometric/utils/unbatch.py @@ -0,0 +1,24 @@ +from typing import List + +import torch +from torch import Tensor + +from torch_geometric.utils import degree + + +def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]: + r"""Splits :obj:`src` according to a :obj:`batch` vector along dimension + :obj:`dim`. + + Args: + src (Tensor): The source tensor. + batch (LongTensor): The batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + entry in :obj:`src` to a specific example. Must be ordered. + dim (int, optional): The dimension along which to split the :obj:`src` + tensor. 
(default: :obj:`0`) + + :rtype: :class:`List[Tensor]` + """ + sizes = degree(batch, dtype=torch.long).tolist() + return src.split(sizes, dim) From cef37e1b2e11994ba8a0b5f40e12fe78eb3395cc Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 16 May 2022 12:58:21 -0700 Subject: [PATCH 0052/2432] Weights & Biases Example (#4656) * initial commit * changelog * update * update * update * update * update * update * update * cron --- .github/workflows/examples.yml | 48 ++++++++++++++++ .github/workflows/full_testing.yml | 10 +--- .gitignore | 1 + CHANGELOG.md | 1 + examples/gcn.py | 90 +++++++++++++++++------------- setup.py | 9 ++- torch_geometric/logging.py | 38 +++++++++++++ 7 files changed, 147 insertions(+), 50 deletions(-) create mode 100644 .github/workflows/examples.yml create mode 100644 torch_geometric/logging.py diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml new file mode 100644 index 000000000000..a7bc97fc64fa --- /dev/null +++ b/.github/workflows/examples.yml @@ -0,0 +1,48 @@ +name: Examples + +on: # yamllint disable-line rule:truthy + workflow_dispatch: + schedule: + - cron: "0 7 * * *" # Everyday at 7:00am UTC/11:00pm PST + +jobs: + + pytest: + if: github.repository == 'pyg-team/pytorch_geometric' + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: [3.9] + torch-version: [1.11.0] + include: + - torch-version: 1.11.0 + torchvision-version: 0.12.0 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install PyTorch ${{ matrix.torch-version }}+cpu + run: | + pip install torch==${{ matrix.torch-version}}+cpu torchvision==${{ matrix.torchvision-version}}+cpu -f https://download.pytorch.org/whl/torch_stable.html + + - name: Install internal dependencies + run: | + pip install torch-scatter -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html + pip install torch-sparse -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html + + - name: Install main package + run: | + pip install .[benchmark] + + - name: Run examples + run: | + python examples/gcn.py --wandb + env: + WANDB_API_KEY: ${{ secrets.WANDB_API_KEY }} diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index edb91d51109c..1d63e1bf7e79 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -3,7 +3,7 @@ name: Full Testing on: # yamllint disable-line rule:truthy workflow_dispatch: schedule: - - cron: "0 7 * * *" # Everyday at 7:00am UTC/11:00pm PST + - cron: "0 6 * * *" # Everyday at 6:00am UTC/10:00pm PST jobs: @@ -53,11 +53,3 @@ jobs: uses: codecov/codecov-action@v2 with: fail_ci_if_error: false - - - name: Run examples - if: ${{ runner.os == 'Linux' }} - run: | - python examples/gcn.py - python examples/gat.py - python examples/mutag_gin.py - python examples/gnn_explainer.py diff --git a/.gitignore b/.gitignore index 2e7c4c976eab..df1ce1a898e1 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ build/ dist/ alpha/ runs/ +wandb/ .cache/ .eggs/ lightning_logs/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 39cd3f45f54d..b08ab171b31e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) - Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) diff --git a/examples/gcn.py b/examples/gcn.py index 12ac549bddd5..09e03d73b61a 100644 --- a/examples/gcn.py +++ b/examples/gcn.py @@ -6,77 +6,89 @@ import torch_geometric.transforms as T from torch_geometric.datasets import Planetoid -from torch_geometric.nn import ChebConv, GCNConv # noqa +from torch_geometric.logging import init_wandb, log +from torch_geometric.nn import GCNConv parser = argparse.ArgumentParser() -parser.add_argument('--use_gdc', action='http://github.com/store_true', - help='Use GDC preprocessing.') +parser.add_argument('--dataset', type=str, default='Cora') +parser.add_argument('--hidden_channels', type=int, default=16) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--use_gdc', action='http://github.com/store_true', help='Use GDC') +parser.add_argument('--wandb', action='http://github.com/store_true', help='Track experiment') args = parser.parse_args() -dataset = 'Cora' -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) -dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures()) +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +init_wandb(name=f'GCN-{args.dataset}', lr=args.lr, epochs=args.epochs, + hidden_channels=args.hidden_channels, device=device) + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') +dataset = Planetoid(path, args.dataset, transform=T.NormalizeFeatures()) data = dataset[0] if args.use_gdc: - gdc = T.GDC(self_loop_weight=1, normalization_in='sym', - normalization_out='col', - diffusion_kwargs=dict(method='ppr', alpha=0.05), - sparsification_kwargs=dict(method='topk', k=128, - dim=0), exact=True) - data = gdc(data) - - -class Net(torch.nn.Module): - def __init__(self): + transform = T.GDC( + self_loop_weight=1, + normalization_in='sym', + normalization_out='col', + diffusion_kwargs=dict(method='ppr', alpha=0.05), + sparsification_kwargs=dict(method='topk', k=128, dim=0), + exact=True, + ) + data = transform(data) + + +class GCN(torch.nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels): super().__init__() - self.conv1 = GCNConv(dataset.num_features, 16, cached=True, + self.conv1 = GCNConv(in_channels, hidden_channels, cached=True, normalize=not args.use_gdc) - self.conv2 = GCNConv(16, dataset.num_classes, cached=True, + self.conv2 = GCNConv(hidden_channels, out_channels, cached=True, normalize=not args.use_gdc) - # self.conv1 = ChebConv(data.num_features, 16, K=2) - # self.conv2 = ChebConv(16, data.num_features, K=2) - def forward(self): - x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr - x = F.relu(self.conv1(x, edge_index, edge_weight)) - x = F.dropout(x, training=self.training) + def forward(self, x, edge_index, edge_weight=None): + x = F.dropout(x, p=0.5, training=self.training) + x = self.conv1(x, edge_index, edge_weight).relu() + x = F.dropout(x, p=0.5, training=self.training) x = self.conv2(x, 
edge_index, edge_weight) - return F.log_softmax(x, dim=1) + return x -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model, data = Net().to(device), data.to(device) +model = GCN(dataset.num_features, args.hidden_channels, dataset.num_classes) +model, data = model.to(device), data.to(device) optimizer = torch.optim.Adam([ dict(params=model.conv1.parameters(), weight_decay=5e-4), dict(params=model.conv2.parameters(), weight_decay=0) -], lr=0.01) # Only perform weight-decay on first convolution. +], lr=args.lr) # Only perform weight-decay on first convolution. def train(): model.train() optimizer.zero_grad() - F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward() + out = model(data.x, data.edge_index, data.edge_weight) + loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) + loss.backward() optimizer.step() + return float(loss) @torch.no_grad() def test(): model.eval() - logits, accs = model(), [] - for _, mask in data('train_mask', 'val_mask', 'test_mask'): - pred = logits[mask].max(1)[1] - acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item() - accs.append(acc) + out = model(data.x, data.edge_index, data.edge_weight) + + accs = [] + for mask in [data.train_mask, data.val_mask, data.test_mask]: + pred = out[mask].argmax(dim=-1) + accs.append(int((pred == data.y[mask]).sum()) / int(mask.sum())) return accs -best_val_acc = test_acc = 0 -for epoch in range(1, 201): - train() +best_val_acc = final_test_acc = 0 +for epoch in range(1, args.epochs + 1): + loss = train() train_acc, val_acc, tmp_test_acc = test() if val_acc > best_val_acc: best_val_acc = val_acc test_acc = tmp_test_acc - print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, ' - f'Val: {best_val_acc:.4f}, Test: {test_acc:.4f}') + log(Epoch=epoch, Loss=loss, Train=train_acc, Val=val_acc, Test=test_acc) diff --git a/setup.py b/setup.py index c25d609943f7..13dfaf8155ed 100644 --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ 'pytorch-lightning', ] -full_install_requires = graphgym_requires + [ +full_requires = graphgym_requires + [ 'h5py', 'numba', 'pandas', @@ -34,6 +34,10 @@ 'torchmetrics>=0.7', ] +benchmark_requires = [ + 'wandb', +] + test_requires = [ 'pytest', 'pytest-cov', @@ -62,7 +66,8 @@ install_requires=install_requires, extras_require={ 'graphgym': graphgym_requires, - 'full': full_install_requires, + 'full': full_requires, + 'benchmark': benchmark_requires, 'test': test_requires, 'dev': dev_requires, }, diff --git a/torch_geometric/logging.py b/torch_geometric/logging.py new file mode 100644 index 000000000000..730272847f81 --- /dev/null +++ b/torch_geometric/logging.py @@ -0,0 +1,38 @@ +import sys +from typing import Any + +_wandb_initialized: bool = False + + +def init_wandb(name: str, **kwargs): + if '--wandb' not in sys.argv: + return + + from datetime import datetime + + import wandb + + wandb.init( + project=name, + entity='pytorch-geometric', + name=datetime.now().strftime('%Y-%m-%d_%H:%M'), + config=kwargs, + ) + + global _wandb_initialized + _wandb_initialized = True + + +def log(**kwargs): + def _map(value: Any) -> str: + if isinstance(value, int): + return f'{value:03d}' + if isinstance(value, float): + return f'{value:.4f}' + return value + + print(', '.join(f'{key}: {_map(value)}' for key, value in kwargs.items())) + + if _wandb_initialized: + import wandb + wandb.log(kwargs) From 6c87390d72c81f5df54534b3e9a4a50bf32902ba Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 17 May 2022 06:52:02 -0700 Subject: [PATCH 0053/2432] Integration of 
`pyg_sphinx_theme` (#4664) * update * readme * update * update * update * changelog * reset * update * update --- .github/workflows/documentation.yml | 3 +- CHANGELOG.md | 1 + CONTRIBUTING.md | 6 +- README.md | 4 +- docs/README.md | 5 +- docs/requirements.txt | 4 +- .../img => _figures}/architecture.pdf | Bin .../img => _figures}/architecture.svg | 0 docs/source/_static/css/custom.css | 213 ------------------ docs/source/_static/img/pyg1.png | Bin 51889 -> 0 bytes docs/source/_static/img/pyg1.svg | 1 - docs/source/_static/img/pyg2.png | Bin 22196 -> 0 bytes docs/source/_static/img/pyg2.svg | 1 - docs/source/_static/img/pyg_logo.svg | 1 - docs/source/_static/img/pyg_logo_text.svg | 1 - docs/source/_templates/autosummary/class.rst | 8 - docs/source/conf.py | 69 ++---- docs/source/modules/loader.rst | 1 - docs/source/notes/cheatsheet.rst | 12 +- docs/source/notes/data_cheatsheet.rst | 2 +- 20 files changed, 37 insertions(+), 295 deletions(-) rename docs/source/{_static/img => _figures}/architecture.pdf (100%) rename docs/source/{_static/img => _figures}/architecture.svg (100%) delete mode 100644 docs/source/_static/css/custom.css delete mode 100644 docs/source/_static/img/pyg1.png delete mode 100644 docs/source/_static/img/pyg1.svg delete mode 100644 docs/source/_static/img/pyg2.png delete mode 100644 docs/source/_static/img/pyg2.svg delete mode 100644 docs/source/_static/img/pyg_logo.svg delete mode 100644 docs/source/_static/img/pyg_logo_text.svg delete mode 100644 docs/source/_templates/autosummary/class.rst diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 3fc751a8ce31..9cf113178039 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -32,8 +32,7 @@ jobs: run: | pip install torch-scatter -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html pip install torch-sparse -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html - pip install sphinx - pip install sphinx_rtd_theme + pip install git+https://github.com/pyg-team/pyg_sphinx_theme.git - name: Install main package run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index b08ab171b31e..59b38b670317 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664)) - Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) - Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5d6c49c9d592..333b800a9299 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -101,9 +101,11 @@ Everytime you send a Pull Request, your commit will be built and checked against To build the documentation: 1. [Build and install](#developing-pyg) PyG from source. -2. 
Install [Sphinx](https://www.sphinx-doc.org/en/master/) via `pip install sphinx sphinx_rtd_theme`. +2. Install [Sphinx](https://www.sphinx-doc.org/en/master/) theme via + ```bash + pip install git+https://github.com/pyg-team/pyg_sphinx_theme.git + ``` 3. Generate the documentation via: - ```bash cd docs make html diff --git a/README.md b/README.md index 419c3e407844..f45f464ad0fe 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ [slack-url]: https://join.slack.com/t/torchgeometricco/shared_invite/zt-p6br3yuo-BxRoe36OHHLF6jYU8xHtBA

-<img src="..." />
+<img src="..." />

-------------------------------------------------------------------------------- @@ -179,7 +179,7 @@ It comprises of the following components: * Finally, PyG provides an abundant set of GNN **models**, and examples that showcase GNN models on standard graph benchmarks. Thanks to its flexibility, users can easily build and modify custom GNN models to fit their specific needs.

-<img src="..." />
+<img src="..." />

## Implemented GNN Models diff --git a/docs/README.md b/docs/README.md index 6a0259c10694..69979eb14533 100644 --- a/docs/README.md +++ b/docs/README.md @@ -3,7 +3,10 @@ To build the documentation: 1. [Build and install](https://github.com/pyg-team/pytorch_geometric/blob/master/CONTRIBUTING.md#developing-pytorch-geometric) PyG from source. -2. Install [Sphinx](https://www.sphinx-doc.org/en/master/) via `pip install sphinx sphinx_rtd_theme`. +2. Install [Sphinx](https://www.sphinx-doc.org/en/master/) theme via + ``` + pip install git+https://github.com/pyg-team/pyg_sphinx_theme.git + ``` 3. Generate the documentation file via: ``` cd docs diff --git a/docs/requirements.txt b/docs/requirements.txt index c07e0e2fe513..5b3a689b5019 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,4 @@ -numpy https://download.pytorch.org/whl/cpu/torch-1.9.0%2Bcpu-cp38-cp38-linux_x86_64.whl https://data.pyg.org/whl/torch-1.9.0%2Bcpu/torch_scatter-2.0.7-cp38-cp38-linux_x86_64.whl https://data.pyg.org/whl/torch-1.9.0%2Bcpu/torch_sparse-0.6.10-cp38-cp38-linux_x86_64.whl -sphinx==4.0.2 -sphinx_rtd_theme==0.5.2 +git+https://github.com/pyg-team/pyg_sphinx_theme.git diff --git a/docs/source/_static/img/architecture.pdf b/docs/source/_figures/architecture.pdf similarity index 100% rename from docs/source/_static/img/architecture.pdf rename to docs/source/_figures/architecture.pdf diff --git a/docs/source/_static/img/architecture.svg b/docs/source/_figures/architecture.svg similarity index 100% rename from docs/source/_static/img/architecture.svg rename to docs/source/_figures/architecture.svg diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css deleted file mode 100644 index 52a7a2829b3c..000000000000 --- a/docs/source/_static/css/custom.css +++ /dev/null @@ -1,213 +0,0 @@ -.wy-side-nav-search .wy-dropdown > a img.logo, .wy-side-nav-search > a img.logo { - height: 150px; -} - -.wy-side-nav-search { - background: rgb(243,244,247); -} - -.wy-side-nav-search > div.version { - color: black; -} - -.wy-nav-content-wrap { - background: inherit; -} - -.wy-side-nav-search input[type="text"] { - border: none; - box-shadow: none; - background: white; - border-radius: 0; - font-size: 100%; -} - -.wy-menu-vertical li.current a, -.wy-menu-vertical li.toctree-l1.current > a { - border: none; -} - -.ethical-rtd > div.ethical-sidebar, -.ethical-rtd > div.ethical-footer { - display: none !important; -} - -h1 { - text-transform: uppercase; - font-family: inherit; - font-weight: 200; -} - -h2, -.rst-content .toctree-wrapper p.caption { - font-family: inherit; - font-weight: 200; -} - -.rst-content a:visited { - color: #3091d1; -} - -/* Begin code */ -.rst-content pre.literal-block, -.rst-content div[class^="highlight"] { - border: none; -} - -.rst-content pre.literal-block, -.rst-content div[class^="highlight"] pre, -.rst-content .linenodiv pre { - font-size: 80%; -} - -.highlight { - background: #f6f8fa; - border-radius: 6px; -} - -.highlight .kn, -.highlight .k { - color: #d73a49; -} - -.highlight .nn { - color: inherit; - font-weight: inherit; -} - -.highlight .nc { - color: #e36209; - font-weight: inherit; -} - -.highlight .fm, -.highlight .nd, -.highlight .nf, -.highlight .nb { - color: #6f42c1; -} - -.highlight .bp, -.highlight .n { - color: inherit; -} - -.highlight .kc, -.highlight .s1, -.highlight .s2, -.highlight .mi, -.highlight .mf, -.highlight .bp, -.highlight .bn, -.highlight .ow { - color: #005cc5; - font-weight: inherit; -} - -.highlight .c1 { - color: #6a737d; -} - 
-.rst-content code.xref { - padding: .2em .4em; - background: rgba(27,31,35,.05); - border-radius: 6px; - border: none; -} -/* End code */ - -.rst-content dl:not(.docutils) dt, -.rst-content dl:not(.docutils) dl dt { - background: rgb(243,244,247); -} - -.rst-content dl:not(.docutils) dt.field-odd { - text-transform: uppercase; - background: inherit; - border: none; - margin-bottom: 6px !important; -} - -.rst-content dl:not(.docutils) .property:first-child .pre { - text-transform: uppercase; - font-style: normal; -} - -em.sig-param span.n:first-child, em.sig-param span.n:nth-child(2) { - color: black; - font-style: normal; -} - -em.sig-param span.n:nth-child(3), -em.sig-param span.n:nth-child(3) a { - color: inherit; - font-weight: normal; - font-style: normal; -} - -em.sig-param span.default_value { - font-family: SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace; - font-style: normal; - font-size: 90%; -} - -.sig-paren { - padding: 0 4px; -} - -.wy-table-responsive table td, -.wy-table-responsive table th { - white-space: normal; -} - -.wy-table-bordered-all, -.rst-content table.docutils { - border: none; -} - -.wy-table-bordered-all td, -.rst-content table.docutils td { - border: none; -} - -.wy-table-odd td, -.wy-table-striped tr:nth-child(2n-1) td, -.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td { - background: rgb(243,244,247); -} - -.wy-table td, -.rst-content table.docutils td, -.rst-content table.field-list td, -.wy-table th, -.rst-content table.docutils th, -.rst-content table.field-list th { - padding: 14px; -} - -table.colwidths-given tr td p, -table.colwidths-given tr th p { - text-align: center; -} - -table.colwidths-given tr td:first-child p, -table.colwidths-given tr th:first-child p { - text-align: left; -} - -dl:not(.py) > dt { - text-transform: uppercase; - background: inherit !important; - color: inherit !important; - border: none !important; -} - -html.writer-html5 .rst-content dl.field-list > dt, -html.writer-html5 .rst-content dl.footnote > dt { - padding-left: 0; -} - -html.writer-html5 .rst-content dl.field-list, -html.writer-html5 .rst-content dl.footnote { - display: inherit; -} diff --git a/docs/source/_static/img/pyg1.png b/docs/source/_static/img/pyg1.png deleted file mode 100644 index 698d523e5cb0961cd57d8f77e19c9ddc3a48ae2c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51889 zcmYg%byQSs*Z0sM4bokrbeA+zf`l@3gA5(g-60(}h|(f8bc3XH$P5TWrvig?NY{7p zeO&ANgSA|qYtFShetVzyS{lmukEkDkKp=col^3r;AdGAf2<0a>Ch$LwH8>9sa9mXk z+(94;;)g$!ktZiIAP^%+^@W^{PxkJTnWG6J=ltGNsl!~7RYa87EqPZ*cEebRARhY@ z`NGp85^i;M`RDdm@s@>DZ5_5hZzRHnALA<9Tn#nAt2J7DHu+@MtfX>uoK#&g4Zl1g zmfquI-YS@NCytZhkbeC$f0>BAY|tB7;t7)&eqS3nZF;?8=+I^M8ofdMO#1p}&OdnC zJGiU$T`L!e@?y_PK_2_EgxjjkMFZQdKWwjiVBJAzXGwb7kgCy~6JQ1e>XyLH4y_6D zH%cP4?W^w9?C8TY&}>3C z)m$2U*yuQqR6d(r{&AnVuf%bnEJBc|NPMF4$1pv{!?Erlbl6a0Kvg{nA8xHX!kwq= z-Xsc+o3c*`%?+)|h#=bg>=4eqK*(u?hKk}d_6uN8{jODml5qV_V<(uMX2D-;|gdiF>uK` z9jOVYAIdvvb{T~<`4|(+8g*fzzsH_2yG8|nFaJbSuuY|+r0r4p$M&{(8+rTc)a=%> zrb3Sx|L-wNxmCPio$^oYBvA6=ZK{~T?Cpg`9~E=y8J>t_vazx4U~wg*|2z6aaTQNy z-Ms^P)gr!zQ~+c_cgqxtz$m6->Vti~Z+wb5>X$z8_pOZN=rC6!VUN_%15?~w{AO~EYWq=c@5~B=BE85Y&$UnS;Iw2W zoP~y(&+^iO4_ZC%%rLP~9}Xuoqe5Cr=9@ixQtZtD84MJ^n0c8uy;8v=)baJ(in%M} z-&Z~e!01?f4(>D61Km`z2g6A%YZiSd4T*H5dY$B=(7m9YV>E$xRJ7O+=VfEDv)7{R z!S#5KZBjn{#X5)+^>vw#P)Lg+0nYAY6V3!9xXyJW+@1@{C-zrgA&&ul1?}q!i@ttc zDfdW6ilvaA9MA?X@8x3rSr}gik7u#B=@m$yftXg*+!y(rZ-$EbW0{RlsP32WafRQFQpAh|KrFF9TuZ^N`5IH0gmHSMhK?%1boPD 
zIb0pX-SvibvM4G?|1}`oX4wW23=fcP095=B27_i%rA2UOx@a>Y8;_~X`zR}xW6XzT(Xg|q%7DFkeu&I zCT6dd-q=XLs6p~RA}`39<_8Ekl}5aQWzbvriMaLXpw=W_z!+-OmrlbGcUoGf&(YbI z0!XJc-6-vkn$q)ZzZxbPSIqeb_oVwhNoN|WWVM_H-mvlrwviqUtR!uP;NR#Tb^f4O zRukN`Gs)otwF7~=52;0u2C9rHr4jeOLWoW*TYA|Wq)h3nuUF2~Yy1?Zk3ep@*A>nRI2eRpo}dV9UIK=Fm!GKueI zkgfCHoWz)?OHaH~D$O#y^}WKnFlz`h;#hYzP}x5FG+`;DmfJ2^0GVLu2Fq0Ol>m*0 z2?`CuvlogH^J=}XOKxbzk*%d+*Pwz_AU`h=bfSp*0kpBGV%R`W{>&s%S7rO)z9PMI zK9?|=V1_HgkWozN@*KYX1EP1=yQ%6td%Z$`==SKP z`{J&jdcZn1{>M3Lr-H(353%f3z)Ra=47}K_=Z#cYFVFRjlxNEd8bcr2cE%K1(W?m- zSef|_sDk(C9v-&S%rXq!D*3CM^2V!09vG28(OqT?VwEzU2&q>~=Hp~{Sbr*81%p-E zChZvM+7804H~=_GO{)6q&>GO+vm6QvQiP|^g#GG|yWKXg%l__P@aCq!iOTUK-~#Qi zQ>WtGhSzn;2xA_l5pwAQRLeE1n&80Lv6p|_?VOxqyjdShl@Zm)&|zlvYp?B(fG~eI z_A#*Y*X4=BS8k7G1hiVYDny!ibVK)q+tJX6M#0#y+sY(+gn7V=i1RazXF35;4uhag zCf9S+`tKi?1gVuU82rVK4)-SahFZU^e8YY3D2SOvfti5Dc9Zh8Obo>wC&j0DDoSZI z>%$;SZBg+VmC-^i>A6zOE-jvwkkW z!3UC1&9M+Uq;%(bA25?N)f(INjBw?AQnUwnUD$PhHYZQv(zvKU6YM~7!Iyur@cl1l ze8)QFjUn@P#pgF(`OSsW=3=%GdpgI62%s0SlP*<@bL2$pWYV^j@QBr#|hQqNnp4u1KC*kx+;Khq2obz7m#)i>% zDx%lrem26$XbRbsZ+FYwkE+h&BlND6Xzke~b*S&g-#ed+^vgSJVpOX{SgY@zUn4nm z2g@8Q?g#P!dG8dbOqJPq?<%V&qHTy#l%KmLHlw@I0b2}fbprN~{<|S`8J#(fd`n;j z%zErz|BS}ZfHfW1ci09RPxP))&qLY;g}}Ic@u~4azl(Un?X`Hnzw;>3Tk8h)AH7+w z5MqdCOPNO}n1HK)Sp7hRTuS?{!^EJ7ONV#{+> zWrj9~JLUDPkTQYB5vMUx)U#Mc;i+<41!n-HaJW3sHhy=SzZ%;8@m|1dkdBH@`)#w# z;PVYWGOebU&huw{c>2dzLl20V|Nj=J9k$|kQe;wewU@rX`%3Fs=Qh>d0hKLMT+3mP zZVV%?8~r;B#%YddnT9K0Znx`Q7?eB`_@zswbp5ac#JlBU+*)1624sV3m@Uzh#~y|x zI?l+}-jJ#$_jU53$)7A*hzB?v!y~~`Ybc_boW*w}B9zXE#u-4u4P4}+veW#hGhxjp z@4l^5UI@*5Z%?E&{;iFiy&k2zykT4l6QdB(>E&*&%|TlyG44D(x~*I|h$_sRBvthL zz#w(T6S>*1?!8y7Jfn8rc?Yb>n~v=binyXAP0cOS%sGm<@k~sGBsw|;R+&nY*lXU54{MI~w(!dL1r{0Hj44@fI!N|ITq%;u#s^hJoM7P*z62M*g~GbUmsQpn|Wc~!RBirF#7$K2z+uVq$0Sf?1_2=M{Y?ceO7uiAQoEuF~` zsZ!<*szCxQucXuSXIKnXjvj;}aFbd@O9AJu zE`IalZ$hRqr^B6j^)8b#{Sytd{4rDN4zl&%G=rxc6dwa&!uW?R-D9J&EZ#nFk7H5K zKNFhiRJy!RUNru?5HRsBgsiqiot^GQM;j(*yAqL+i)F1RC1o?$qp=Yj$M+5Ky(1p@ zI!(S4Szd0iDUFePu3q3&@>R#imc&?Ps$!AE_*Y5H8#<9kgXjf*5AdJu1s4EFHlmz< zJNJg-cw*;n#r!9WzGO$z+a`yVC?2|xUwqmQ8NHDsj%%=6G9^mU`7Hj&#}kJ>1=$n z@W|PMSDojzQ_);~!J-msXc05b}ntAmFK6>OIao_b|~k&tbL2*kCw0qgu=-JEmm zAk_`eYy7Cfzh2JX7lzXlnv2uW3HPO18YV64KPn3Y%}d+ivpJT8^z9R;T>t*xWR0 zi)R4+d!gNHrsdWF#mbAfvpOvgli#TD6Y`<`7`4(!8hNVns-pp$g8a#HcuH@j1Xhc2 z|1Kj+lQr*D_vI(=cWzj5zvSi;Vc%5Un+SlwpR)9AJht@yO*X!rXqk3$dixyAW_P?t zIAqkrvkuJeM^r5vDCsq2Zu0V{4*Ly?zb5ta`L;nwRf{59>|B33K4=h}&VT~plv}d3 z&=0984Tf>CDK6odeJB?s%}qjqNR)cxCCiqaKA9C48qXYl0)epZ{VsQ%)}?CNe5X5m za+n8{jn-8?3;h_iMQW6nl+#_`S`HILmJ-H&_Rf($|9pxz=K>h$bA$r&| zZF?sG?%_YdxlA7YW|GtZl&hgyKAu#I_Dp$kMSQs_xj3otPd;g+;TsbpBO6MDd|}on zzBQJ^CwzWiKzt=pnBZ7Thf+GB`=|fHEdh%C<$-_auQ_t+%150nKE$xYedDa0yrg)F zCL;(W1KsSao>0=xiRLyKYN&J))$ek2etuKy2L`l(J7K1DT?$$~)q7wOH6_4T#EaK7q0J#CGgZ-94de>vT9wc8{jfB?$4 zN-?CSV&YqZG^GKFrL2yx3CK3h=E_MK`5T*j(q2@ABUNLL`ij$d|E419oY0gtj_U|-( zNi@MGJpKvh9#pe38@euc?C2Ce2RAG61Pw$Y^KSc_Std0%S!@f_sE|sDC30!txahjA zAI7d3YC$qEJ=d=CE5z2flNS{b_X>GMC!4q1?Z9Ru=tojhZ%{(Y+MmU}cAd4Os%ru5 zp%8yqb7UvE>Kh*zV|X%&->=wtMjYf$ITv3+zIwtq>RmpyDo4g$kBTs!h=S`Okc$}E zfU$Yof|nWdED-9U0l0!gHHR0&N(HSAwfa!s`d3C5?HaH4yzKZ6uDz)5OsWizLvlCw8Qn!0v2 zRgp(Ke@S%OP$67D#@fw#%@r**Zb=zkNe$V9XvxN6hdSpQ`(lX-N0|-o8njTAcT_-f z7b0NheeLVIzx2Y^VFWAZu}%pJ`13&c`{T8ZZjEb#JXfFYQkPe5s)#c(`2ZMknMH%V znqSEpLGU2gq=*El6oM?Jp<&qDyYm}Z<2~6g2iqgJlv4hsh!;kb08XWeJlg=05Z9-t*l|2P1s7C^MjNv2h02Wzj znbG-bIb?-ASdcl!*ulE_Q>WS4dc?A+7pUcz z3V}ZWF1p%pIRUpX#jI#%D?zk(shrI;z2w3!iUU%hDp%E~S3p^o&!S2)>kOAOQsnXc>Fxc6A}IF3~Q zZ8&q(3(AWmp?CXqwfyxzkbk|f>8PR(M5C8(6wz 
zpFaL@+-q?!8XX<(ZYX(4bPpAB*cAxB%yoTgYfw^Q{2of{dh=8u{2PfYgR5v$Ljoc#!rd=tCr-dL`=o=b|wFPD{pb zEaQXm;%45B6=Hn7rV_x|tbi))5b+dEuTw*+4L;2r6o}*f0JvMOtEk*#woVq=Av5r2 zuB7HB3B6i?{+5vZVMvgF>?OH?feJ~HTuOP=ZyjFRj|$GHE)nLUtM`!z=x)s-zK0l1 z(%8mC+78H%~zdUmR-`_~DX}nhi)DTUvV1YmcJIRxU`osxW???6eqJek*4(1;f zK{#}I^{S^6jZu>o4*-earWTOvfVBG{qluSACMqj#5mof~5H)ly>(#QtMIQ7?QpPYU z@3d|7FJcE{f4L;7c6hR9TdW-y!LeMbZ}B?I2c(k<@CDKI_M6WU=%|oS@3A&gwy?UE zW(wIlNl}Po+TG2I#-sLbIC!SI|G@#sr#&#Jq|oB1Asfh#V*4o&2W~(Wn$#p|*nCGW zFefNE6U&w4WM8xsL5r9c+~keV0KFOTG5)`i;rXzbm*U@Fa32ExWBMy#4P>Qrf`ws= zGMw``i#33?qG;MW{usaTo+^aRC|wfi*q2ZWM$Ys6E-{c!LUns6h0*ptCz!DeG&i<0O(gsy8j-E$;I5_+?eJd zW2O+enB0A-eQ^6DQ*LaEwRWSu`obPp3djZ1qi&ckt7-w^X?Q`$pFMnngvg?JflUAGnfI^l&Qolv~@W-06 zL`UK%V`@is+=X`;I54SSw3Alknu>$}E>p8*!5CCDdD#8_9mJm)GbJpm^o#!Fcq$r9 zxKoqyW>7Z4(Rf^Qun2b!x!bzHAZ7iHYS182VSP*V`|=1NQRJON!O02Mu%;vL0duHu zHunWFGx6eY1TshTFFjgWZ&LPLuT>}kzE>7@6b83>B}s&$*e(L1Hl%^GqU>P1(TkwD z2l3azOu>e!s+I;12LnPY@bN?r{w*<_4^Ikq) z&U)5dU_=m0(WlS$Dm&#Dw2@CB?I31;i+v(#Qc;4JloKc)tB~4NP8SLJXSzsY3s(Kn zdqMC6zq*@u>R$8XkUUBgoo+fWNA4bT>du1CnGvPy({UOXflvz;nixl$ItUQKvgyzy z2aFdS@c16DL*lPgDG9^i2uY3`%JfP8rrM;a=N;TSf1nQ>KnZ{%1q zi!iny`)J!?VN8-cUIL_WGLk{4?y-jnVdh1(q%ZB)=y-jZQ4y}-2d50+{`YpAG?GWv z8ihlnU;`B&t}<7DaZC`~JOrVY(e&2P&J3cCOGEnc6cd06rrF8w{c=0kgv)N?6##LAMtF zyWe=`y{%>)bVxfs#n8S~KAxw4PGC^Qlr9z7rpud?-XLz4d76KCK6-RHr5S-6AHQV| za`cph|E_Md3lV#ZAZTP7-dcY%MltkHCkslp&a#Ux(_Oeg$v6bn@|{(@*k+m?HWr4i zVghSyL3B3S#y{BlvjVxq#v~y%n3%ncD9aXPq5zC5j4L> z67b{p6aY9vY_zNm?UXQjiR*6QA9~F}Yru~{ZbfAdhGy$=m$xj-oIWE3y_G5-kdS>T zE6mfn4yQJxYxzz!14OZJFjE5TTsRv@alc9{VTEjcPzeO;={Ueqo#T}RND>G{Oka8F z1|S`YmHY_^^iTP}EtI#P9A9e!^nd-+-wAvtTHNF8U?t#30*dJfzFuW?x{@DD?%9G< z#y7IXzo{R~P2^Vx<3;*b3xejB|1Rc~$+;heFRI-1s5A`oL=uCB$7f{ggdZ7RBY#oc zsFu{SU^7{zd`8e1j1=FFEs`@hA_~@fLGp5b=?v%~_)*JhSwjS8Gx26NasojBR*|OC zCP@`phjZ6D4q8u5x%fd12=eE|xs0<43(4tUF8oMwnSmjqr diff --git a/docs/source/_static/img/pyg2.png b/docs/source/_static/img/pyg2.png deleted file mode 100644 index 2add72aceccd976f8802ad0eac96d25de878d1bf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22196 zcmXtAbyQT(+owyqK|oqUVPOGTy1PN?hNY24O1fLZ1(tB>ZltBVOH!1S29XZm%lCKQ zKknh2duN`S&wS>YXJ(1kP?N{Qp~gW%Lc&v2kkLXyLf!$M4J-`cjVQ|dCEyRXi-Nv8 z5)uW`(}O(1dM=KHM31B>BdPNNew5vzP4j(bz{VK=2P5)JoYbI4wUA_a12SGv5Jdk$9`=Mh570k>&<$i@(SkSicJV{mlt#_@*7p5CoV&u*v4L zuK72bjT%Y#Z)ug0)1{E@>uJI?pIcLt{-lWrP^h4wZ6kYTw)ed+p)9Hye&>>dKcRTm z@4DaCfDncMga~YM7ZVT2qnJdFv$d)+zW(KjVix;&)oxx&^*WfrIgLyPtQ_k4cFAk{f=D%f_-UONvLleRtR` z{{#6V&vQTwhUttH3PNq^BV*AGW}_~Mm}nlaf|>?SVSuo}Ns288yih{fP|_(LzwJ!{ zPpsu9)9YXznLM(&VZk&r@t?{iNbxMpC7|31sD>FGTn@g< z|N9n5$Z`+_c%d}wn&pe;_@p%uW;_m~jzUP%&-9Pp4lx^^m-9$tb5U{N*lueq*j=ar43D!FDYoxHA@K1;Ysve zT>e(TtH&!8gze8~U!m1@X!cL9+lBy*j8TYS<;;_J&T2_dUe@$pqpkLb8ZdJqeml_& zeA>~!{JAN1fB^vTwXa$T-;)L(LYBy|aW)}!_hQfijsL*qlkWe%Kn9dwUJ|ijMjx^H zX30=THa4HxU1rMtCmAH&UQVSASXL5x?7ZE&DR6-`^%THy1qXghd-@m*A^%Y~Gn@R~ zftFsP_HL@KPGT24dYu8yKM)y+0l6K@>_0gs;=zF5Pz}wrOtg!in>|B$`C19|Rs#Go zOBY7|KUg^>=`ApB;~b>#2)CGl=&(-n_+?OkEtPh8a%~E=oT{7ht-1(RbVEEJgZ=cE zsZl!rEo-d-s?UKU?NvWHFCTqmCdD(QNzhz8&TG|{Bur+0R;jUYEQ_5N*9Hg z?f%!SDjLEI#G>1AptLwZW;hQc3b83?^R=$>Dd_x9xa~wMrwWD;$_@^i03qZ$cso4Y zgl_#LX2p!3sRk4A`*eD#($R0V2w##4fTbBO!s;i!%(OY9(`peN^3y9MP2N*%#Bb0P zzhCw<{~R<+pm#CuJ1`1_J|=zq3ZRDer9I3_@RutHB>)%5+nP+GA#}}#R~}xRU4zLbR9Q8@EnLfrVv1;kS^sJhV@TosK(7!LVgY8!Q zUvZtH6Y<_$u~D8ekwzc|<)!qX@C@4wc?JvnU-7&nN6 zuu8&@LWt*I8U*HH3D^XC5p#n~LnoS5ho4~gUmGl-4Ogk`bbwG$ZY|^LnWSa>$7OBb zcvEWvu|Hv>lg`^k|7+N{7*w0cMio?&|NFl22udJkB5<4acpi9jpf|r-S@GEMjrzga 
z6wT~OF$xeBP%I4byLb9z6ISOz`7L48Y|v?+;fY`Hv(G*kWWiz^IBnH)~*n z$qi}#HJh90&+fF2JsmTdC43LaiBu8N{KGDaJE2D~ch<{P{iSff41GI{RCXo$(*eCV zFjgDD^xGk%;?7=76j7p^aX17xb$s7;Gg_uCs-T-RBGq;I{XHbBPla?mx7|7`bm1fO z%5L8i?=U0}*bf*bUB%1u65490CE|zd%)&1awvTc4D?=~7mPWcfjREP5-uUA4`Dqn- zF4-DyY8@tqm~>Kh&h>#nbfaiRnI}Orit-9M01F=@LeC_3(JpK>f)m*?*PFIN1aVDM zqMpcy1$<1l`ea}NotjuDvz^egq^ObXwDKXeIjX&anx;)8$YLy_&~2(Ujyd$rSd%k3n$e&R z{g{@*OQi`{L+vWYP90j8*bX)WV%L`X6Gc%CKsxBL=Hw-5?XU`yc?G+z+P$WSh~|0X z(!zLj(7|;`b(pcbK?&*phffn(VKP2~_R*h)7I9}Ne4OY>cS=JyzU7}>UQf|JcjGl4 z*Gu*_Cxn{D=@w2epw` zj%FtJMk3{lPLX;`eu5Y%YON$AAWjo-Fybwu^NZ zbNqbV1!3nuQlz|LNlx-J!Z%N@VApJZjcK*hM?$KJ7eu98F3jXj_%z8dQW!SHh>oN{ z;=1Q3Gv+CoT7fzyiPYD3eM9kV1K_$Hp*UUeaen%k1%0hLA<`69t$UF}P!z;=#&Enu zITx{cMnE%6532H(TR&?h*l$h#zO)+qvy;bEn{F{JAj7W$)Y}CYRgcT?y(!4G#JII zul;sL4t*~(jlXX{IN7(D6Nz9206a>0qP3>ZrgW@M^vjBN{=4vNPkO&sKeZ%Q$4pNe zmfg9nxQ=qA^f0mM5L)MiJ9Y)NnYFrvKb7Y~i-`?e@rMdYrUr@eT?O5ALKP52zmWm5WR)S@RJ3_GX`G-Y#7N&z7={QY87Z*xhZIs5g zMR_o48nlxI5-iDrk0Yk4qxl3*qegry0oCby)WWrsOfSmj zMw^^vtVU+yVb)d9S52(wX)#5Q3r1nOP2dkqeh4SO1d%5N?+|!|*)!3}6s5kVL4}Tbe=2+)~Wc5eC zP~WM@nnv)k#G={TNkwJFybV0=kG~dRjsg<~1-jBrx_wBQiu^U`I2tJKL)|0Nbls-V zuTq5ImbOzx=y-7T)N4%_EoS+-GW}S?cEvzBeL1lgU1!0)^5>spj3Ml(3X1Edv>?MF zL}Bs5?&1Be$ zPDr{o2zU19<0QEpVG6Rl;yf3`9Fa{FnkN1}X@O8N+?;jmi#REGu;nFWiTNUfZ!YA6 z#c(1C^^cc9;khH7v@Ggs!#F%-9ac5`ioE+B27i(VfUf*YqaTOJc5&^Teqt<>KOg|olD|ugi80h`>A@JdOR}cA4ZMVbgpX~ZAgo7f1UgQ(TOQbB5`_|%b zOWV88+bSjrt-)FX!$@s41~}E;2A`?*8{Tse^k7yJ2)f+92{J#*m;L?AI4;OiS@wIs z-FsGSx2%0n>#IO3xmm(pbRkl}c0~+#Zs+u}$Ib)VqiCb~^)`={0@gEoq+Ty}X}!!( zWK5ApPyRNlhLkfrGH5O$XQ{D?<*wG-s6Zr2jZS)YLNY*Lu;-orMq@qvv&!{;*2&wpBYZO;Ul1QCgY}rP>Ye&R|SainFW{$bN3mp zD@N5G&!|WnUgrx3LMXWj#SwGHh*x_JvegpU`P-k3QBJ!Zl7hxyP7Ym!7Sh=|#& zuiJ&@MT5rXUt6SD0~q>iM+GYDON$FE?#}j4t>YLQxQ_XV?UBEG7YRh)JubB3LKhT= zyNYy9n^1T&b(&7Y@k(cQ7kY*cSv>n`W|q4I;LCqLeR0&v;#tD!IdnzQOCmZ?4aY0N z9JN$f$>-3Pw7P7?{V6K5h3nWg!eM+u8Ea8KyR^wO_)qzM?NR_)r2!=0RKvi}abOJr z;yHopYkClME;h!O7dFzEkQ#h5_*1pw@grmQkrN86Sw|oM=_?Nlr)`d1l-rw{-t^W5 zWhDpQ1k%Es>4^(9&QQTnb{gI`GL@^jCYuIkaz$g0G6>M}vqn57xwKSg7)_?yPHsKou zo|1fN(6rb1tDv5nMBOG4&nzLCbu0?jyQwpKc;Q}f1x)|uL!78|cfoFr{XNIsRy1=A z)!*O)otC9omY>L*TJXDkWUSo4h~)JZJxUk+x`u^mSsR?)yXT=V3a}?U73YGI4`^}K z2(Vbt902Td`1uSUdgf?6TTO~c()p1EkD(euExRbqJ+HGZui^B^V^fW>=nyhNXcUs@$;cs3(+K?#*mXq&Jb~G=&^Sm!3 zWqY8qex)^#^ZxgVO2@V=4(g7 z*(PS9?~iRW+dWD(ym|p_ZJ89|;(3Y7RB8N{G3fzZ9zHV2GmcGik+C`}e`aD~JZH)x zAioUhI6)gqw~Y|#o#1y;OPHGHj&N#VrOd+treEPAWA33fG0Vin;C)(6d}cjAU^EM* zDZ|*dU~Fdc2!?gnp><>j;Mm0H>d6p)R3{Yz@WhwF#strw1@-Dl6HSTU=0(dmDpG%f zmPYX(fQDLheYy0=MBc_B8kXu;a!%mQ92frU!oo+ze-D1Q|J3cYRy3Fl(sgcwh5ZC3 zc-u_dZe>~M4br0%mdc;iQ#|Ln*jCixoWD>j$4l&bnL6Y(dQV2Oo~%CMK<0u*{(858 zs>g3*sbnX%Kx>H*>eD_0@;kBs;buu^-z?=w%!vr}mIgm}I=(U^PGFlgkhhDZSIdUX z7m`olnh}Rl4RGt1c=4t{t;NBmyhQdxv}e(i0w>^cHFau#O;qUXO}ny#^0NSWvC4)#J9@Z)O{yr^+R5tVy+sc zx;V5$bnxY?`SqM>V}0nb0p@sT-mYb&Yo6kCb_ev_(@)V_yHatc6okI`n$hY~sQ1?} zrDjW=v{{UMJLebC7}WR!f2QA`j^8G5hypa$WPZ%I(rS(l@jOi%iC|h06@pd=MDb!} z@a}#(oG{<;8SmruxzMD2X@k89 z>Z8QP!01=;@$kANCc&XEYa~`En+5f4(b(q}UQ-4&Pi2ikb<@h)&Gr!h^$ysXf<}ZE z5p4ewb}~{78P9rizf-BA-Z3xyb$`4phMYC7A|-82bXNOI7V_U#W}P4A@hiN_b{a?8 zF@g=(B+q2+rZDJ|0<^h5K)F&;*81@CW-SRV_Pk{{d~>0%Ods(8NSRx@Q7Di6N(G&o zn^{`mH5X%q%8VZC&B6{nLxxS>{SsM}IRz9y!x)hM$gs@3*gK46h%G2{_pE?+gMQ4^ zLUojW?5qyE{xe$>s=c^nnwUMWIzsd{e@^VBPi+;Tp@n{Rj`d3uyW;1+QOOfxqxc5B zVffbz4P!oSsa?YYJZ>cbNaT!{s3aRyA5M20`xZV+^7ODyCoG4tx(rW33#0$iw?rqr zV+FFM1OOmgb)0Khmw-vMU+9u#Z-)sk@}oUS?Rp`{7|ue%EapuXNbVeCR`*i0vveT8 zVY$CwQtd|(Hj0yZV~~%xfX5D@^u+ng-Zh@?pY()QVvr9xX9LaADPLJo0K$93_xnF8SNjH4MTM 
zDH=vKhL+M{RnNZkSC;+IHXr`tvE78&g{oB{;2ony=lBN{n7Pq2@ap0YP*JPwI^2fa zJPB%2OlHfKHCe!_4P(8?+~u5Ivi$s~w4U=%;+b?R*gV|;j72fhaDZh=)E-dmq_tRm^^yJ%!6tC7LHaMb6<;)79 z9A1Bmb?(#?57RUim;m$BMxB4=>TKWdIowVagTVDmu58Y>=f$k~E6#z?9`#*1P0l*t zqhHz#lANbeO*PB_6b_Qg=+I-I0ia0>>wqIk-0p${Y=q+ujo&1btL9Vha-`wOd%2rkei|)`V5^=}Ijfwps`|#Z zM!PYEiyc+LpOV4RForR+l#8teBjS033gvEbbi<5FV#^w_(!q9j6ON<8Eu=1IWASJE zspX+p&@WTJL~VV2E7_h2rQ{FNr+}7a^$Ad}vjA}3){su6V5{6^etQEzc~jT|)n*Gh z$=}pA#ME{puH*(hfTBS`1V(J%;;bnz+KKF1ByG4p~w@juKbr0Fj_yj&1)=?$8Ztycy0gY;>%kXC4QuJ$+c1WJ3gi{(b! zzD-J1ErB4r=sJ!!z%bW2<1)R#`m9Djz^--vumRGBf38Yry`+%gvlYN) zYtcE>!hG{fq3-TMDj`&vW&G7MxLPFXc#x+ogd4q*d;lTv1Hb4I|H6BEtM}f!Jz4{v zJV9rmMA5X$dA{OB*O8AX*IfVE zcRSloGY~m7CEgs2o9uq>JPtu}{n)2qlRNc`9U4LijlxWvZJ}sK{tfbj$Q~jQ;YZ_9 zE&1Qg(tM4Tluv~O?iz$1&g1Orr9OcFcckBU-DzX9Q^@&-@D5sWI8@)^!jD}_T zihA_l!11kT`dV5|#p6A^Hi@~ymbd}9Nsj=gloC*tGL6XHjo?wAA_#YZqzM82MFfsB z-3CCD7@UX9NTWr^yCIWWerfU?U&4aWrnx@+8CXtc0!TM};>aVgsOftc=DpZYlHHJJ zPlAbeNt~IvG~h%$&p|=@L{C{5VteUH)|VZmV2oS+j+ynCzBNS<(MMz0q#BXW$_@ zw|bg~X6_aHU%vX8gqeV_G>qpvgk1MB2UBy$U)M?Oz@xu~Qw9z}prPtv6GFXz!#cb~ zg#xrU(f~t#@>0GJ%#4+U5v)J*4gRsRf-+ertao{3i_5s0RKP8X0)-6IL9==Sl;525 zEidh#fyfGj08iI8(g+BXVckDPkfe^EzeS84vlruIXzw|w^&M{LQy>K0f7HX)mf~^E z1fJH~uT-{~9a@Wq`&l_VNJO(K2o+-Y(bOuotYb5_co;1=QA_o?BO(1n3HZ&F)bk!( z`AZwQ*MihgZq531rou^o+qe=Q_7VQtW9urWk)rHBZ!xLAOAayJiqz4&e%!gHb{E_` z97S5t7x>ad&S=pz+Eldkj0LIfH$EXx26-d8JWN`dN{Mhg?hfKQW~u7>F87zT`u7nZ z0?P@)m>3wtn5D^dbR#i9_!>4r3Rt1W7SDMZrBXAJ;njzERf66k(eC@lj#NGV9kT{f zv6E3hfND}zEf$pfE=))w4E5zGu~NK#44Vz|7$t!u{3L~SjP=;V%Lx^>7X^v1ZIKsv z?C6=~yj_R9$V2`b0pt;2trzsfJ&ey35pW&?aOVRbLzMe|eZ7O-#&=?_9&iBhAlO+H zCO1(z9a#lq2wGC6lO!Pgl@}4G^U5&k$LJ~v>xjODVN0rDE&)(}qGe4J1WKK}kw2pG zH}h8FKfLHN$PSGJlrzgW2|<1UHMn{NE-^Lh>x*Avnx*?K6mK|G&n?Z1xW-KCqCN&; zVg8bq^DzEXHG&KGW9_f}C%i^MP6}zGKJHeu$<;36k+VlHXD_OAmwdO$^IW_}q2T-P zldvUa{90VXUxfQf$3pK|9 z+PouY4cE7)z9W+zHW{bU+x#8&F$^FFmrg$|c=d*C20P&=RHEn&c%87}T|IFdNPP2> zMA+;Fl)F66%UE7ef|*r{aQoG#w;At$Kfc{ifNR<;bQOSh9)AsYkt>|Y`U1@VB0c$9CFBKR~pn;Ju^rfBJBR7+Ij z&qW)IZSaj#!8@Wj{1y`}rqb{?cn`<>9+wM4Ok4KD5z31Oc2clHnK6d!I#9ZHF0fio z34eT^RWw8?1F`&(PkA|U5aa$mPq3JpzrK+-J?cgWLE=Zw%vH4?z>{)3hbYQY`pQT+a)fplrZ<>4}kQoh9aq7bvn6dz#whXvJA=-|c?xoRgFxn!@V9`$! 
z6n8kin-X{{McPBdhvBSl*Cjh%Nhw-a3Bv^U8175nk4=vWRG_ht_As?+0=-3Jk5pO zBX#!`z*`4v{Yz1Z13`u*n>L|!Lh@@e^9cp`(o%Wm27E&+R_W-zwc8YbQx|n;?IuPH z#?xR5*IM3-vvf2B$U&|;AfC?4!AP10kML$~=__xB7YdGn*XO?R_8({a zNR$8AXc0sEPk^GUm`fW|s^F}Dto+!wZ@rY76(6rA$G&l+EtKIuuguEIGN|74Eq;4q z+R*!zZ<1f3o&tfb;R5!Xnd}I+w+=ER`_d2UDX|GyX~B!MBHtWdA+CQ-9VbCy`!F)kP~=YyEANV0;*viPm!1MzHAR zLztkGn_p~Mv8%qz^xbEGdpF?AhmQ3TD&JyB}g!II55r;-PHJ#jJn@Tu#}(#r(iFfI@skMg5Sj9-wd+pgl|ez#n-tz?1w zx!&3^ZG zAwfmQ?yv|&S2YJOuV~~~0_GD1%lnE!=LUNsssoyuga2?ConDhw@14?NO^Bry_F8PD zr8@RAd*~5T48K7eb=kL6f|oPxxC#lALw|5rA~WfrCjBrAQ~(>wy6!3_uIw%q_x2Cp zeJ@w@6418i{g?5biuq^gQ)P1Mw0!OW+?C{;CL8-SwDnQY`9N*2Z#=FE;X+f@F5+KP z*Whr~)6LaboHJ3|Y!!CSSLlLNjyq@|WApUKU#T1;p7Fc^w?DnsznORviL7DvDbs%b=frwp}syX59A>aXOg#)WnWPJ|#6-v0$FH}^EQ8sNBsAzLIzchHA z5iTFz6k#Mgu6f8O|6Wl2p~PY5W|5qcgASNyV49$r`cj`c`7w6P(s+gOHC!*eWpJ&I zC5+PkV+nVbtSOXP$U|cgEtoosJ;$DQvu*iIQn!5l)F_EVk6xYiP1yR?C8fX-yz zjwcbDD|7zm(dCtbUcZ&1UPD422%mchwXnj(*_830DA-XR=Y-8d?zZ6@8|DZt{0r^y zn}edjVEG0apMaYZ)xF8J%~-sw;(Oys)Q1<-lP5q};KJ>+?&K>|M5V8_$l#dA6FdHI zEL?NjcEm(Ow^G-d&%>wi*c&Mo z@9AF4*H#rhekkw9{#PUV<|;e+G2&l)NFf$04XH^O7DM+B(T zkc~r#x8u%#F>1e6*u>HP?T6tNWegNXvVMVryd=a8FIZWFT*H?lpBw6?QqTa$;;sQV zY)xxl2%#FElf;GLDm08xu!1qm9Th*rnEIUm0r;KWBd^fc)I=-Ii9Jve-_fl90U;JI zhepbTMU3r_Zzg!Ie=Uc4`h5oaTKmUX7{-IvuYOJ~kU$9rs-JRf)vFHyH@0`z7M45F zh8bPQB6}Ao8Pue!y=l-;-Im{>8vqq4DL6UsIZsEX%##)pfxU0UOPFaUr3GPcW|QmRVdZx|E-)u zP5^xi>{!OL9tsR{V74TM`t(waKqy+&5;1rEqN7?M4_)svpeFdl=%|$PFk@^}TSP^( zy4op+RCJ~^$qwa}<~o5L7#1T490XKNOWh{567oU=sEFL%d$N*2JdCqMxeWz6z~Mm= zw`)@uGzhS4Z}gcQOnPZgWx8X_XG=Bd#3Wk+FP>85TN@uWH{}GiLITtCHJov5&wAta zvy)Bi`h30#*4M*TGI?yA%O1vH25{oRn-=_Y1SZldYwAITGzham@hL4%frZy3q>$iB zz_LgT0w9AIInGU>l?2!lLd64JNMbjGeLi3yH@U0HLo;taljmeGo8b5PA9SGlDGuBb zc&qtO^jE$e5f7@O|BOH`QGGgFHTMfo^sfUhh_5LUTK)_&eUau6+wA%!tVN!);@EWM2b5ZqOGFv6i1%KH{lz?4OO(`gQVzs^b@X?zg=w@+N za})B_*$W`GM&O26uoAfJ?q^y*?v#SJ;hQ&awEwLiWT(el|ErCuv-sd&tI2K!@DdlQ zQX*|tPJyntMW}Hx+?C<1*FdF^xnDr@Smx_PvP;+ zb8lTzW>F68l9Qs5xG-wk@0(_PbwdC2!KeQFHimCFt$X~Tg4WG%t~MfQkI;ZasH{PX z=Y^OAO8t+=f6Fp|8IuxIY|~Vq^rlW8CE?oN=@gZs7sg(RUd#KYn&b`x!Fk@TIE4EW zC~19dG#>EWKoO0#HazJt@5I;ILWC|W)49)F7aX-GOj z0!ZdQ=>-B`cara`_kK34Z5gNl8suXwrv(NvjeQY|RN}iD$8l zaqQoXcE!$;ZByC{`8`F2-7nL?dgHNmAcKumlH=}KgwAH;!h!#;Vzo3obge=^k@5HP zG}(Oa*S%uJ1!sw!7H8w+FAjTCz>$T}{OIh=YA;UB(5~;r_A7G%O+!V^RI<>`*m$Kk zHfF3fjKE>Bkf(gg^J|(m*Y`a1!2&z0ML+CF*sZlOYK7Wd<@Y?dQL69SY>W~sZGWKm zhF~$heht^}S)IF1D`VTee}c(!JzxvBhh?;@W=V+5<_q0#-1MFAKS>qE$z&m|4GVRu zNBFeXsNH^=xqW67OfWTn9P-{HEb+FRqNHitqPL z_?{rlHQeCxE-MYQgTX?40YbGS0a8m?AnGqV z4ZmD^v^=DKHZ@ z1Vh8etK09XdBfg{u49m}zGk=ELr_iijqP&u6*T0E`R_WY^}Kf1uQdr!emb2NGPQ`} zB}l*?`)l-va*8LkFw;{o;Gb{f$ioW*LzCQ@70C(nr+Zx)_+${(*qIsG{PfWuhnmCt zfSp&k(ej!(vL}h(z8T*6Zv5LSqM#q~Uyhp(Sfv|J{g7{;PXmVWifbAOH~I|`aVPsV zDr*o71pf^ z^0c&Bm~%KXkDknuqq+U-ow@i9npN5(a-a;4j-2Sn22)tI>HO2zt#okj+p|df5Q)5p0ZS>i%YA(#4gF4K_k(F zfI_X8em)$`&$w0-RSm7Wcc89f^em@4)?X=mCtu`H(M3`*Ud31n%M+bns5xLG4#V0f zG%>k(uMNR=X0c#6rxA?#* zs?Jl;g7}a=aPUx@HltS{dn>ef++!fco&grV=%n1dGMm zI<=StJpbEW#ipoP3bNLaz`7PIX7&e_*znrf|FMiS%gTF^pPDnF&+0z$P{PtPx=bE0 zYD_is;l(VyrW~ibJ9!2xNM_vs;{qtu6ICG86(hE<+58z%y-LJtsG0dHbRSv`&%h*Y z0-_g>YA!JOyy|QZ7+q{Z;&Rc&L1HSWkjTwNFdweUeJ zL>BhE3ly=3 zu1S&RX$vcEQV?0R>5TPMcyZR}s9IfyM64DyOqg(t5n3PU9KS5|wf+5Ctg1yFJjlra zhFxtRsc6x>`ZNtc%)_T?sN#9IN#XaEH>0Blwkz;3OaxCEBZWrQk)&eBxtGb!RZLo{ zf)_c>zl#qiDLn^&=_lj)G9tK+(b^tzvGgpdbHI7Kkks22T;iXjPPTKg%~U|Q+I@qP z-;qc6zS-j9@nXN})c2#3onsn<@8#nKhj)LmjMQ+?Z%_e$!m4YQiJGP3Dujm&-O zNT#-YfW}(0yt+&0$mYpWC8lt|X(Oc{tT7v5&vj3%+V(8_2rGYEI(}`0W9WMRkp(@P 
zx_kJ~v-mJzla4h3`L;3SIC$~EMEi760QR⩔O}@w9zt%N@y2xAb#m9pBtg}0ye3YJ_3h&Yy@EWka z_myqjGN>|6<0^dABhtVT7Qp27;>?m&hecBwhcRffQ>H@uk;|yokh}#4bxe=7Q`?PS zDt!c~Y#CUSx!8FFm?YuP5HwkAm3D{k#DbT~Q@blvwPvrG$AFB{2Klesz?jJVO}Ru_ z3Dr6C1jCWb&Z{b{Uta{NNwr=oy(6MJI(@J~L>#>RpAaYexRq6PtJrrW4I@b@M_O%q z{J+Vt>YpFGpD;nf0w(O}Kt>}ovy1T<;YnY9w_8P4Q@AGRr4sNDI$3;m<2RT*GJNv) zjA0|1pP{C;(_d%VU0Ktpg!388nSy0xz3;WyxyKR1755uzQYp>47`{gf!R$0a@6rW3 z+GQF~sxNGoU+a8st+NV>&iaFh3K}|kOD~x01wtqAcLUTh5gG@hfNG7@im;R9fp0~= zHj{5@TPIGo@(^19D~c4-8LIl^wHQJMT3hiNa=+I@t(gjsfQA1M_au?$GW^5^e>{23 zRwF92`X0atIOfxV$lteC4~e(JDmZ-Df4FGLEmYR(Xcy6BG7`PA4s^`rlvX(HYtf5~ zqDz)=4~NbAGL^x4Z*gGwWLk7dfGJO@YnIj(Yn)6u^VII&s|BeGR_!pjaJMBH)8V_O zUb}hLQagXqgMS+AM*#h4kLwVFq$6EN_APo6c*yrY>dNVr4XDTRJbJnq!wad25!PuP zdwjnYCmhlQ;2la5AFIp4KP7X1`^@>bo_ufYK(WSfiLMd9wD1mwK8cr*EssmG$Rf+u zg_ZqTF-sNn4Hzzivg98puZSn&sg#_;xsY4DJ z53IK`^Q&YsO2$}z^dTZtYJmeI{F=Eze^b@Y;qYC=l5*~s39Jc4y`H$#rZ<9DG{E_xEm9~9@S-}yo_Oi-BmueU8VqU@4J9^j-#pwwdP%hqx2JHW%w$Y%VA@>!M9FWhO?a zfgPR-U%b|pYSbl>8)mSqi5j|5rr*CFuWMADs8ZdRsT7F`ZI+m6O?7* zP9a-JHls_h*az->^o2Dv?J|u5#M3mZ8{i|c$+yC9nD>rN>Ct#mnzq!jh>hfB^k0`1 zL@k4^Aq=~qVJ)Pe{G)l|5woPj?Zc_2=>%r@9BnbXwMLYH{UOxsbHy{j)_hYNfGHcp zfB^=swzAf^6Fei=+7NpqD|;fF%@u)coiDgytE7T~jE7UIoW|CFI5gmF{gra>QwD^y z4JU1;$oiR}@sdrBfm>Op*&Stu4Vg7Y+t%N+vY~UJz64e}P_vXksX%PH(}t22&Z`zs zUs3;M=AQaxbRXJ&{I?M6x83o(f%<%i+>X+ms`W0+nFx9O_ySuAcj4w@B8AT8?t2C8 zL?mBVe2!9UruWy;ub38*% z>4HyfCma=^zRg9u$)VoVw^5KMV%aK-S1ioqM;H1l$>6Erss@bLT{mp9*sial&xfK> z)y4$@RY9qr6p03p;>t-amQqoDeps_X53ZtqM%|m2Bvb$!I5zuQ=T05}6JAgYBBA`g zhcRIh>;9_Ar6+)W3EP0Rme+FSt$nD}KA|>;-^u4=#shc7hn>YrM~wVQ6s9M*f_J`u zP6UY?;@d~oNBOBHx7<34aNbYW0x_0Wa<7|rwoZQ^x090CczZ_Z1!lyn&)sHbJm|TO_Ln6Xi&ds@$0e92&yq5 z4kA_JrPMNB(!5%^vHRuljuqMd|lQ#VT7gc#&qmZEbEV0s;vIv&2ej7x0I;NvLFZy5ByzQ<{|k_TixDvHn)Q3GMq02(g}%_~^Am3setGCJ69{cR zQV_%XprP=ZF*`+c%bB9r*}9zac{e1Q|A~FE6>RQaM!j|*Wf>G>E5K(cs;f2B$5_D4 z6=2#CDU6RAr%OAsvqI^$cqWXC8k0^zyoyG#X@m~^q}SN-U;O|`&zv=rD;~36lNB@( z{X(;zHl(F`i$>K6NyE|ZP&czJjmLSG?xnye^THD|9+=4#i%T6C4h7 zt1VU@qLr@)P!QDv7lR5KD$jswY>CGLcScv{;R*WeiG!`MqxMS6^H`>~>RLk|?6$N@ zFsMb*o?7icj-j*7c2jr3%ocxqmgo|y0;#AaXS1)9G11=zIwC~cf|b~1_FdC zLk{?=Oh+b;_J{9&3_4ry3O=*~)(l|Y=qOSH>5f*u*cHwInL-o2;;ejqFV%A>4LU$vt3yx>jO0u_0H&=7nIt#L|` z<$_9!`pJ^b2DIP6#exCNdS2VZo6dwNn5n5)&;22klxdTUjCPQI1lYt$Q{M>?9}gn6 zrS*dwL;;>Z>4C@h`%thuo zSZn4e{;z~9kB2(_?K%)aKfBps+})^?U5^&+qp&-_P@WKF>4n&;7}vquG`OkyRvN?lrheO9Oir z5079Ws(seakdd{;d+)sb{KMRr26MA5(lF?Wac>3cW0PvayvvCZDBYmqUR>ALHa7ofXLHyQV0y1aR`%6Aw4?rq69)$1d+oANePK1IwS=|NGu zhnKfW*sJA&QoA{28vmaShd%*Ki`1DJac1cbQ>$e97?xgjjP(UWsCR^a($%@Vx4$5` zw)xWyq|QAL2G^)H%R<7`#ag`A^20~YiG8PBEV^N5bpII|8$EIJ z=BWgCLFs+(!8vx2Md?q*daz{g?caR&L{hr7Q>RHN<&8x0f&5Hg%+@$wv>OCRDvYp=_+u;9 zpPi@G7#m*@jLFgm#yCb)dGm)HZRyAJj};uBFkUKR{4#*DFV7cwrx#`vGX}yUmqdbJ znJ*H^Zg^7pjkmIAllDKag8PPB0FhG-d=1%YSd4Wcfit1K1h+gWeHRkb; z!kspumCc$bD*Q$~l;2tc`M-D%;_3IV&{_V-75QIX4BxPv%Vj@*Z**Wr7LHwhCDEM# zaS~7aDz3LRXcyb~SbUeMnV@7wiNUn=4U(+<0LRJCZ?@%lUXT;WzSRpV?&{eOo+njr z++r;E(aCH`BJGFO@DF(ciS0EUlW}gITIyDN@>aN4QkT%0)%3cW~L-XI+`nYbr zFp8J!U0Vfznie#ysNO!acy&_+G*qbJ>_+)*sG*y9nHZ;VgtKg?M1h3Eib+!Pf}?1X zm3g#5M2>*Emb-i>0l{zSwdijj2jHC-8?Y1s44!IBfKUlD^?fFDD<5G-yTP!}7HSh9z`IwrJt3i={Jd_JLWVl^$}DDxrMm8;>+XQdqT( zdY#N)$*yHWk6>D`JnZc;;3vl;ci(cUPT;bYH!w7s0{feg9d)DcPZk{~Zt1)JYohI#?>w2Xtk22l_;-9Z+XLJ4< zdlKa^)zv27QOAriEc5O!aJNpnBu!YV;wom0-pvHAWVHJZJv&;w=|6+AMe{`87;`0n(cAhIoy{->$6EA+vk;c41>-qF9YD+Knm zx(guXu(Z}w+SKQCW-*8+gp_L^52l3mQy$!B7Qhc?P4pV)VhZ z8at{`$3+9f#8{tkp|>Z-yuK4i%u8V+WxRVO%}rqwt6L+PAy8<F9zoA~g~GrBo;V`Y@m2Un`!>k<(l%jc{DH9>Y$e3vb+_HO^eRWF-PLqvf&!Q(Y5e 
zj08H02QZ%c9gqCnMZ>aN)2OICmj`-+p%*ISY__{!_@rouyw3P)p2T(qsR>tE2!D{~ zqx`EMi}&x2$oB`*6(;J_%|)(!0(3`62sX+BjRkQ}c>4PgcnL)$;6O&yRQCFnP zwJ_3+n$F@2+C?I(YEzW^i6q<5N)c+Qg!u4*@uEw-#yq3WvGTu6Q*NF_H^b8D##ze& z!f`9Sf~9YsR^1ux)_w8dhuziF3kS|zMx;csy1^2-1;NiQBmWOIn|SKby58-j^f~zQ zm4d&4CgsPI8@$_=rKy;gept(93+s&I8HUF{J6_~c%-V(|i4&yX87lWb>5|H@F9IUW z2ylIkdloPJ9*;{9$7l3fk}<(6*W>S+ep}3mJ4tHuCWx;p9-!HG?n*xYcI_SEvbTw+ zb*z>#VJU9kwE)v}{hv0Xt(IAAU%ysyGd8&LsU_R6FW7;VGAo)R3B~yZJ2rO9iM8hQ z*18oA&}LuF=hldM!&|pEqE1x@Fi$I}Pk201-k=?2R`NbRrp#AAbgzC^??$s1vw6u{ zad|MGuNoaw)*c&2=D;r8Fd z3IqPv#+o;i~Ov- zu6E2)s@l)}MsPKodG%?3zU-i#W2{6D)H$k6W!}@><+6^6`0YlA zaJulMKKNz4*WN<p(TbLe518OX0sD7v1Pk( zt4;;N(yePHF@_A3T48lfJRL)ArU zxmDSJJu*fi_|O^{TZAgRUiC5v-PTH%jziMH{82uJSBxh&v7AMGq0%R{ z9l8@N^RAGXzSwU~oIoVOvQO79Xf#*pR;NFcC2H%JDx!Y%VgFD^Dqe&M$LQR6*kzEE z5&>F0IevH2k%{aG@dcx$cuc8MrV%#%{q58pZa-g8nS^I-%*mD&hp4 zDX=J4mzyDmw(7rqU#V!K_z?a;w9-ef)*q=={^XjCRJ|thEtB0et0VJkB zdIDBOx!>f`4h>4;Nkf{3G{$a~qN7Y~PiM5${4ExWBD)Z=QmOX0lXxD(`BhCP-jpB> zj*4WCZ5+Bap}xr3?D50M)Vh@_*$iuMbeyYl^yQccT@`v;63grW(uCqpAsgmH8U~V-UcbjB`@c=`ak&U)vBQ(h&3n3j@K z!*&r`)hR|-c8WhW&O3_)+IZI+~~Be@%W|gOhEmn&jCbhs5CD4jkGw{ z{ka5|L{+!gr75u%itm*nXI>KW5vGq2_pB@kLfYhf|MoJiNc4nY2=zaS$l%6}T_``! zxawpIrO(Oq>ghX19d{kb74XY+9-=l`((u%sF>~BuqnJOGG@e*2@LshzcQY_pcjHB>TS@%vJ;(7N+U$mF$d0 z37F-H;wJKb$A|j&o#enKi=pIg?4i7!JQB(qoH1yQbks0no^Ddy7DS_k!{oUb;WsID zU8Yi2BZ9Bpek z!i2tgPkiMjs*U$6#{``FamJKFJ58egd{ru8JgAjaXJW@W>~JrF(9jhN3Le1p! zE_bKcgK!Qu{SMo8;pYhu`3O_ zY@I%(q(?WCLk2|h^L8Td)!z^^O+pO|o@+GEzvEr_gg^AkCO2}G=r8cPPx^J0%RIWC(?b2EDtCv~O&wm;j1tQw z?&nVFq1*nIH$&o>NiN(3r$xioKg$vh^M)5h!^5i})yRV`P2A`JFLL!+mFFJ~iQ)$C zIZnHho~=Yj^A^n}Gr8|zJ>`Nidt`LHL;VxEKL%6*BDOFWG-JzM@@oP<6VV(hX2Ue} z4zfqBUXXwHlNfcJ$a6Y^P*f8L7y_-mX~G$ayQh>s&0tG7%hS8MAWqutaL{2Kz`R_M zB)(STGvo!cL#A%=J84Hk!X~POUA|tl=#$h%r|UDEvx_=nA?`fFIw4$V8tnlQP*a|p zsE4tL?jp~S2MmxwJ~Vg?DzlnsO}}p~X9K%zsv;F%>!Sfk7{?9zNO*7E9}$(FDq=B1E9Xjr0zy}d@cbY^PeH7~q}XUYuO6an5#A}q3G z`9M!*02)3`H52ubuqoI9x5ITz<4yDtRkR3h0u7fKH}cUazV$LBAz$19Z39~dY-38D z+Spi8kNpqc>@X{|ES+gKRnQYIup9P%KcskmD9j28AyJRsHD7$hBaykxRT3JG8%1Nl zpg%ju*{)sdb}hunX^nY$rpq}i3L+K42+*tYmus>L&UC*8&qus#=5|AOs z+1c>oK|s|Q9|-l~Wn(FN(@p1Q?`fX2fjM#b!Up7z&H;-Ui7%&Q7j*+B9+J=U6XOfU zU;{c?0v1|-DP;g^hDa^wB5HLz&por6YUA&(cakK|tHS{S+~oO6WM}CsgFS|UCArK; zu@n~Z)69AKRnD0rVa2pYvs@JM8PXV0XI(Dps*@UEI*c$45Y>;!O-wj75ooeDbM4J6 zxT!_ZL4@hw*yn*7pJqT541-eOBg}CSh40*|{?K)Fh}LN&6bE*28!LMC=$O$7U{4&g zx)PbptWhtHO(GV`A^Ai)b6X&6i+;T%_qwM diff --git a/docs/source/_static/img/pyg2.svg b/docs/source/_static/img/pyg2.svg deleted file mode 100644 index 0e01bf3f4537..000000000000 --- a/docs/source/_static/img/pyg2.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/source/_static/img/pyg_logo.svg b/docs/source/_static/img/pyg_logo.svg deleted file mode 100644 index 55a88b28133b..000000000000 --- a/docs/source/_static/img/pyg_logo.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/source/_static/img/pyg_logo_text.svg b/docs/source/_static/img/pyg_logo_text.svg deleted file mode 100644 index 148829b69fab..000000000000 --- a/docs/source/_static/img/pyg_logo_text.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/source/_templates/autosummary/class.rst b/docs/source/_templates/autosummary/class.rst deleted file mode 100644 index f561df24ca4c..000000000000 --- a/docs/source/_templates/autosummary/class.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. currentmodule:: {{ module }} - -{{ name | underline }} - -.. 
autoclass:: {{ name }} - :members: - :undoc-members: - :exclude-members: message, aggregate, message_and_aggregate, update, MessagePassing diff --git a/docs/source/conf.py b/docs/source/conf.py index b073b68b983e..8e3294f8cd89 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,77 +1,42 @@ import datetime -import doctest - -import sphinx_rtd_theme import torch_geometric +author = 'PyG Team' +project = 'pytorch_geometric' +version = torch_geometric.__version__ +copyright = f'{datetime.datetime.now().year}, {author}' + extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', ] -autosummary_generate = True -templates_path = ['_templates'] +html_theme = 'pyg_sphinx_theme' +html_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' + 'master/pyg_sphinx_theme/static/img/pyg_logo.png') +html_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' + 'master/pyg_sphinx_theme/static/img/favicon.png') -source_suffix = '.rst' master_doc = 'index' - -author = 'Matthias Fey' -project = 'pytorch_geometric' -copyright = f'{datetime.datetime.now().year}, {author}' - -version = torch_geometric.__version__ -release = torch_geometric.__version__ - -html_theme = 'sphinx_rtd_theme' -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -doctest_default_flags = doctest.NORMALIZE_WHITESPACE +add_module_names = False autodoc_member_order = 'bysource' + intersphinx_mapping = { 'python': ('https://docs.python.org/', None), - # TODO "unknown or unsupported inventory version" error for numpy doc. - # 'numpy': ('http://docs.scipy.org/doc/numpy', None), + 'numpy': ('http://docs.scipy.org/doc/numpy', None), 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None), 'torch': ('https://pytorch.org/docs/master', None), } -html_theme_options = { - 'collapse_navigation': False, - 'display_version': True, - 'logo_only': True, - 'navigation_depth': 2, -} - -html_logo = '_static/img/pyg2.png' -html_static_path = ['_static'] -html_css_files = ['css/custom.css'] -rst_context = {'torch_geometric': torch_geometric} - -add_module_names = False - def setup(app): - def skip(app, what, name, obj, skip, options): - members = [ - '__init__', - '__repr__', - '__weakref__', - '__dict__', - '__module__', - ] - return True if name in members else skip - - def rst_jinja_render(app, docname, source): - src = source[0] - rendered = app.builder.templates.render_string(src, rst_context) - source[0] = rendered + def rst_jinja_render(app, _, source): + rst_context = {'torch_geometric': torch_geometric} + source[0] = app.builder.templates.render_string(source[0], rst_context) - app.connect('autodoc-skip-member', skip) - app.connect("source-read", rst_jinja_render) + app.connect('source-read', rst_jinja_render) diff --git a/docs/source/modules/loader.rst b/docs/source/modules/loader.rst index 63d46a7bdd2c..7ab0143fc77f 100644 --- a/docs/source/modules/loader.rst +++ b/docs/source/modules/loader.rst @@ -10,4 +10,3 @@ torch_geometric.loader .. 
automodule:: torch_geometric.loader :members: - :special-members: diff --git a/docs/source/notes/cheatsheet.rst b/docs/source/notes/cheatsheet.rst index d800ee982f30..273da65f985a 100644 --- a/docs/source/notes/cheatsheet.rst +++ b/docs/source/notes/cheatsheet.rst @@ -1,17 +1,17 @@ GNN Cheatsheet ============== -* :class:`~torch_sparse.SparseTensor`: If checked (✓), supports message passing based on :class:`torch_sparse.SparseTensor`, *e.g.*, :obj:`GCNConv(...).forward(x, adj_t)`. See `here `__ for the accompanying tutorial +* :class:`~torch_sparse.SparseTensor`: If checked (✓), supports message passing based on :class:`torch_sparse.SparseTensor`, *e.g.*, :obj:`GCNConv(...).forward(x, adj_t)`. See `here `__ for the accompanying tutorial. -* :obj:`edge_weight`: If checked (✓), supports message passing with one-dimensional edge weight information, *e.g.*, :obj:`GraphConv(...).forward(x, edge_index, edge_weight)` +* :obj:`edge_weight`: If checked (✓), supports message passing with one-dimensional edge weight information, *e.g.*, :obj:`GraphConv(...).forward(x, edge_index, edge_weight)`. -* :obj:`edge_attr`: If checked (✓), supports message passing with multi-dimensional edge feature information, *e.g.*, :obj:`GINEConv(...).forward(x, edge_index, edge_attr)` +* :obj:`edge_attr`: If checked (✓), supports message passing with multi-dimensional edge feature information, *e.g.*, :obj:`GINEConv(...).forward(x, edge_index, edge_attr)`. -* **bipartite**: If checked (✓), supports message passing in bipartite graphs with potentially different feature dimensionalities for source and destination nodes, *e.g.*, :obj:`SAGEConv(in_channels=(16, 32), out_channels=64)` +* **bipartite**: If checked (✓), supports message passing in bipartite graphs with potentially different feature dimensionalities for source and destination nodes, *e.g.*, :obj:`SAGEConv(in_channels=(16, 32), out_channels=64)`. -* **static**: If checked (✓), supports message passing in static graphs, *e.g.*, :obj:`GCNConv(...).forward(x, edge_index)` with :obj:`x` having shape :obj:`[batch_size, num_nodes, in_channels]` +* **static**: If checked (✓), supports message passing in static graphs, *e.g.*, :obj:`GCNConv(...).forward(x, edge_index)` with :obj:`x` having shape :obj:`[batch_size, num_nodes, in_channels]`. -* **lazy**: If checked (✓), supports lazy initialization of message passing layers, *e.g.*, :obj:`SAGEConv(in_channels=-1, out_channels=64)` +* **lazy**: If checked (✓), supports lazy initialization of message passing layers, *e.g.*, :obj:`SAGEConv(in_channels=-1, out_channels=64)`. Graph Neural Network Operators ------------------------------ diff --git a/docs/source/notes/data_cheatsheet.rst b/docs/source/notes/data_cheatsheet.rst index d8b97a11f9ac..910ec7cc6543 100644 --- a/docs/source/notes/data_cheatsheet.rst +++ b/docs/source/notes/data_cheatsheet.rst @@ -5,7 +5,7 @@ Dataset Cheatsheet This dataset statistics table is a **work in progress**. Please consider helping us filling its content by providing statistics for individual datasets. - See `here `__ and `here `__ for examples on how to do so. + See `here `__ and `here `__ for examples on how to do so. .. 
list-table:: :widths: 50 10 10 10 10 10 From 61566508b50ec1f2fb49541ca69d06bcf0208848 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 17 May 2022 08:49:46 -0700 Subject: [PATCH 0054/2432] Fix `readthedocs` build (#4667) * fix * changelog --- CHANGELOG.md | 2 +- docs/source/conf.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 59b38b670317..bd0d1e2f8305 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,7 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed -- Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664)) +- Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664), [#4667](https://github.com/pyg-team/pyg-lib/pull/4667)) - Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) - Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) diff --git a/docs/source/conf.py b/docs/source/conf.py index 8e3294f8cd89..0619b6d73b6e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -22,7 +22,6 @@ html_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' 'master/pyg_sphinx_theme/static/img/favicon.png') -master_doc = 'index' add_module_names = False autodoc_member_order = 'bysource' From be2a463023557f524954584e2b34c3f10385b059 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 17 May 2022 15:16:19 -0700 Subject: [PATCH 0055/2432] Fix the interplay between `TUDataset` and `pre_transform` that modify node features (#4669) * fix num node attrs * changelog * typo --- CHANGELOG.md | 1 + torch_geometric/datasets/tu_dataset.py | 38 +++++++++++--------------- torch_geometric/io/tu.py | 21 +++++++++++--- 3 files changed, 34 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd0d1e2f8305..17065c8be284 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed the interplay between `TUDataset` and `pre_transform` that modify node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) - Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664), [#4667](https://github.com/pyg-team/pyg-lib/pull/4667)) - Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) - Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) diff --git a/torch_geometric/datasets/tu_dataset.py b/torch_geometric/datasets/tu_dataset.py index 4f948f207cdb..88e2af51af63 100644 --- a/torch_geometric/datasets/tu_dataset.py +++ b/torch_geometric/datasets/tu_dataset.py @@ -121,7 +121,16 @@ def __init__(self, root: str, name: str, self.name = name self.cleaned = cleaned super().__init__(root, transform, pre_transform, pre_filter) - self.data, self.slices = torch.load(self.processed_paths[0]) + + out = torch.load(self.processed_paths[0]) + if not isinstance(out, tuple) and len(out) != 3: + raise RuntimeError( + "The 'data' object was created by an older version of PyG. " + "If this error occurred while loading an already existing " + "dataset, remove the 'processed/' directory in the dataset's " + "root folder and try again.") + self.data, self.slices, self.sizes = out + if self.data.x is not None and not use_node_attr: num_node_attributes = self.num_node_attributes self.data.x = self.data.x[:, num_node_attributes:] @@ -141,34 +150,19 @@ def processed_dir(self) -> str: @property def num_node_labels(self) -> int: - if self.data.x is None: - return 0 - for i in range(self.data.x.size(1)): - x = self.data.x[:, i:] - if ((x == 0) | (x == 1)).all() and (x.sum(dim=1) == 1).all(): - return self.data.x.size(1) - i - return 0 + return self.sizes['num_node_labels'] @property def num_node_attributes(self) -> int: - if self.data.x is None: - return 0 - return self.data.x.size(1) - self.num_node_labels + return self.sizes['num_node_attributes'] @property def num_edge_labels(self) -> int: - if self.data.edge_attr is None: - return 0 - for i in range(self.data.edge_attr.size(1)): - if self.data.edge_attr[:, i:].sum() == self.data.edge_attr.size(0): - return self.data.edge_attr.size(1) - i - return 0 + return self.sizes['num_edge_labels'] @property def num_edge_attributes(self) -> int: - if self.data.edge_attr is None: - return 0 - return self.data.edge_attr.size(1) - self.num_edge_labels + return self.sizes['num_edge_attributes'] @property def raw_file_names(self) -> List[str]: @@ -189,7 +183,7 @@ def download(self): os.rename(osp.join(folder, self.name), self.raw_dir) def process(self): - self.data, self.slices = read_tu_data(self.raw_dir, self.name) + self.data, self.slices, sizes = read_tu_data(self.raw_dir, self.name) if self.pre_filter is not None: data_list = [self.get(idx) for idx in range(len(self))] @@ -201,7 +195,7 @@ def process(self): data_list = [self.pre_transform(data) for data in data_list] self.data, self.slices = self.collate(data_list) - torch.save((self.data, self.slices), self.processed_paths[0]) + torch.save((self.data, self.slices, sizes), self.processed_paths[0]) def 
__repr__(self) -> str: return f'{self.name}({len(self)})' diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py index 0483c58fd9e9..4d85d8ea05cc 100644 --- a/torch_geometric/io/tu.py +++ b/torch_geometric/io/tu.py @@ -24,9 +24,11 @@ def read_tu_data(folder, prefix): edge_index = read_file(folder, prefix, 'A', torch.long).t() - 1 batch = read_file(folder, prefix, 'graph_indicator', torch.long) - 1 - node_attributes = node_labels = None + node_attributes = torch.empty((batch.size(0), 0)) if 'node_attributes' in names: node_attributes = read_file(folder, prefix, 'node_attributes') + + node_labels = torch.empty((batch.size(0), 0)) if 'node_labels' in names: node_labels = read_file(folder, prefix, 'node_labels', torch.long) if node_labels.dim() == 1: @@ -35,11 +37,12 @@ def read_tu_data(folder, prefix): node_labels = node_labels.unbind(dim=-1) node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels] node_labels = torch.cat(node_labels, dim=-1).to(torch.float) - x = cat([node_attributes, node_labels]) - edge_attributes, edge_labels = None, None + edge_attributes = torch.empty((edge_index.size(1), 0)) if 'edge_attributes' in names: edge_attributes = read_file(folder, prefix, 'edge_attributes') + + edge_labels = torch.empty((edge_index.size(1), 0)) if 'edge_labels' in names: edge_labels = read_file(folder, prefix, 'edge_labels', torch.long) if edge_labels.dim() == 1: @@ -48,6 +51,8 @@ def read_tu_data(folder, prefix): edge_labels = edge_labels.unbind(dim=-1) edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels] edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float) + + x = cat([node_attributes, node_labels]) edge_attr = cat([edge_attributes, edge_labels]) y = None @@ -65,7 +70,14 @@ def read_tu_data(folder, prefix): data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y) data, slices = split(data, batch) - return data, slices + sizes = { + 'num_node_attributes': node_attributes.size(-1), + 'num_node_labels': node_labels.size(-1), + 'num_edge_attributes': edge_attributes.size(-1), + 'num_edge_labels': edge_labels.size(-1), + } + + return data, slices, sizes def read_file(folder, prefix, name, dtype=None): @@ -75,6 +87,7 @@ def read_file(folder, prefix, name, dtype=None): def cat(seq): seq = [item for item in seq if item is not None] + seq = [item for item in seq if item.numel() > 0] seq = [item.unsqueeze(-1) if item.dim() == 1 else item for item in seq] return torch.cat(seq, dim=-1) if len(seq) > 0 else None From 660a747469260dd6f47379c0d84bd689a9f5c5a3 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 17 May 2022 15:25:54 -0700 Subject: [PATCH 0056/2432] `GCN2Conv`: Allow for optional `edge_weight` (#4670) * initial commit * changelog --- CHANGELOG.md | 1 + torch_geometric/nn/conv/gcn2_conv.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17065c8be284..733c4f56fb36 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) - Fixed the interplay between `TUDataset` and `pre_transform` that modify node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) - Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664), [#4667](https://github.com/pyg-team/pyg-lib/pull/4667)) - Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) diff --git a/torch_geometric/nn/conv/gcn2_conv.py b/torch_geometric/nn/conv/gcn2_conv.py index 6cb5eb6ad431..56c3dd9e24d1 100644 --- a/torch_geometric/nn/conv/gcn2_conv.py +++ b/torch_geometric/nn/conv/gcn2_conv.py @@ -154,8 +154,8 @@ def forward(self, x: Tensor, x_0: Tensor, edge_index: Adj, return out - def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor: - return edge_weight.view(-1, 1) * x_j + def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor: + return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor: return matmul(adj_t, x, reduce=self.aggr) From 0e2f726a3da11d2caa5090ba70e015ea205364ca Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 17 May 2022 23:06:26 -0700 Subject: [PATCH 0057/2432] `GAT`: Weights & Biases Tracking (#4672) * GAT wandb example * changelog * explainer --- .github/workflows/examples.yml | 12 +++++- CHANGELOG.md | 2 +- examples/gat.py | 71 +++++++++++++++++++++------------- examples/gcn.py | 5 +-- 4 files changed, 58 insertions(+), 32 deletions(-) diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index a7bc97fc64fa..49263bf31ad0 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -41,8 +41,18 @@ jobs: run: | pip install .[benchmark] - - name: Run examples + - name: Run GCN on Cora run: | python examples/gcn.py --wandb env: WANDB_API_KEY: ${{ secrets.WANDB_API_KEY }} + + - name: Run GAT on Cora + run: | + python examples/gat.py --wandb + env: + WANDB_API_KEY: ${{ secrets.WANDB_API_KEY }} + + - name: Run GNNExplainer + run: | + python examples/gnn_explainer.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 733c4f56fb36..29f489ff7403 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added -- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656)) +- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) - Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) diff --git a/examples/gat.py b/examples/gat.py index df9da35513f3..49769f09a9d9 100644 --- a/examples/gat.py +++ b/examples/gat.py @@ -1,3 +1,4 @@ +import argparse import os.path as osp import torch @@ -5,58 +6,74 @@ import torch_geometric.transforms as T from torch_geometric.datasets import Planetoid +from torch_geometric.logging import init_wandb, log from torch_geometric.nn import GATConv -dataset = 'Cora' -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) -dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures()) -data = dataset[0] +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, default='Cora') +parser.add_argument('--hidden_channels', type=int, default=8) +parser.add_argument('--heads', type=int, default=8) +parser.add_argument('--lr', type=float, default=0.005) +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--wandb', action='http://github.com/store_true', help='Track experiment') +args = parser.parse_args() +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +init_wandb(name=f'GAT-{args.dataset}', heads=args.heads, epochs=args.epochs, + hidden_channels=args.hidden_channels, lr=args.lr, device=device) + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') +dataset = Planetoid(path, args.dataset, transform=T.NormalizeFeatures()) +data = dataset[0].to(device) -class Net(torch.nn.Module): - def __init__(self, in_channels, out_channels): - super().__init__() - self.conv1 = GATConv(in_channels, 8, heads=8, dropout=0.6) - # On the Pubmed dataset, use heads=8 in conv2. - self.conv2 = GATConv(8 * 8, out_channels, heads=1, concat=False, - dropout=0.6) +class GAT(torch.nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, heads): + super().__init__() + self.conv1 = GATConv(in_channels, hidden_channels, heads, dropout=0.6) + # On the Pubmed dataset, use `heads` output heads in `conv2`. 
+ self.conv2 = GATConv(hidden_channels * heads, out_channels, heads=1, + concat=False, dropout=0.6) def forward(self, x, edge_index): x = F.dropout(x, p=0.6, training=self.training) x = F.elu(self.conv1(x, edge_index)) x = F.dropout(x, p=0.6, training=self.training) x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=-1) + return x -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = Net(dataset.num_features, dataset.num_classes).to(device) -data = data.to(device) +model = GAT(dataset.num_features, args.hidden_channels, dataset.num_classes, + args.heads).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4) -def train(data): +def train(): model.train() optimizer.zero_grad() out = model(data.x, data.edge_index) - loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask]) + loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) loss.backward() optimizer.step() + return float(loss) @torch.no_grad() -def test(data): +def test(): model.eval() - out, accs = model(data.x, data.edge_index), [] - for _, mask in data('train_mask', 'val_mask', 'test_mask'): - acc = float((out[mask].argmax(-1) == data.y[mask]).sum() / mask.sum()) - accs.append(acc) + pred = model(data.x, data.edge_index).argmax(dim=-1) + + accs = [] + for mask in [data.train_mask, data.val_mask, data.test_mask]: + accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) return accs -for epoch in range(1, 201): - train(data) - train_acc, val_acc, test_acc = test(data) - print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' - f'Test: {test_acc:.4f}') +best_val_acc = final_test_acc = 0 +for epoch in range(1, args.epochs + 1): + loss = train() + train_acc, val_acc, tmp_test_acc = test() + if val_acc > best_val_acc: + best_val_acc = val_acc + test_acc = tmp_test_acc + log(Epoch=epoch, Loss=loss, Train=train_acc, Val=val_acc, Test=test_acc) diff --git a/examples/gcn.py b/examples/gcn.py index 09e03d73b61a..728edd67c5f0 100644 --- a/examples/gcn.py +++ b/examples/gcn.py @@ -75,12 +75,11 @@ def train(): @torch.no_grad() def test(): model.eval() - out = model(data.x, data.edge_index, data.edge_weight) + pred = model(data.x, data.edge_index, data.edge_weight).argmax(dim=-1) accs = [] for mask in [data.train_mask, data.val_mask, data.test_mask]: - pred = out[mask].argmax(dim=-1) - accs.append(int((pred == data.y[mask]).sum()) / int(mask.sum())) + accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) return accs From b7597730fbf5e8f72d70f92bd321c6968dd30459 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 18 May 2022 08:00:13 -0700 Subject: [PATCH 0058/2432] fix BZR test --- test/datasets/test_bzr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/datasets/test_bzr.py b/test/datasets/test_bzr.py index 14bbbeda4f9c..c7c662f007f1 100644 --- a/test/datasets/test_bzr.py +++ b/test/datasets/test_bzr.py @@ -7,7 +7,7 @@ def test_bzr(get_dataset): assert len(dataset) == 405 assert dataset.num_features == 53 assert dataset.num_node_labels == 53 - assert dataset.num_node_attributes == 0 + assert dataset.num_node_attributes == 3 assert dataset.num_classes == 2 assert str(dataset) == 'BZR(405)' assert len(dataset[0]) == 3 From 03de852c8700320805f4a6bd98f05fb162550733 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 18 May 2022 08:04:15 -0700 Subject: [PATCH 0059/2432] fix GNNExplainer example CI --- .github/workflows/examples.yml | 1 + 1 file changed, 1 insertion(+) diff --git 
a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 49263bf31ad0..6146a62b6f0a 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -55,4 +55,5 @@ jobs: - name: Run GNNExplainer run: | + pip install matplotlib python examples/gnn_explainer.py From d2b2e662488eae07d153de6d4b8c56c24bf413d9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 18 May 2022 15:53:41 -0700 Subject: [PATCH 0060/2432] Track `GIN` performance on `MUTAG` (#4676) * update * changelog --- .github/workflows/examples.yml | 6 +++ CHANGELOG.md | 2 +- examples/mutag_gin.py | 89 ++++++++++++++++------------------ 3 files changed, 49 insertions(+), 48 deletions(-) diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 6146a62b6f0a..f63cd7c4976c 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -53,6 +53,12 @@ jobs: env: WANDB_API_KEY: ${{ secrets.WANDB_API_KEY }} + - name: Run GIN on MUTAG + run: | + python examples/mutag_gin.py --wandb + env: + WANDB_API_KEY: ${{ secrets.WANDB_API_KEY }} + - name: Run GNNExplainer run: | pip install matplotlib diff --git a/CHANGELOG.md b/CHANGELOG.md index 29f489ff7403..9c8426c8e00d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added -- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672)) +- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) - Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) diff --git a/examples/mutag_gin.py b/examples/mutag_gin.py index 7762e6762365..d5401e8f2bd8 100644 --- a/examples/mutag_gin.py +++ b/examples/mutag_gin.py @@ -1,66 +1,62 @@ +import argparse import os.path as osp import torch import torch.nn.functional as F -from torch.nn import BatchNorm1d, Linear, ReLU, Sequential from torch_geometric.datasets import TUDataset from torch_geometric.loader import DataLoader -from torch_geometric.nn import GINConv, global_add_pool +from torch_geometric.logging import init_wandb, log +from torch_geometric.nn import MLP, GINConv, global_add_pool + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, default='MUTAG') +parser.add_argument('--batch_size', type=int, default=128) +parser.add_argument('--hidden_channels', type=int, default=32) +parser.add_argument('--num_layers', type=int, default=5) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--epochs', type=int, default=100) +parser.add_argument('--wandb', action='http://github.com/store_true', help='Track experiment') +args = parser.parse_args() + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +init_wandb(name=f'GIN-{args.dataset}', batch_size=args.batch_size, lr=args.lr, + epochs=args.epochs, hidden_channels=args.hidden_channels, + num_layers=args.num_layers, device=device) 
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'TU') -dataset = TUDataset(path, name='MUTAG').shuffle() +dataset = TUDataset(path, name=args.dataset).shuffle() train_dataset = dataset[len(dataset) // 10:] -test_dataset = dataset[:len(dataset) // 10] +train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True) -train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True) -test_loader = DataLoader(test_dataset, batch_size=128) +test_dataset = dataset[:len(dataset) // 10] +test_loader = DataLoader(test_dataset, args.batch_size) class Net(torch.nn.Module): - def __init__(self, in_channels, dim, out_channels): + def __init__(self, in_channels, hidden_channels, out_channels, num_layers): super().__init__() - self.conv1 = GINConv( - Sequential(Linear(in_channels, dim), BatchNorm1d(dim), ReLU(), - Linear(dim, dim), ReLU())) - - self.conv2 = GINConv( - Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(), - Linear(dim, dim), ReLU())) - - self.conv3 = GINConv( - Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(), - Linear(dim, dim), ReLU())) + self.convs = torch.nn.ModuleList() + for _ in range(num_layers): + mlp = MLP([in_channels, hidden_channels, hidden_channels]) + self.convs.append(GINConv(nn=mlp, train_eps=False)) + in_channels = hidden_channels - self.conv4 = GINConv( - Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(), - Linear(dim, dim), ReLU())) - - self.conv5 = GINConv( - Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(), - Linear(dim, dim), ReLU())) - - self.lin1 = Linear(dim, dim) - self.lin2 = Linear(dim, out_channels) + self.mlp = MLP([hidden_channels, hidden_channels, out_channels], + batch_norm=False, dropout=0.5) def forward(self, x, edge_index, batch): - x = self.conv1(x, edge_index) - x = self.conv2(x, edge_index) - x = self.conv3(x, edge_index) - x = self.conv4(x, edge_index) - x = self.conv5(x, edge_index) + for conv in self.convs: + x = conv(x, edge_index).relu() x = global_add_pool(x, batch) - x = self.lin1(x).relu() - x = F.dropout(x, p=0.5, training=self.training) - x = self.lin2(x) - return F.log_softmax(x, dim=-1) + return self.mlp(x) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = Net(dataset.num_features, 32, dataset.num_classes).to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.01) +model = Net(dataset.num_features, args.hidden_channels, dataset.num_classes, + args.num_layers).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) def train(): @@ -70,8 +66,8 @@ def train(): for data in train_loader: data = data.to(device) optimizer.zero_grad() - output = model(data.x, data.edge_index, data.batch) - loss = F.nll_loss(output, data.y) + out = model(data.x, data.edge_index, data.batch) + loss = F.cross_entropy(out, data.y) loss.backward() optimizer.step() total_loss += float(loss) * data.num_graphs @@ -85,14 +81,13 @@ def test(loader): total_correct = 0 for data in loader: data = data.to(device) - out = model(data.x, data.edge_index, data.batch) - total_correct += int((out.argmax(-1) == data.y).sum()) + pred = model(data.x, data.edge_index, data.batch).argmax(dim=-1) + total_correct += int((pred == data.y).sum()) return total_correct / len(loader.dataset) -for epoch in range(1, 101): +for epoch in range(1, args.epochs + 1): loss = train() train_acc = test(train_loader) test_acc = test(test_loader) - print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train Acc: {train_acc:.4f} ' - f'Test Acc: {test_acc:.4f}') + log(Epoch=epoch, Loss=loss, Train=train_acc, 
Test=test_acc) From 6cb12eae060a3608bbe0067bd4335bb2e4bfbbfa Mon Sep 17 00:00:00 2001 From: rusty1s Date: Thu, 19 May 2022 06:10:48 -0700 Subject: [PATCH 0061/2432] fix GNNExplainer example CI --- .github/workflows/examples.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index f63cd7c4976c..93606f35e3fe 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -61,5 +61,5 @@ jobs: - name: Run GNNExplainer run: | - pip install matplotlib + pip install networkx matplotlib python examples/gnn_explainer.py From 1bafcc4bf4253d095028fbef1b1c000a6c059ed6 Mon Sep 17 00:00:00 2001 From: adrianomartinelli <32962328+adrianomartinelli@users.noreply.github.com> Date: Fri, 20 May 2022 00:44:28 +0200 Subject: [PATCH 0062/2432] Use class setter properties if present (#4682) * add support for setter methods when Data is subclassed * test support of setters * run flake8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changelog Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_data.py | 26 ++++++++++++++++++++++++++ torch_geometric/data/data.py | 6 +++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c8426c8e00d..17df37f2fb51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Allow for `setter` properties in `Data` and `HeteroData` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682)) - Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) - Fixed the interplay between `TUDataset` and `pre_transform` that modify node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) - Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664), [#4667](https://github.com/pyg-team/pyg-lib/pull/4667)) diff --git a/test/data/test_data.py b/test/data/test_data.py index 3733072cc508..b794364be308 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -213,3 +213,29 @@ def test_data_share_memory(): for data in data_list: assert data.x.is_shared() assert torch.allclose(data.x, torch.full((8, ), 4.)) + + +def test_data_setter_properties(): + class MyData(Data): + def __init__(self): + super().__init__() + self.my_attr1 = 1 + self.my_attr2 = 2 + + @property + def my_attr1(self): + return self._my_attr1 + + @my_attr1.setter + def my_attr1(self, value): + self._my_attr1 = value + + data = MyData() + assert data.my_attr2 == 2 + + assert 'my_attr1' not in data._store + assert data.my_attr1 == 1 + + data.my_attr1 = 2 + assert 'my_attr1' not in data._store + assert data.my_attr1 == 2 diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 0a778214a582..a6bc90710d89 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -375,7 +375,11 @@ def __getattr__(self, key: str) -> Any: return getattr(self._store, key) def __setattr__(self, key: str, value: Any): - setattr(self._store, key, value) + propobj = 
getattr(self.__class__, key, None) + if propobj is None or propobj.fset is None: + setattr(self._store, key, value) + else: + propobj.fset(self, value) def __delattr__(self, key: str): delattr(self._store, key) From 9761ccf4bfd42277b190bf6242307307d4d8d9eb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 19 May 2022 18:42:32 -0700 Subject: [PATCH 0063/2432] Mathjax support in Markdown (#4683) * markdown * Update README.md * update * changelog --- CHANGELOG.md | 1 + README.md | 4 +- docs/source/_figures/edge_conv.svg | 179 ----------------------------- docs/source/_figures/edge_conv.tex | 12 -- 4 files changed, 2 insertions(+), 194 deletions(-) delete mode 100644 docs/source/_figures/edge_conv.svg delete mode 100644 docs/source/_figures/edge_conv.tex diff --git a/CHANGELOG.md b/CHANGELOG.md index 17df37f2fb51..dd5d6aab6644 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) - Allow for `setter` properties in `Data` and `HeteroData` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682)) - Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) - Fixed the interplay between `TUDataset` and `pre_transform` that modify node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) diff --git a/README.md b/README.md index f45f464ad0fe..959114a689f4 100644 --- a/README.md +++ b/README.md @@ -123,9 +123,7 @@ More information about evaluating final model performance can be found in the co In addition to the easy application of existing GNNs, PyG makes it simple to implement custom Graph Neural Networks (see [here](https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html) for the accompanying tutorial). For example, this is all it takes to implement the [edge convolutional layer](https://arxiv.org/abs/1801.07829) from Wang *et al.*: -

+$$x_i^{\prime} ~ = ~ \max_{j \in \mathcal{N}(i)} ~ \textrm{MLP}_{\theta} \left( [ ~ x_i, ~ x_j - x_i ~ ] \right)$$ ```python import torch diff --git a/docs/source/_figures/edge_conv.svg b/docs/source/_figures/edge_conv.svg deleted file mode 100644 index c27c67e6daf2..000000000000 --- a/docs/source/_figures/edge_conv.svg +++ /dev/null @@ -1,179 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/source/_figures/edge_conv.tex b/docs/source/_figures/edge_conv.tex deleted file mode 100644 index 9e3f3db263e5..000000000000 --- a/docs/source/_figures/edge_conv.tex +++ /dev/null @@ -1,12 +0,0 @@ -\documentclass{standalone} - -\usepackage{bm} -\usepackage{tikz} - -\begin{document} - -\begin{tikzpicture} - \node at (0,0) {$\bm{x}_i^{\prime} = \max_{j \in \mathcal{N}(i)} \textsc{MLP}_{\hspace{-1pt}\theta} \big([ \bm{x}_i, \bm{x}_j - \bm{x}_i ]\big)$}; -\end{tikzpicture} - -\end{document} From c4977eaf5f549228951adaede9147c17731d2a2f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 20 May 2022 06:46:32 -0700 Subject: [PATCH 0064/2432] Fix `setter` properties in `Data` (#4686) * fix * update --- CHANGELOG.md | 2 +- torch_geometric/data/data.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd5d6aab6644..6b31ee021aaa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed - Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) -- Allow for `setter` properties in `Data` and `HeteroData` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682)) +- Allow for `setter` properties in `Data` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682), [#4686](https://github.com/pyg-team/pytorch_geometric/pull/4686)) - Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) - Fixed the interplay between `TUDataset` and `pre_transform` that modify node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) - Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664), [#4667](https://github.com/pyg-team/pyg-lib/pull/4667)) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index a6bc90710d89..580c9bdd3b6e 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -376,10 +376,10 @@ def __getattr__(self, key: str) -> Any: def __setattr__(self, key: str, value: Any): propobj = getattr(self.__class__, key, None) - if propobj is None or propobj.fset is None: - setattr(self._store, key, value) - else: + if propobj is not None and getattr(propobj, 'fset', None) is not None: propobj.fset(self, value) + else: + setattr(self._store, key, value) def __delattr__(self, key: str): delattr(self._store, key) From c7ac5506d88c9ed258d63d08830c7e5ab76f18a7 Mon Sep 17 00:00:00 2001 From: Michael Galkin Date: Fri, 20 May 2022 19:02:12 -0400 Subject: [PATCH 0065/2432] Fix rank computation in the RGCN link prediction example (#4688) 
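The fix below changes how a rank is computed when several candidate triples tie with the true triple's score: instead of taking the position returned by a plain `argsort`, it averages the optimistic rank (ties resolved in favor of the true triple) and the pessimistic rank (ties resolved against it). A small, self-contained illustration of that convention, assuming the true triple's score sits at index 0 as in the example script:

```python
import torch

scores = torch.tensor([0.7, 0.9, 0.7, 0.2])  # index 0 holds the true triple
true = scores[0]

optimistic = int((scores > true).sum()) + 1  # rank 2: only 0.9 beats 0.7
pessimistic = int((scores >= true).sum())    # rank 3: the tied 0.7 counts against us
fair_rank = (optimistic + pessimistic) / 2   # 2.5, the value fed into the MRR
print(fair_rank)
```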
* compute ranks fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * pleasing PEP8 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + examples/rgcn_link_pred.py | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b31ee021aaa..01e0f6184a0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed the ranking protocol bug in the RGCN link prediction example ([#4688](https://github.com/pyg-team/pytorch_geometric/pull/4688)) - Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) - Allow for `setter` properties in `Data` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682), [#4686](https://github.com/pyg-team/pytorch_geometric/pull/4686)) - Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) diff --git a/examples/rgcn_link_pred.py b/examples/rgcn_link_pred.py index 6c988ed931f1..89ecd7e10bcf 100644 --- a/examples/rgcn_link_pred.py +++ b/examples/rgcn_link_pred.py @@ -113,6 +113,16 @@ def test(): return valid_mrr, test_mrr +@torch.no_grad() +def compute_rank(ranks): + # fair ranking prediction as the average + # of optimistic and pessimistic ranking + true = ranks[0] + optimistic = (ranks > true).sum() + 1 + pessimistic = (ranks >= true).sum() + return (optimistic + pessimistic).float() * 0.5 + + @torch.no_grad() def compute_mrr(z, edge_index, edge_type): ranks = [] @@ -135,9 +145,8 @@ def compute_mrr(z, edge_index, edge_type): eval_edge_type = torch.full_like(tail, fill_value=rel) out = model.decode(z, eval_edge_index, eval_edge_type) - perm = out.argsort(descending=True) - rank = int((perm == 0).nonzero(as_tuple=False).view(-1)[0]) - ranks.append(rank + 1) + rank = compute_rank(out) + ranks.append(rank) # Try all nodes as heads, but delete true triplets: head_mask = torch.ones(data.num_nodes, dtype=torch.bool) @@ -155,9 +164,8 @@ def compute_mrr(z, edge_index, edge_type): eval_edge_type = torch.full_like(head, fill_value=rel) out = model.decode(z, eval_edge_index, eval_edge_type) - perm = out.argsort(descending=True) - rank = int((perm == 0).nonzero(as_tuple=False).view(-1)[0]) - ranks.append(rank + 1) + rank = compute_rank(out) + ranks.append(rank) return (1. 
/ torch.tensor(ranks, dtype=torch.float)).mean() From 02fa0e53b3e3bbfa58cf40bee062101d86306900 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Mon, 23 May 2022 12:05:59 +0300 Subject: [PATCH 0066/2432] `RevGNN` implementation (#4671) * Fixed RuntimeError if graphgym is tested on GPU devices * Added RevGNN implementation to models * Added an example of RevGNN on ogbn-products * Added test example for RevGNN * Added GroupAddRev * Fixed typo of pytest function Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Use class name for error msg Co-authored-by: Matthias Fey * Update docs Co-authored-by: Matthias Fey * Use args and kwargs for abstract class forward method Co-authored-by: Matthias Fey * Use args and kwargs for abstract class inverse method Co-authored-by: Matthias Fey * Use callable forward Co-authored-by: Matthias Fey * Use callable forward Co-authored-by: Matthias Fey * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use T, count_parameters and index_to_mask from pyg utils * Minor fixes * Added testing for backward * Set dropout_mask to None when p=0. * Put map to zip directly * Added docs to InvertibleFunction * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Minor fix Co-authored-by: Matthias Fey * Split pytest units * Minor fix on docs * Minor fix on docs * readme * typo * rename * update * update * update * fix * typo Co-authored-by: Matthias Fey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + README.md | 1 + examples/rev_gnn.py | 186 ++++++++++++++++ test/graphgym/test_graphgym.py | 3 + test/nn/models/test_rev_gnn.py | 95 ++++++++ torch_geometric/nn/models/__init__.py | 2 + torch_geometric/nn/models/rev_gnn.py | 305 ++++++++++++++++++++++++++ 7 files changed, 593 insertions(+) create mode 100644 examples/rev_gnn.py create mode 100644 test/nn/models/test_rev_gnn.py create mode 100644 torch_geometric/nn/models/rev_gnn.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 01e0f6184a0c..e47f57018629 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
 ## [2.0.5] - 2022-MM-DD
 ### Added
+- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671))
 - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676))
 - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628))
 - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653))
 - Added the `MLP.plain_last=False` option ([4652](https://github.com/pyg-team/pytorch_geometric/pull/4652))
diff --git a/README.md b/README.md
index 959114a689f4..62f200e2223c 100644
--- a/README.md
+++ b/README.md
@@ -296,6 +296,7 @@ Unlike simple stacking of GNN layers, these models could involve pre-processing,
 * **Graph-less Neural Networks** from Zhang *et al.*: [Graph-less Neural Networks: Teaching Old MLPs New Tricks via Distillation](https://arxiv.org/abs/2110.08727) (CoRR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/glnn.py)]
 * **[LINKX](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.LINKX)** from Lim *et al.*: [Large Scale Learning on Non-Homophilous Graphs: New Benchmarks and Strong Simple Methods](https://arxiv.org/abs/2110.14446) (NeurIPS 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/linkx.py)]
+* **[RevGNN](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.GroupAddRev)** from Li *et al.*: [Training Graph Neural Networks with 1000 Layers](https://arxiv.org/abs/2106.07476) (ICML 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rev_gnn.py)]
 
 **GNN operators and utilities:**
diff --git a/examples/rev_gnn.py b/examples/rev_gnn.py
new file mode 100644
index 000000000000..22b8476b29c5
--- /dev/null
+++ b/examples/rev_gnn.py
@@ -0,0 +1,186 @@
+# Model Parameters: 206,607
+# Peak GPU memory usage: 1.57 G
+# RevGNN with 7 layers and 160 channels reaches around 0.8200 test accuracy.
+# Final Train: 0.9373, Highest Val: 0.9230, Final Test: 0.8200.
+# Training longer should produce better results.
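Before the full training script, the core pattern the example relies on is compact: a seed GNN block that maps `channels // num_groups` features to `channels // num_groups` features is wrapped by `GroupAddRev`, which then acts like an ordinary layer on the full feature dimension while freeing the input activations and recomputing them during the backward pass. A minimal sketch of that pattern (the sizes here are illustrative):

```python
import torch
from torch_geometric.nn import GroupAddRev, SAGEConv

num_groups, channels = 2, 32
x = torch.randn(4, channels)                             # node features
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])  # toy graph

# The seed block only ever sees one group of channels:
seed = SAGEConv(channels // num_groups, channels // num_groups)
conv = GroupAddRev(seed, num_groups=num_groups)

out = conv(x, edge_index)              # shape [4, 32]; the storage of `x` is freed
x_rec = conv.inverse(out, edge_index)  # reconstructs the input on demand
```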
+
+import os.path as osp
+
+import torch
+import torch.nn.functional as F
+from torch.nn import LayerNorm, Linear
+from torch_sparse import SparseTensor
+from tqdm import tqdm
+
+import torch_geometric.transforms as T
+from torch_geometric.loader import RandomNodeSampler
+from torch_geometric.nn import GroupAddRev, SAGEConv
+from torch_geometric.utils import index_to_mask
+
+
+class GNNBlock(torch.nn.Module):
+    def __init__(self, in_channels, out_channels):
+        super().__init__()
+        self.norm = LayerNorm(in_channels, elementwise_affine=True)
+        self.conv = SAGEConv(in_channels, out_channels)
+
+    def reset_parameters(self):
+        self.norm.reset_parameters()
+        self.conv.reset_parameters()
+
+    def forward(self, x, edge_index, dropout_mask=None):
+        x = self.norm(x).relu()
+        if self.training and dropout_mask is not None:
+            x = x * dropout_mask
+        return self.conv(x, edge_index)
+
+
+class RevGNN(torch.nn.Module):
+    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
+                 dropout, num_groups=2):
+        super().__init__()
+
+        self.dropout = dropout
+
+        self.lin1 = Linear(in_channels, hidden_channels)
+        self.lin2 = Linear(hidden_channels, out_channels)
+        self.norm = LayerNorm(hidden_channels, elementwise_affine=True)
+
+        assert hidden_channels % num_groups == 0
+        self.convs = torch.nn.ModuleList()
+        for _ in range(num_layers):
+            conv = GNNBlock(
+                hidden_channels // num_groups,
+                hidden_channels // num_groups,
+            )
+            self.convs.append(GroupAddRev(conv, num_groups=num_groups))
+
+    def reset_parameters(self):
+        self.lin1.reset_parameters()
+        self.lin2.reset_parameters()
+        self.norm.reset_parameters()
+        for conv in self.convs:
+            conv.reset_parameters()
+
+    def forward(self, x, edge_index):
+        # Generate a dropout mask which will be shared across GNN blocks:
+        mask = None
+        if self.training and self.dropout > 0:
+            mask = torch.zeros_like(x).bernoulli_(1 - self.dropout)
+            mask = mask.requires_grad_(False)
+            mask = mask / (1 - self.dropout)
+
+        x = self.lin1(x)
+        for conv in self.convs:
+            x = conv(x, edge_index, mask)
+        x = self.norm(x).relu()
+        x = F.dropout(x, p=self.dropout, training=self.training)
+        return self.lin2(x)
+
+
+from ogb.nodeproppred import Evaluator, PygNodePropPredDataset  # noqa
+
+transform = T.AddSelfLoops()
+root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products')
+dataset = PygNodePropPredDataset('ogbn-products', root, transform=transform)
+evaluator = Evaluator(name='ogbn-products')
+
+data = dataset[0]
+split_idx = dataset.get_idx_split()
+for split in ['train', 'valid', 'test']:
+    data[f'{split}_mask'] = index_to_mask(split_idx[split], data.y.shape[0])
+
+train_loader = RandomNodeSampler(data, num_parts=10, shuffle=True,
+                                 num_workers=5)
+# Increase the num_parts of the test loader if you cannot fit
+# the full batch graph into your GPU:
+test_loader = RandomNodeSampler(data, num_parts=1, num_workers=5)
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+model = RevGNN(
+    in_channels=dataset.num_features,
+    hidden_channels=160,
+    out_channels=dataset.num_classes,
+    num_layers=7,  # You can try 1000 layers for fun
+    dropout=0.5,
+    num_groups=2,
+).to(device)
+optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
+
+
+def train(epoch):
+    model.train()
+
+    pbar = tqdm(total=len(train_loader))
+    pbar.set_description(f'Training epoch: {epoch:03d}')
+
+    total_loss = total_examples = 0
+    for data in train_loader:
+        data = data.to(device)
+        optimizer.zero_grad()
+
+        # Memory-efficient aggregations:
+        adj_t = 
SparseTensor.from_edge_index(data.edge_index).t() + out = model(data.x, adj_t)[data.train_mask] + loss = F.cross_entropy(out, data.y[data.train_mask].view(-1)) + loss.backward() + optimizer.step() + + total_loss += float(loss) * int(data.train_mask.sum()) + total_examples += int(data.train_mask.sum()) + pbar.update(1) + + pbar.close() + + return total_loss / total_examples + + +@torch.no_grad() +def test(epoch): + model.eval() + + y_true = {"train": [], "valid": [], "test": []} + y_pred = {"train": [], "valid": [], "test": []} + + pbar = tqdm(total=len(test_loader)) + pbar.set_description(f'Evaluating epoch: {epoch:03d}') + + for data in test_loader: + data = data.to(device) + + # Memory-efficient aggregations + adj_t = SparseTensor.from_edge_index(data.edge_index).t() + out = model(data.x, adj_t).argmax(dim=-1, keepdim=True) + + for split in ['train', 'valid', 'test']: + mask = data[f'{split}_mask'] + y_true[split].append(data.y[mask].cpu()) + y_pred[split].append(out[mask].cpu()) + + pbar.update(1) + + pbar.close() + + train_acc = evaluator.eval({ + 'y_true': torch.cat(y_true['train'], dim=0), + 'y_pred': torch.cat(y_pred['train'], dim=0), + })['acc'] + + valid_acc = evaluator.eval({ + 'y_true': torch.cat(y_true['valid'], dim=0), + 'y_pred': torch.cat(y_pred['valid'], dim=0), + })['acc'] + + test_acc = evaluator.eval({ + 'y_true': torch.cat(y_true['test'], dim=0), + 'y_pred': torch.cat(y_pred['test'], dim=0), + })['acc'] + + return train_acc, valid_acc, test_acc + + +for epoch in range(1, 501): + loss = train(epoch) + train_acc, val_acc, test_acc = test(epoch) + print(f'Loss: {loss:.4f}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' + f'Test: {test_acc:.4f}') diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py index e783ff9ff977..b19c8dd0c44b 100644 --- a/test/graphgym/test_graphgym.py +++ b/test/graphgym/test_graphgym.py @@ -154,18 +154,21 @@ def test_graphgym_module(tmpdir): keys = {"loss", "true", "pred_score", "step_end_time"} # test training step batch = next(iter(loaders[0])) + batch.to(model.device) outputs = model.training_step(batch) assert keys == set(outputs.keys()) assert isinstance(outputs["loss"], torch.Tensor) # test validation step batch = next(iter(loaders[1])) + batch.to(model.device) outputs = model.validation_step(batch) assert keys == set(outputs.keys()) assert isinstance(outputs["loss"], torch.Tensor) # test test step batch = next(iter(loaders[2])) + batch.to(model.device) outputs = model.test_step(batch) assert keys == set(outputs.keys()) assert isinstance(outputs["loss"], torch.Tensor) diff --git a/test/nn/models/test_rev_gnn.py b/test/nn/models/test_rev_gnn.py new file mode 100644 index 000000000000..cd2921bf2f73 --- /dev/null +++ b/test/nn/models/test_rev_gnn.py @@ -0,0 +1,95 @@ +import pytest +import torch + +from torch_geometric.nn import GraphConv, GroupAddRev, SAGEConv +from torch_geometric.nn.dense.linear import Linear + + +@pytest.mark.parametrize('num_groups', [2, 4, 8, 16]) +def test_revgnn_forward_inverse(num_groups): + x = torch.randn(4, 32) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + + lin = Linear(32, 32) + conv = SAGEConv(32 // num_groups, 32 // num_groups) + conv = GroupAddRev(conv, num_groups=num_groups) + assert str(conv) == (f'GroupAddRev(SAGEConv({32 // num_groups}, ' + f'{32 // num_groups}, aggr=mean), ' + f'num_groups={num_groups})') + + h = lin(x) + h_o = h.clone().detach() + + out = conv(h, edge_index) + assert h.storage().size() == 0 + + h_rev = conv.inverse(out, edge_index) + assert 
torch.allclose(h_o, h_rev, atol=0.001) + + +@pytest.mark.parametrize('num_groups', [2, 4, 8, 16]) +def test_revgnn_backward(num_groups): + x = torch.randn(4, 32) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + + lin = Linear(32, 32) + conv = SAGEConv(32 // num_groups, 32 // num_groups) + conv = GroupAddRev(conv, num_groups=num_groups) + + h = lin(x) + out = conv(h, edge_index) + target = out.mean() + target.backward() + + +@pytest.mark.parametrize('num_groups', [2, 4, 8, 16]) +def test_revgnn_multi_backward(num_groups): + x = torch.randn(4, 32) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + + lin = Linear(32, 32) + conv = SAGEConv(32 // num_groups, 32 // num_groups) + conv = GroupAddRev(conv, num_groups=num_groups, num_bwd_passes=4) + + h = lin(x) + out = conv(h, edge_index) + target = out.mean() + target.backward(retain_graph=True) + target.backward(retain_graph=True) + torch.autograd.grad(outputs=target, inputs=[h] + list(conv.parameters()), + retain_graph=True) + torch.autograd.grad(outputs=target, inputs=[h] + list(conv.parameters())) + + +@pytest.mark.parametrize('num_groups', [2, 4, 8, 16]) +def test_revgnn_diable(num_groups): + x = torch.randn(4, 32) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + + lin = Linear(32, 32) + conv = SAGEConv(32 // num_groups, 32 // num_groups) + conv = GroupAddRev(conv, num_groups=num_groups, disable=True) + + h = lin(x) + out = conv(h, edge_index) + target = out.mean() + target.backward() + + # Memory will not be freed if disable: + assert h.storage().size() == 4 * 32 + + +@pytest.mark.parametrize('num_groups', [2, 4, 8, 16]) +def test_revgnn_with_args(num_groups): + x = torch.randn(4, 32) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + edge_weight = torch.rand(4) + + lin = Linear(32, 32) + conv = GraphConv(32 // num_groups, 32 // num_groups) + conv = GroupAddRev(conv, num_groups=num_groups) + + h = lin(x) + out = conv(h, edge_index, edge_weight) + target = out.mean() + target.backward() diff --git a/torch_geometric/nn/models/__init__.py b/torch_geometric/nn/models/__init__.py index 6de483ba7443..e541e36c1b33 100644 --- a/torch_geometric/nn/models/__init__.py +++ b/torch_geometric/nn/models/__init__.py @@ -21,6 +21,7 @@ from .linkx import LINKX from .lightgcn import LightGCN from .mask_label import MaskLabel +from .rev_gnn import GroupAddRev __all__ = [ 'MLP', @@ -55,6 +56,7 @@ 'LINKX', 'LightGCN', 'MaskLabel', + 'GroupAddRev', ] classes = __all__ diff --git a/torch_geometric/nn/models/rev_gnn.py b/torch_geometric/nn/models/rev_gnn.py new file mode 100644 index 000000000000..4f5a48acbeb5 --- /dev/null +++ b/torch_geometric/nn/models/rev_gnn.py @@ -0,0 +1,305 @@ +import copy +import math +from abc import ABC, abstractmethod +from typing import Any, List, Optional, Union + +import torch +from torch import Tensor + +from torch_geometric.typing import Adj + + +class InvertibleFunction(torch.autograd.Function): + r"""An invertible autograd function. This allows for automatic + backpropagation in a reversible fashion so that the memory of intermediate + results can be freed during the forward pass and be constructed on-the-fly + during the bachward pass. + + Args: + ctx (torch.autograd.function.InvertibleFunctionBackward): + A context object that can be used to stash information for backward + computation. + fn (torch.nn.Module): The forward function. + fn_inverse (torch.nn.Module): The inverse function to recompute the + freed input. 
+ num_bwd_passes (int): Number of backward passes to retain a link + with the output. After the last backward pass the output is + discarded and memory is freed. + num_inputs (int): The number of inputs to the forward function. + *args (tuple): Inputs and weights. + """ + @staticmethod + def forward(ctx, fn: torch.nn.Module, fn_inverse: torch.nn.Module, + num_bwd_passes: int, num_inputs: int, *args): + ctx.fn = fn + ctx.fn_inverse = fn_inverse + ctx.weights = args[num_inputs:] + ctx.num_bwd_passes = num_bwd_passes + ctx.num_inputs = num_inputs + inputs = args[:num_inputs] + ctx.input_requires_grad = [] + + with torch.no_grad(): # Make a detached copy which shares the storage: + x = [] + for element in inputs: + if isinstance(element, torch.Tensor): + x.append(element.detach()) + ctx.input_requires_grad.append(element.requires_grad) + else: + x.append(element) + ctx.input_requires_grad.append(None) + outputs = ctx.fn(*x) + + if not isinstance(outputs, tuple): + outputs = (outputs, ) + + # Detaches outputs in-place, allows discarding the intermedate result: + detached_outputs = tuple(element.detach_() for element in outputs) + + # Clear memory of node features + inputs[0].storage().resize_(0) + + # Store these tensor nodes for backward passes: + ctx.inputs = [inputs] * num_bwd_passes + ctx.outputs = [detached_outputs] * num_bwd_passes + + return detached_outputs + + @staticmethod + def backward(ctx, *grad_outputs): + if len(ctx.outputs) == 0: + raise RuntimeError( + f"Trying to perform a backward pass on the " + f"'InvertibleFunction' for more than '{ctx.num_bwd_passes}' " + f"times. Try raising 'num_bwd_passes'.") + + inputs = ctx.inputs.pop() + outputs = ctx.outputs.pop() + + # Recompute input by swapping out the first argument: + with torch.no_grad(): + inputs_inverted = ctx.fn_inverse(*(outputs + inputs[1:])) + if len(ctx.outputs) == 0: # Clear memory from outputs: + for element in outputs: + element.storage().resize_(0) + + if not isinstance(inputs_inverted, tuple): + inputs_inverted = (inputs_inverted, ) + + for elem_orig, elem_inv in zip(inputs, inputs_inverted): + elem_orig.storage().resize_(int(math.prod(elem_orig.size()))) + elem_orig.set_(elem_inv) + + # Compute gradients with grad enabled: + with torch.set_grad_enabled(True): + detached_inputs = [] + for element in inputs: + if isinstance(element, torch.Tensor): + detached_inputs.append(element.detach()) + else: + detached_inputs.append(element) + detached_inputs = tuple(detached_inputs) + for x, req_grad in zip(detached_inputs, ctx.input_requires_grad): + if isinstance(x, torch.Tensor): + x.requires_grad = req_grad + tmp_output = ctx.fn(*detached_inputs) + + if not isinstance(tmp_output, tuple): + tmp_output = (tmp_output, ) + + filtered_detached_inputs = tuple( + filter( + lambda x: x.requires_grad + if isinstance(x, torch.Tensor) else False, + detached_inputs, + )) + gradients = torch.autograd.grad( + outputs=tmp_output, + inputs=filtered_detached_inputs + ctx.weights, + grad_outputs=grad_outputs, + ) + + input_gradients = [] + i = 0 + for rg in ctx.input_requires_grad: + if rg: + input_gradients.append(gradients[i]) + i += 1 + else: + input_gradients.append(None) + + gradients = tuple(input_gradients) + gradients[-len(ctx.weights):] + + return (None, None, None, None) + gradients + + +class InvertibleModule(torch.nn.Module, ABC): + r"""An abstract class for implementing invertible modules. 
+ + Args: + disable (bool, optional): If set to :obj:`True`, will disable the usage + of :class:`InvertibleFunction` and will execute the module without + memory savings. (default: :obj:`False`) + num_bwd_passes (int, optional): Number of backward passes to retain a + link with the output. After the last backward pass the output is + discarded and memory is freed. (default: :obj:`1`) + """ + def __init__(self, disable: bool = False, num_bwd_passes: int = 1): + super().__init__() + self.disable = disable + self.num_bwd_passes = num_bwd_passes + + def forward(self, *args): + return self._fn_apply(args, self._forward, self._inverse) + + def inverse(self, *args): + return self._fn_apply(args, self._inverse, self._forward) + + @abstractmethod + def _forward(self): + pass + + @abstractmethod + def _inverse(self): + pass + + def _fn_apply(self, args, fn, fn_inverse): + if not self.disable: + out = InvertibleFunction.apply( + fn, + fn_inverse, + self.num_bwd_passes, + len(args), + *args, + *tuple(p for p in self.parameters() if p.requires_grad), + ) + else: + out = fn(*args) + + # If the layer only has one input, we unpack the tuple: + if isinstance(out, tuple) and len(out) == 1: + return out[0] + + return out + + +class GroupAddRev(InvertibleModule): + r"""The Grouped Reversible GNN module from the `"Graph Neural Networks with + 1000 Layers" `_ paper. + This module enables training of arbitary deep GNNs with a memory complexity + independent of the number of layers. + + It does so by partitioning input node features :math:`\mathbf{X}` into + :math:`C` groups across the feature dimension. Then, a grouped reversible + GNN block :math:`f_{\theta(i)}` operates on a group of inputs and produces + a group of outputs: + + .. math:: + + \mathbf{X}^{\prime}_0 &= \sum_{i=2}^C \mathbf{X}_i + + \mathbf{X}^{\prime}_i &= f_{\theta(i)} ( \mathbf{X}^{\prime}_{i - 1}, + \mathbf{A}) + \mathbf{X}_i + + for all :math:`i \in \{ 1, \ldots, C \}`. + + .. note:: + + For an example of using :class:`GroupAddRev`, see `examples/rev_gnn.py + `_. + + Args: + conv (torch.nn.Module or torch.nn.ModuleList]): A seed GNN. The input + and output feature dimensions need to match. + split_dim (int optional): The dimension across which to split groups. + (default: :obj:`-1`) + num_groups (Optional[int], optional): The number of groups :math:`C`. + (default: :obj:`None`) + disable (bool, optional): If set to :obj:`True`, will disable the usage + of :class:`InvertibleFunction` and will execute the module without + memory savings. (default: :obj:`False`) + num_bwd_passes (int, optional): Number of backward passes to retain a + link with the output. After the last backward pass the output is + discarded and memory is freed. 
(default: :obj:`1`) + """ + def __init__( + self, + conv: Union[torch.nn.Module, torch.nn.ModuleList], + split_dim: int = -1, + num_groups: Optional[int] = None, + disable: bool = False, + num_bwd_passes: int = 1, + ): + super().__init__(disable, num_bwd_passes) + self.split_dim = split_dim + + if isinstance(conv, torch.nn.ModuleList): + self.convs = conv + else: + assert num_groups is not None, "Please specific 'num_groups'" + self.convs = torch.nn.ModuleList([conv]) + for i in range(num_groups - 1): + conv = copy.deepcopy(self.convs[0]) + if hasattr(conv, 'reset_parameters'): + conv.reset_parameters() + self.convs.append(conv) + + if len(self.convs) < 2: + raise ValueError(f"The number of groups should not be smaller " + f"than '2' (got '{self.num_groups}'))") + + @property + def num_groups(self) -> int: + r"""The number of groups :math:`C`.""" + return len(self.convs) + + def reset_parameters(self): + for conv in self.convs: + conv.reset_parameters() + + def _forward(self, x: Tensor, edge_index: Adj, *args): + channels = x.size(self.split_dim) + xs = self._chunk(x, channels) + args = list(zip(*[self._chunk(arg, channels) for arg in args])) + args = [[]] * self.num_groups if len(args) == 0 else args + + ys = [] + y_in = sum(xs[1:]) + for i in range(self.num_groups): + y_in = xs[i] + self.convs[i](y_in, edge_index, *args[i]) + ys.append(y_in) + return torch.cat(ys, dim=self.split_dim) + + def _inverse(self, y: Tensor, edge_index: Adj, *args): + channels = y.size(self.split_dim) + ys = self._chunk(y, channels) + args = list(zip(*[self._chunk(arg, channels) for arg in args])) + args = [[]] * self.num_groups if len(args) == 0 else args + + xs = [] + for i in range(self.num_groups - 1, -1, -1): + if i != 0: + y_in = ys[i - 1] + else: + y_in = sum(xs) + x = ys[i] - self.convs[i](y_in, edge_index, *args[i]) + xs.append(x) + + return torch.cat(xs[::-1], dim=self.split_dim) + + def _chunk(self, x: Any, channels: int) -> List[Any]: + if not isinstance(x, Tensor): + return [x] * self.num_groups + + try: + if x.size(self.split_dim) != channels: + return [x] * self.num_groups + except IndexError: + return [x] * self.num_groups + + return torch.chunk(x, self.num_groups, dim=self.split_dim) + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.convs[0]}, ' + f'num_groups={self.num_groups})') From 5a6e826cde1a6ed37e03e89d8b372bce93aa83dd Mon Sep 17 00:00:00 2001 From: Martino Andrea Scarpolini Date: Mon, 23 May 2022 11:35:48 +0200 Subject: [PATCH 0067/2432] PyTorch Ignite example (#4487) * add an example of pytorch ignite training with console and tensorboard logging * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added test set evaluation, better comments and pep8 formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update examples/pytorch_ignite/gin.py Co-authored-by: Matthias Fey * Update examples/pytorch_ignite/gin.py * Update examples/pytorch_ignite/gin.py * Update examples/pytorch_ignite/gin.py * Update examples/pytorch_ignite/gin.py * Update examples/pytorch_ignite/gin.py * unique log_metric function instead of one for each dataset * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change one comment from italian to english * update * update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + 
examples/pytorch_ignite/gin.py | 164 +++++++++++++++++++++++++++++++++ 2 files changed, 165 insertions(+) create mode 100644 examples/pytorch_ignite/gin.py diff --git a/CHANGELOG.md b/CHANGELOG.md index e47f57018629..81d60da9dfba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) diff --git a/examples/pytorch_ignite/gin.py b/examples/pytorch_ignite/gin.py new file mode 100644 index 000000000000..6317a8a3b2da --- /dev/null +++ b/examples/pytorch_ignite/gin.py @@ -0,0 +1,164 @@ +import os.path as osp + +import ignite +import ignite.contrib.handlers.tensorboard_logger +import ignite.contrib.handlers.tqdm_logger +import torch +import torch.nn.functional as F + +import torch_geometric.transforms as T +from torch_geometric import seed_everything +from torch_geometric.datasets import TUDataset +from torch_geometric.loader import DataLoader +from torch_geometric.nn import GIN, MLP, global_add_pool + + +class Model(torch.nn.Module): + def __init__(self, in_channels: int, out_channels: int, + hidden_channels: int = 64, num_layers: int = 3, + dropout: float = 0.5): + super().__init__() + + self.gnn = GIN(in_channels, hidden_channels, num_layers, + dropout=dropout, jk='cat') + + self.classifier = MLP([hidden_channels, hidden_channels, out_channels], + batch_norm=True, dropout=dropout) + + def forward(self, data): + x = self.gnn(data.x, data.edge_index) + x = global_add_pool(x, data.batch) + x = self.classifier(x) + return x + + +def main(): + seed_everything(42) + + root = osp.join('data', 'TUDataset') + dataset = TUDataset(root, 'IMDB-BINARY', pre_transform=T.OneHotDegree(135)) + + dataset = dataset.shuffle() + test_dataset = dataset[:len(dataset) // 10] + val_dataset = dataset[len(dataset) // 10:2 * len(dataset) // 10] + train_dataset = dataset[2 * len(dataset) // 10:] + + train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, + pin_memory=True) + val_loader = DataLoader(val_dataset, batch_size=64, pin_memory=True) + test_loader = DataLoader(test_dataset, batch_size=64, pin_memory=True) + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + model = Model(dataset.num_node_features, dataset.num_classes).to(device) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + metrics = {'acc': ignite.metrics.Accuracy()} + + def prepare_batch_fn(batch, device, non_blocking): + return (batch.to(device, non_blocking=non_blocking), + batch.y.to(device, non_blocking=non_blocking)) + + trainer = ignite.engine.create_supervised_trainer( + model=model, + optimizer=optimizer, + loss_fn=F.cross_entropy, + device=device, + prepare_batch=prepare_batch_fn, + output_transform=lambda x, y, y_pred, loss: loss.item(), + amp_mode='amp', + ) + + # Progress bar for each epoch: + pbar = ignite.contrib.handlers.tqdm_logger.ProgressBar() + pbar.attach(trainer, 
output_transform=lambda x: {'loss': x}) + + def log_metrics(evaluator, loader, tag): + def logger(trainer): + evaluator.run(loader) + print(f'{tag:10} Epoch: {trainer.state.epoch:02d}, ' + f'Acc: {evaluator.state.metrics["acc"]:.4f}') + + return logger + + train_evaluator = ignite.engine.create_supervised_evaluator( + model=model, + metrics=metrics, + device=device, + prepare_batch=prepare_batch_fn, + output_transform=lambda x, y, y_pred: (y_pred, y), + amp_mode='amp', + ) + trainer.on(ignite.engine.Events.EPOCH_COMPLETED(every=1))(log_metrics( + train_evaluator, train_loader, 'Training')) + + val_evaluator = ignite.engine.create_supervised_evaluator( + model=model, + metrics=metrics, + device=device, + prepare_batch=prepare_batch_fn, + output_transform=lambda x, y, y_pred: (y_pred, y), + amp_mode='amp', + ) + trainer.on(ignite.engine.Events.EPOCH_COMPLETED(every=1))(log_metrics( + val_evaluator, val_loader, 'Validation')) + + test_evaluator = ignite.engine.create_supervised_evaluator( + model=model, + metrics=metrics, + device=device, + prepare_batch=prepare_batch_fn, + output_transform=lambda x, y, y_pred: (y_pred, y), + amp_mode='amp', + ) + trainer.on(ignite.engine.Events.EPOCH_COMPLETED(every=1))(log_metrics( + test_evaluator, test_loader, 'Test')) + + # Save checkpoint of the model based on Accuracy on the validation set: + checkpoint_handler = ignite.handlers.Checkpoint( + {'model': model}, + 'runs/gin', + n_saved=2, + score_name=list(metrics.keys())[0], + filename_pattern='best-{global_step}-{score_name}-{score}.pt', + global_step_transform=ignite.handlers.global_step_from_engine(trainer), + ) + val_evaluator.add_event_handler(ignite.engine.Events.EPOCH_COMPLETED, + checkpoint_handler) + + # Create a tensorboard logger to write logs: + tb_logger = ignite.contrib.handlers.tensorboard_logger.TensorboardLogger( + log_dir=osp.join('runs/example', 'tb_logs')) + + tb_logger.attach_output_handler( + trainer, event_name=ignite.engine.Events.ITERATION_COMPLETED, + tag='training', output_transform=lambda loss: {'loss_iteration': loss}) + tb_logger.attach_output_handler( + trainer, event_name=ignite.engine.Events.EPOCH_COMPLETED, + tag='training', output_transform=lambda loss: {'loss_epoch': loss}) + tb_logger.attach_output_handler( + train_evaluator, + event_name=ignite.engine.Events.EPOCH_COMPLETED, + tag='training', + metric_names='all', + global_step_transform=ignite.handlers.global_step_from_engine(trainer), + ) + tb_logger.attach_output_handler( + val_evaluator, + event_name=ignite.engine.Events.EPOCH_COMPLETED, + tag='validation', + metric_names='all', + global_step_transform=ignite.handlers.global_step_from_engine(trainer), + ) + tb_logger.attach_output_handler( + test_evaluator, + event_name=ignite.engine.Events.EPOCH_COMPLETED, + tag='test', + metric_names='all', + global_step_transform=ignite.handlers.global_step_from_engine(trainer), + ) + tb_logger.close() + + trainer.run(train_loader, max_epochs=50) + + +if __name__ == '__main__': + main() From a7e6be49e9a5e9e3ca5cedcdcb37994a32e38335 Mon Sep 17 00:00:00 2001 From: Arun Date: Tue, 24 May 2022 10:31:55 +0530 Subject: [PATCH 0068/2432] `DimeNet++` implementation (#4432) * adding outputpp block * added interaction plusplus block * added DimeNetPlusPlus by subclassing DimeNet * reverting back to OutputBlock for DimeNet DimeNet model, when used with OutputPPBlock has an additional layer lin_up for which we don't have pre-trained weights * using pretrained model in dimenet++ * test for dimenetplusplus * added sympy to 
full_install_requires * sympy to setup.py * changelog * adding torch cluster to install.yml * update * update * update Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/qm9_pretrained_dimenet.py | 18 +- setup.py | 1 + test/nn/models/test_dimenet.py | 41 ++++ torch_geometric/nn/models/__init__.py | 3 +- torch_geometric/nn/models/dimenet.py | 340 +++++++++++++++++++++++++- 6 files changed, 391 insertions(+), 13 deletions(-) create mode 100644 test/nn/models/test_dimenet.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 81d60da9dfba..bbc2792c91d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) diff --git a/examples/qm9_pretrained_dimenet.py b/examples/qm9_pretrained_dimenet.py index 8032157cd5fd..aeb521052c4c 100644 --- a/examples/qm9_pretrained_dimenet.py +++ b/examples/qm9_pretrained_dimenet.py @@ -1,15 +1,23 @@ +import argparse import os.path as osp import torch from torch_geometric.datasets import QM9 from torch_geometric.loader import DataLoader -from torch_geometric.nn import DimeNet +from torch_geometric.nn import DimeNet, DimeNetPlusPlus + +parser = argparse.ArgumentParser() +parser.add_argument('--use_dimenet_plus_plus', action='http://github.com/store_true') +args = parser.parse_args() + +Model = DimeNetPlusPlus if args.use_dimenet_plus_plus else DimeNet path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'QM9') dataset = QM9(path) -# DimeNet uses the atomization energy for targets U0, U, H, and G. +# DimeNet uses the atomization energy for targets U0, U, H, and G, i.e.: +# 7 -> 12, 8 -> 13, 9 -> 14, 10 -> 15 idx = torch.tensor([0, 1, 2, 3, 4, 5, 6, 12, 13, 14, 15, 11]) dataset.data.y = dataset.data.y[:, idx] @@ -17,11 +25,11 @@ for target in range(12): # Skip target \delta\epsilon, since it can be computed via - # \epsilon_{LUMO} - \epsilon_{HOMO}. + # \epsilon_{LUMO} - \epsilon_{HOMO}: if target == 4: continue - model, datasets = DimeNet.from_qm9_pretrained(path, dataset, target) + model, datasets = Model.from_qm9_pretrained(path, dataset, target) train_dataset, val_dataset, test_dataset = datasets model = model.to(device) @@ -37,7 +45,7 @@ mae = torch.cat(maes, dim=0) - # Report meV instead of eV. 
+ # Report meV instead of eV: mae = 1000 * mae if target in [2, 3, 4, 6, 7, 8, 9, 10] else mae print(f'Target: {target:02d}, MAE: {mae.mean():.5f} ± {mae.std():.5f}') diff --git a/setup.py b/setup.py index 13dfaf8155ed..6ada4afcde8a 100644 --- a/setup.py +++ b/setup.py @@ -22,6 +22,7 @@ full_requires = graphgym_requires + [ 'h5py', 'numba', + 'sympy', 'pandas', 'captum', 'rdflib', diff --git a/test/nn/models/test_dimenet.py b/test/nn/models/test_dimenet.py new file mode 100644 index 000000000000..91a72ee1cae9 --- /dev/null +++ b/test/nn/models/test_dimenet.py @@ -0,0 +1,41 @@ +import torch +import torch.nn.functional as F + +from torch_geometric.data import Data +from torch_geometric.nn import DimeNetPlusPlus +from torch_geometric.testing import onlyFullTest + + +@onlyFullTest +def test_dimenet_plus_plus(): + data = Data( + z=torch.randint(1, 10, (20, )), + pos=torch.randn(20, 3), + y=torch.tensor([1.]), + ) + + model = DimeNetPlusPlus( + hidden_channels=5, + out_channels=1, + num_blocks=5, + out_emb_channels=3, + int_emb_size=5, + basis_emb_size=5, + num_spherical=5, + num_radial=5, + num_before_skip=2, + num_after_skip=2, + ) + + with torch.no_grad(): + out = model(data.z, data.pos) + assert out.size() == (1, ) + + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + for i in range(100): + optimizer.zero_grad() + out = model(data.z, data.pos) + loss = F.l1_loss(out, data.y) + loss.backward() + optimizer.step() + assert loss < 1 diff --git a/torch_geometric/nn/models/__init__.py b/torch_geometric/nn/models/__init__.py index e541e36c1b33..ec89e53bb294 100644 --- a/torch_geometric/nn/models/__init__.py +++ b/torch_geometric/nn/models/__init__.py @@ -8,7 +8,7 @@ from .re_net import RENet from .graph_unet import GraphUNet from .schnet import SchNet -from .dimenet import DimeNet +from .dimenet import DimeNet, DimeNetPlusPlus from .explainer import Explainer, to_captum from .gnn_explainer import GNNExplainer from .metapath2vec import MetaPath2Vec @@ -43,6 +43,7 @@ 'GraphUNet', 'SchNet', 'DimeNet', + 'DimeNetPlusPlus', 'Explainer', 'to_captum', 'GNNExplainer', diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index fcf93fd681cc..a9760852bcc9 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -219,6 +219,97 @@ def forward(self, x, rbf, sbf, idx_kj, idx_ji): return h +class InteractionPPBlock(torch.nn.Module): + """ + The interaction block transforms each message embedding using + multiple residual blocks. 
+ """ + def __init__(self, hidden_channels, int_emb_size, basis_emb_size, + num_spherical, num_radial, num_before_skip, num_after_skip, + act=swish): + super().__init__() + self.act = act + + # Transformation of Bessel and spherical basis representations: + self.lin_rbf1 = Linear(num_radial, basis_emb_size, bias=False) + self.lin_rbf2 = Linear(basis_emb_size, hidden_channels, bias=False) + + self.lin_sbf1 = Linear(num_spherical * num_radial, basis_emb_size, + bias=False) + self.lin_sbf2 = Linear(basis_emb_size, int_emb_size, bias=False) + + # Hidden transformation of input message: + self.lin_kj = Linear(hidden_channels, hidden_channels) + self.lin_ji = Linear(hidden_channels, hidden_channels) + + # Embedding projections for interaction triplets: + self.lin_down = Linear(hidden_channels, int_emb_size, bias=False) + self.lin_up = Linear(int_emb_size, hidden_channels, bias=False) + + # Residual layers before and after skip connection: + self.layers_before_skip = torch.nn.ModuleList([ + ResidualLayer(hidden_channels, act) for _ in range(num_before_skip) + ]) + self.lin = Linear(hidden_channels, hidden_channels) + self.layers_after_skip = torch.nn.ModuleList([ + ResidualLayer(hidden_channels, act) for _ in range(num_before_skip) + ]) + + self.reset_parameters() + + def reset_parameters(self): + glorot_orthogonal(self.lin_rbf1.weight, scale=2.0) + glorot_orthogonal(self.lin_rbf2.weight, scale=2.0) + glorot_orthogonal(self.lin_sbf1.weight, scale=2.0) + glorot_orthogonal(self.lin_sbf2.weight, scale=2.0) + + glorot_orthogonal(self.lin_kj.weight, scale=2.0) + self.lin_kj.bias.data.fill_(0) + glorot_orthogonal(self.lin_ji.weight, scale=2.0) + self.lin_ji.bias.data.fill_(0) + + glorot_orthogonal(self.lin_down.weight, scale=2.0) + glorot_orthogonal(self.lin_up.weight, scale=2.0) + + for res_layer in self.layers_before_skip: + res_layer.reset_parameters() + glorot_orthogonal(self.lin.weight, scale=2.0) + self.lin.bias.data.fill_(0) + for res_layer in self.layers_before_skip: + res_layer.reset_parameters() + + def forward(self, x, rbf, sbf, idx_kj, idx_ji): + # Initial transformation: + x_ji = self.act(self.lin_ji(x)) + x_kj = self.act(self.lin_kj(x)) + + # Transformation via Bessel basis: + rbf = self.lin_rbf1(rbf) + rbf = self.lin_rbf2(rbf) + x_kj = x_kj * rbf + + # Down project embedding and generating triple-interactions: + x_kj = self.act(self.lin_down(x_kj)) + + # Transform via 2D spherical basis: + sbf = self.lin_sbf1(sbf) + sbf = self.lin_sbf2(sbf) + x_kj = x_kj[idx_kj] * sbf + + # Aggregate interactions and up-project embeddings: + x_kj = scatter(x_kj, idx_ji, dim=0, dim_size=x.size(0)) + x_kj = self.act(self.lin_up(x_kj)) + + h = x_ji + x_kj + for layer in self.layers_before_skip: + h = layer(h) + h = self.act(self.lin(h)) + x + for layer in self.layers_after_skip: + h = layer(h) + + return h + + class OutputBlock(torch.nn.Module): def __init__(self, num_radial, hidden_channels, out_channels, num_layers, act=swish): @@ -248,6 +339,40 @@ def forward(self, x, rbf, i, num_nodes=None): return self.lin(x) +class OutputPPBlock(torch.nn.Module): + def __init__(self, num_radial, hidden_channels, out_emb_channels, + out_channels, num_layers, act=swish): + super().__init__() + self.act = act + + self.lin_rbf = Linear(num_radial, hidden_channels, bias=False) + + # The up-projection layer: + self.lin_up = Linear(hidden_channels, out_emb_channels, bias=False) + self.lins = torch.nn.ModuleList() + for _ in range(num_layers): + self.lins.append(Linear(out_emb_channels, out_emb_channels)) + self.lin = 
Linear(out_emb_channels, out_channels, bias=False) + + self.reset_parameters() + + def reset_parameters(self): + glorot_orthogonal(self.lin_rbf.weight, scale=2.0) + glorot_orthogonal(self.lin_up.weight, scale=2.0) + for lin in self.lins: + glorot_orthogonal(lin.weight, scale=2.0) + lin.bias.data.fill_(0) + self.lin.weight.data.fill_(0) + + def forward(self, x, rbf, i, num_nodes=None): + x = self.lin_rbf(rbf) * x + x = scatter(x, i, dim=0, dim_size=num_nodes) + x = self.lin_up(x) + for lin in self.lins: + x = self.act(lin(x)) + return self.lin(x) + + class DimeNet(torch.nn.Module): r"""The directional message passing neural network (DimeNet) from the `"Directional Message Passing for Molecular Graphs" @@ -331,8 +456,8 @@ def reset_parameters(self): for interaction in self.interaction_blocks: interaction.reset_parameters() - @staticmethod - def from_qm9_pretrained(root: str, dataset: Dataset, target: int): + @classmethod + def from_qm9_pretrained(cls, root: str, dataset: Dataset, target: int): os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import tensorflow as tf @@ -340,8 +465,10 @@ def from_qm9_pretrained(root: str, dataset: Dataset, target: int): root = osp.expanduser(osp.normpath(root)) path = osp.join(root, 'pretrained_dimenet', qm9_target_dict[target]) + makedirs(path) - url = f'{DimeNet.url}/{qm9_target_dict[target]}' + url = f'{cls.url}/{qm9_target_dict[target]}' + if not osp.exists(osp.join(path, 'checkpoint')): download_url(http://github.com/f'%7Burl%7D/checkpoint',%20path) download_url(http://github.com/f'%7Burl%7D/ckpt.data-00000-of-00002',%20path) @@ -351,10 +478,19 @@ def from_qm9_pretrained(root: str, dataset: Dataset, target: int): path = osp.join(path, 'ckpt') reader = tf.train.load_checkpoint(path) - model = DimeNet(hidden_channels=128, out_channels=1, num_blocks=6, - num_bilinear=8, num_spherical=7, num_radial=6, - cutoff=5.0, envelope_exponent=5, num_before_skip=1, - num_after_skip=2, num_output_layers=3) + model = cls( + hidden_channels=128, + out_channels=1, + num_blocks=6, + num_bilinear=8, + num_spherical=7, + num_radial=6, + cutoff=5.0, + envelope_exponent=5, + num_before_skip=1, + num_after_skip=2, + num_output_layers=3, + ) def copy_(src, name, transpose=False): init = reader.get_tensor(f'{name}/.ATTRIBUTES/VARIABLE_VALUE') @@ -469,3 +605,193 @@ def forward(self, z, pos, batch=None): P += output_block(x, rbf, i) return P.sum(dim=0) if batch is None else scatter(P, batch, dim=0) + + +class DimeNetPlusPlus(DimeNet): + r"""The DimeNet++ from the `"Fast and Uncertainty-Aware + Directional Message Passing for Non-Equilibrium Molecules" + `_ paper. + + :class:`DimeNetPlusPlus` is an upgrade to the :class:`DimeNet` model with + 8x faster and 10% more accurate than :class:`DimeNet`. + + Args: + hidden_channels (int): Hidden embedding size. + out_channels (int): Size of each output sample. + num_blocks (int): Number of building blocks. + int_emb_size (int): Size of embedding in the interaction block. + basis_emb_size (int): Size of basis embedding in the interaction block. + out_emb_channels (int): Size of embedding in the output block. + num_spherical (int): Number of spherical harmonics. + num_radial (int): Number of radial basis functions. + cutoff: (float, optional): Cutoff distance for interatomic + interactions. (default: :obj:`5.0`) + max_num_neighbors (int, optional): The maximum number of neighbors to + collect for each node within the :attr:`cutoff` distance. + (default: :obj:`32`) + envelope_exponent (int, optional): Shape of the smooth cutoff. 
+ (default: :obj:`5`) + num_before_skip: (int, optional): Number of residual layers in the + interaction blocks before the skip connection. (default: :obj:`1`) + num_after_skip: (int, optional): Number of residual layers in the + interaction blocks after the skip connection. (default: :obj:`2`) + num_output_layers: (int, optional): Number of linear layers for the + output blocks. (default: :obj:`3`) + act: (Callable, optional): The activation funtion. + (default: :obj:`swish`) + """ + + url = ('https://raw.githubusercontent.com/gasteigerjo/dimenet/' + 'master/pretrained/dimenet_pp') + + def __init__(self, hidden_channels: int, out_channels: int, + num_blocks: int, int_emb_size: int, basis_emb_size: int, + out_emb_channels: int, num_spherical: int, num_radial: int, + cutoff: float = 5.0, max_num_neighbors: int = 32, + envelope_exponent: int = 5, num_before_skip: int = 1, + num_after_skip: int = 2, num_output_layers: int = 3, + act: Callable = swish): + super().__init__( + hidden_channels=hidden_channels, + out_channels=out_channels, + num_blocks=num_blocks, + num_bilinear=1, + num_spherical=num_spherical, + num_radial=num_radial, + cutoff=cutoff, + max_num_neighbors=max_num_neighbors, + envelope_exponent=envelope_exponent, + num_before_skip=num_before_skip, + num_after_skip=num_after_skip, + num_output_layers=num_output_layers, + act=act, + ) + + # We are re-using the RBF, SBF and embedding layers of `DimeNet` and + # redefine output_block and interaction_block in DimeNet++. + # Hence, it is to be noted that in the above initalization, the + # variable `num_bilinear` does not have any purpose as it is used + # solely in the `OutputBlock` of DimeNet: + self.output_blocks = torch.nn.ModuleList([ + OutputPPBlock(num_radial, hidden_channels, out_emb_channels, + out_channels, num_output_layers, act) + for _ in range(num_blocks + 1) + ]) + + self.interaction_blocks = torch.nn.ModuleList([ + InteractionPPBlock(hidden_channels, int_emb_size, basis_emb_size, + num_spherical, num_radial, num_before_skip, + num_after_skip, act) for _ in range(num_blocks) + ]) + + self.reset_parameters() + + @classmethod + def from_qm9_pretrained(cls, root: str, dataset: Dataset, target: int): + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + import tensorflow as tf + + assert target >= 0 and target <= 12 and not target == 4 + + root = osp.expanduser(osp.normpath(root)) + path = osp.join(root, 'pretrained_dimenet_pp', qm9_target_dict[target]) + + makedirs(path) + url = f'{cls.url}/{qm9_target_dict[target]}' + + if not osp.exists(osp.join(path, 'checkpoint')): + download_url(http://github.com/f'%7Burl%7D/checkpoint',%20path) + download_url(http://github.com/f'%7Burl%7D/ckpt.data-00000-of-00002',%20path) + download_url(http://github.com/f'%7Burl%7D/ckpt.data-00001-of-00002',%20path) + download_url(http://github.com/f'%7Burl%7D/ckpt.index',%20path) + + path = osp.join(path, 'ckpt') + reader = tf.train.load_checkpoint(path) + + # Configuration from DimeNet++: + # https://github.com/gasteigerjo/dimenet/blob/master/config_pp.yaml + model = cls( + hidden_channels=128, + out_channels=1, + num_blocks=4, + int_emb_size=64, + basis_emb_size=8, + out_emb_channels=256, + num_spherical=7, + num_radial=6, + cutoff=5.0, + max_num_neighbors=32, + envelope_exponent=5, + num_before_skip=1, + num_after_skip=2, + num_output_layers=3, + ) + + def copy_(src, name, transpose=False): + init = reader.get_tensor(f'{name}/.ATTRIBUTES/VARIABLE_VALUE') + init = torch.from_numpy(init) + if name[-6:] == 
'kernel': + init = init.t() + src.data.copy_(init) + + copy_(model.rbf.freq, 'rbf_layer/frequencies') + copy_(model.emb.emb.weight, 'emb_block/embeddings') + copy_(model.emb.lin_rbf.weight, 'emb_block/dense_rbf/kernel') + copy_(model.emb.lin_rbf.bias, 'emb_block/dense_rbf/bias') + copy_(model.emb.lin.weight, 'emb_block/dense/kernel') + copy_(model.emb.lin.bias, 'emb_block/dense/bias') + + for i, block in enumerate(model.output_blocks): + copy_(block.lin_rbf.weight, f'output_blocks/{i}/dense_rbf/kernel') + copy_(block.lin_up.weight, + f'output_blocks/{i}/up_projection/kernel') + for j, lin in enumerate(block.lins): + copy_(lin.weight, f'output_blocks/{i}/dense_layers/{j}/kernel') + copy_(lin.bias, f'output_blocks/{i}/dense_layers/{j}/bias') + copy_(block.lin.weight, f'output_blocks/{i}/dense_final/kernel') + + for i, block in enumerate(model.interaction_blocks): + copy_(block.lin_rbf1.weight, f'int_blocks/{i}/dense_rbf1/kernel') + copy_(block.lin_rbf2.weight, f'int_blocks/{i}/dense_rbf2/kernel') + copy_(block.lin_sbf1.weight, f'int_blocks/{i}/dense_sbf1/kernel') + copy_(block.lin_sbf2.weight, f'int_blocks/{i}/dense_sbf2/kernel') + + copy_(block.lin_ji.weight, f'int_blocks/{i}/dense_ji/kernel') + copy_(block.lin_ji.bias, f'int_blocks/{i}/dense_ji/bias') + copy_(block.lin_kj.weight, f'int_blocks/{i}/dense_kj/kernel') + copy_(block.lin_kj.bias, f'int_blocks/{i}/dense_kj/bias') + + copy_(block.lin_down.weight, + f'int_blocks/{i}/down_projection/kernel') + copy_(block.lin_up.weight, f'int_blocks/{i}/up_projection/kernel') + + for j, layer in enumerate(block.layers_before_skip): + copy_(layer.lin1.weight, + f'int_blocks/{i}/layers_before_skip/{j}/dense_1/kernel') + copy_(layer.lin1.bias, + f'int_blocks/{i}/layers_before_skip/{j}/dense_1/bias') + copy_(layer.lin2.weight, + f'int_blocks/{i}/layers_before_skip/{j}/dense_2/kernel') + copy_(layer.lin2.bias, + f'int_blocks/{i}/layers_before_skip/{j}/dense_2/bias') + + copy_(block.lin.weight, f'int_blocks/{i}/final_before_skip/kernel') + copy_(block.lin.bias, f'int_blocks/{i}/final_before_skip/bias') + + for j, layer in enumerate(block.layers_after_skip): + copy_(layer.lin1.weight, + f'int_blocks/{i}/layers_after_skip/{j}/dense_1/kernel') + copy_(layer.lin1.bias, + f'int_blocks/{i}/layers_after_skip/{j}/dense_1/bias') + copy_(layer.lin2.weight, + f'int_blocks/{i}/layers_after_skip/{j}/dense_2/kernel') + copy_(layer.lin2.bias, + f'int_blocks/{i}/layers_after_skip/{j}/dense_2/bias') + + random_state = np.random.RandomState(seed=42) + perm = torch.from_numpy(random_state.permutation(np.arange(130831))) + train_idx = perm[:110000] + val_idx = perm[110000:120000] + test_idx = perm[120000:] + + return model, (dataset[train_idx], dataset[val_idx], dataset[test_idx]) From b57f264a55364b4351f766823262f6fcec4f2ea9 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Tue, 24 May 2022 07:48:22 +0200 Subject: [PATCH 0069/2432] OGB @ NeurIPS --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 959114a689f4..aaebdd497c25 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,9 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many

+[HTML banner announcing OGB @ NeurIPS (markup not preserved)]
-------------------------------------------------------------------------------- From 9963fcfefcc41522b7fe9b955c1a1a094e364e82 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 24 May 2022 07:54:21 +0200 Subject: [PATCH 0070/2432] Add `DimeNet++` to `README` (#4699) * add dimenet++ readme * changelog --- CHANGELOG.md | 2 +- README.md | 2 +- torch_geometric/nn/models/dimenet.py | 4 ---- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbc2792c91d9..a1929eceabe8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added -- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432)) +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) diff --git a/README.md b/README.md index 9d2ce26123ad..a9e738ca0ada 100644 --- a/README.md +++ b/README.md @@ -273,7 +273,7 @@ Our supported GNN models incorporate multiple message passing layers, and users Unlike simple stacking of GNN layers, these models could involve pre-processing, additional learnable parameters, skip connections, graph coarsening, etc. 
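As a side note on the newly listed DimeNet++ model, a minimal usage sketch (not taken from the patch itself; the hyperparameters mirror the QM9 configuration quoted in `from_qm9_pretrained` above, and the atom inputs are toy placeholders) could look like this:

```python
import torch

from torch_geometric.nn import DimeNetPlusPlus

# Configuration mirroring the QM9 settings quoted in `from_qm9_pretrained`:
model = DimeNetPlusPlus(hidden_channels=128, out_channels=1, num_blocks=4,
                        int_emb_size=64, basis_emb_size=8,
                        out_emb_channels=256, num_spherical=7, num_radial=6,
                        cutoff=5.0)

z = torch.tensor([8, 1, 1])  # Toy molecule given by its atomic numbers.
pos = torch.randn(3, 3)      # Random 3D coordinates, purely illustrative.
out = model(z, pos)          # Single-graph prediction of shape [1].
```

Passing a `batch` vector in addition returns one prediction per molecule, matching the `forward(z, pos, batch=None)` signature shown in the DimeNet code above.
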
* **[SchNet](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.SchNet)** from Schütt *et al.*: [SchNet: A Continuous-filter Convolutional Neural Network for Modeling Quantum Interactions](https://arxiv.org/abs/1706.08566) (NIPS 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_schnet.py)] -* **[DimeNet](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.DimeNet)** from Klicpera *et al.*: [Directional Message Passing for Molecular Graphs](https://arxiv.org/abs/2003.03123) (ICLR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_dimenet.py)] +* **[DimeNet](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.DimeNet)** and **[DimeNetPlusPlus](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.DimeNetPlusPlus)** from Klicpera *et al.*: [Directional Message Passing for Molecular Graphs](https://arxiv.org/abs/2003.03123) (ICLR 2020) and [Fast and Uncertainty-Aware Directional Message Passing for Non-Equilibrium Molecules](https://arxiv.org/abs/2011.14115) (NeurIPS-W 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_dimenet.py)] * **[Node2Vec](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.Node2Vec)** from Grover and Leskovec: [node2vec: Scalable Feature Learning for Networks](https://arxiv.org/abs/1607.00653) (KDD 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/node2vec.py)] * **[Deep Graph Infomax](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.DeepGraphInfomax)** from Veličković *et al.*: [Deep Graph Infomax](https://arxiv.org/abs/1809.10341) (ICLR 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_transductive.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_inductive.py)] * **Deep Multiplex Graph Infomax** from Park *et al.*: [Unsupervised Attributed Multiplex Network Embedding](https://arxiv.org/abs/1911.06750) (AAAI 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/dmgi_unsup.py)] diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index a9760852bcc9..327ec2fd19b7 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -220,10 +220,6 @@ def forward(self, x, rbf, sbf, idx_kj, idx_ji): class InteractionPPBlock(torch.nn.Module): - """ - The interaction block transforms each message embedding using - multiple residual blocks. - """ def __init__(self, hidden_channels, int_emb_size, basis_emb_size, num_spherical, num_radial, num_before_skip, num_after_skip, act=swish): From f3ce4f2d34fd3fc01001ae35d281b7d8e147bc39 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Tue, 24 May 2022 08:09:32 +0200 Subject: [PATCH 0071/2432] update --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1929eceabe8..9f4e00a062ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added -- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699)) +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) From c7062dc95183cc9f13b517dc64b5376114f9b724 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 24 May 2022 08:12:39 +0200 Subject: [PATCH 0072/2432] `DimeNet`: Added `swish` activation to `activation_resolver` (#4700) * add dimenet++ readme * changelog * swish act * update --- torch_geometric/nn/acts.py | 2 -- torch_geometric/nn/models/dimenet.py | 36 +++++++++++++++------------- torch_geometric/nn/resolver.py | 17 ++++++++++++- 3 files changed, 36 insertions(+), 19 deletions(-) delete mode 100644 torch_geometric/nn/acts.py diff --git a/torch_geometric/nn/acts.py b/torch_geometric/nn/acts.py deleted file mode 100644 index 234ccefa35c0..000000000000 --- a/torch_geometric/nn/acts.py +++ /dev/null @@ -1,2 +0,0 @@ -def swish(x): - return x * x.sigmoid() diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index 327ec2fd19b7..4e3f44169b04 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -2,7 +2,7 @@ import os.path as osp from math import pi as PI from math import sqrt -from typing import Callable +from typing import Callable, Union import numpy as np import torch @@ -13,9 +13,8 @@ from torch_geometric.data import Dataset, download_url from torch_geometric.data.makedirs import makedirs from torch_geometric.nn import radius_graph - -from ..acts import swish -from ..inits import glorot_orthogonal +from torch_geometric.nn.inits import glorot_orthogonal +from torch_geometric.nn.resolver import activation_resolver qm9_target_dict = { 0: 'mu', @@ -117,7 +116,7 @@ def forward(self, dist, angle, idx_kj): class EmbeddingBlock(torch.nn.Module): - def __init__(self, num_radial, hidden_channels, act=swish): + def __init__(self, num_radial, hidden_channels, act): super().__init__() self.act = act @@ -139,7 +138,7 @@ def forward(self, x, rbf, i, j): class ResidualLayer(torch.nn.Module): - def __init__(self, hidden_channels, act=swish): + def __init__(self, hidden_channels, act): super().__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) @@ -159,7 +158,7 @@ def forward(self, x): class InteractionBlock(torch.nn.Module): def __init__(self, hidden_channels, num_bilinear, num_spherical, - num_radial, num_before_skip, num_after_skip, act=swish): + num_radial, num_before_skip, num_after_skip, act): super().__init__() self.act = act @@ -222,7 +221,7 @@ def forward(self, x, rbf, sbf, idx_kj, idx_ji): class InteractionPPBlock(torch.nn.Module): def __init__(self, hidden_channels, int_emb_size, basis_emb_size, num_spherical, num_radial, num_before_skip, num_after_skip, - 
act=swish): + act): super().__init__() self.act = act @@ -308,7 +307,7 @@ def forward(self, x, rbf, sbf, idx_kj, idx_ji): class OutputBlock(torch.nn.Module): def __init__(self, num_radial, hidden_channels, out_channels, num_layers, - act=swish): + act): super().__init__() self.act = act @@ -337,7 +336,7 @@ def forward(self, x, rbf, i, num_nodes=None): class OutputPPBlock(torch.nn.Module): def __init__(self, num_radial, hidden_channels, out_emb_channels, - out_channels, num_layers, act=swish): + out_channels, num_layers, act): super().__init__() self.act = act @@ -403,8 +402,8 @@ class DimeNet(torch.nn.Module): interaction blocks after the skip connection. (default: :obj:`2`) num_output_layers (int, optional): Number of linear layers for the output blocks. (default: :obj:`3`) - act (Callable, optional): The activation function. - (default: :obj:`swish`) + act (str or Callable, optional): The activation function. + (default: :obj:`"swish"`) """ url = ('https://github.com/klicperajo/dimenet/raw/master/pretrained/' @@ -415,12 +414,14 @@ def __init__(self, hidden_channels: int, out_channels: int, num_radial, cutoff: float = 5.0, max_num_neighbors: int = 32, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3, - act: Callable = swish): + act: Union[str, Callable] = 'swish'): super().__init__() if num_spherical < 2: raise ValueError("num_spherical should be greater than 1") + act = activation_resolver(act) + self.cutoff = cutoff self.max_num_neighbors = max_num_neighbors self.num_blocks = num_blocks @@ -633,8 +634,8 @@ class DimeNetPlusPlus(DimeNet): interaction blocks after the skip connection. (default: :obj:`2`) num_output_layers: (int, optional): Number of linear layers for the output blocks. (default: :obj:`3`) - act: (Callable, optional): The activation funtion. - (default: :obj:`swish`) + act: (str or Callable, optional): The activation funtion. 
+ (default: :obj:`"swish"`) """ url = ('https://raw.githubusercontent.com/gasteigerjo/dimenet/' @@ -646,7 +647,10 @@ def __init__(self, hidden_channels: int, out_channels: int, cutoff: float = 5.0, max_num_neighbors: int = 32, envelope_exponent: int = 5, num_before_skip: int = 1, num_after_skip: int = 2, num_output_layers: int = 3, - act: Callable = swish): + act: Union[str, Callable] = 'swish'): + + act = activation_resolver(act) + super().__init__( hidden_channels=hidden_channels, out_channels=out_channels, diff --git a/torch_geometric/nn/resolver.py b/torch_geometric/nn/resolver.py index 04c2374f9114..13ea9119eaec 100644 --- a/torch_geometric/nn/resolver.py +++ b/torch_geometric/nn/resolver.py @@ -1,6 +1,8 @@ +import inspect from typing import Any, List, Union import torch +from torch import Tensor def normalize_string(s: str) -> str: @@ -14,16 +16,29 @@ def resolver(classes: List[Any], query: Union[Any, str], *args, **kwargs): query = normalize_string(query) for cls in classes: if query == normalize_string(cls.__name__): - return cls(*args, **kwargs) + if inspect.isclass(cls): + return cls(*args, **kwargs) + else: + return cls return ValueError( f"Could not resolve '{query}' among the choices " f"{set(normalize_string(cls.__name__) for cls in classes)}") +# Activation Resolver ######################################################### + + +def swish(x: Tensor) -> Tensor: + return x * x.sigmoid() + + def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): acts = [ act for act in vars(torch.nn.modules.activation).values() if isinstance(act, type) and issubclass(act, torch.nn.Module) ] + acts += [ + swish, + ] return resolver(acts, query, *args, **kwargs) From 54103a5ce336659323a7bc90b7ffce0822c56b75 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 24 May 2022 08:26:34 +0200 Subject: [PATCH 0073/2432] `RevGNN`: Fix Python 3.7 tests (#4701) * fix py3.7 tests * changelog --- CHANGELOG.md | 2 +- torch_geometric/nn/models/rev_gnn.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f4e00a062ba..b0111af95ad8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) -- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671)) +- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) diff --git a/torch_geometric/nn/models/rev_gnn.py b/torch_geometric/nn/models/rev_gnn.py index 4f5a48acbeb5..3230f7ec6182 100644 --- a/torch_geometric/nn/models/rev_gnn.py +++ b/torch_geometric/nn/models/rev_gnn.py @@ -1,8 +1,8 @@ import copy -import math from abc import ABC, abstractmethod from typing import Any, List, Optional, Union +import numpy as np import torch from torch import Tensor @@ -87,7 +87,7 @@ def backward(ctx, *grad_outputs): inputs_inverted = (inputs_inverted, ) for elem_orig, elem_inv in zip(inputs, inputs_inverted): - elem_orig.storage().resize_(int(math.prod(elem_orig.size()))) + elem_orig.storage().resize_(int(np.prod(elem_orig.size()))) elem_orig.set_(elem_inv) # Compute gradients with grad enabled: From f482cb7dfef1e72e000c5962148e4d5041526fbd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 24 May 2022 09:46:39 +0200 Subject: [PATCH 0074/2432] Fix: `is_sorted` argument in `NeighborLoader` (#4702) * fix: is_sorted * changelog --- CHANGELOG.md | 2 +- torch_geometric/loader/utils.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0111af95ad8..fb46ff9b5071 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) - Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531)) - Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) -- Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620)) +- Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620), [#4702](https://github.com/pyg-team/pytorch_geometric/pull/4702)) - Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) - Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) - Added the `Genius` and `Wiki` datasets to `nn.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 86fc2bf4186d..248859e2c0f9 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -49,10 +49,9 @@ def to_csc( elif hasattr(data, 'edge_index'): (row, col) = data.edge_index if not is_sorted: - size = data.size() - perm = (col * size[0]).add_(row).argsort() + perm = (col * data.size(0)).add_(row).argsort() row = row[perm] - colptr = torch.ops.torch_sparse.ind2ptr(col[perm], size[1]) + colptr = torch.ops.torch_sparse.ind2ptr(col[perm], data.size(1)) else: raise AttributeError("Data object does not contain attributes " "'adj_t' or 'edge_index'") From efffdc3ddead9c6bf7830b3c66f63111bbd654c8 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Wed, 25 May 2022 19:06:05 +0300 Subject: [PATCH 0075/2432] Fix errors of the `RevGNN` example (#4715) * Fix errors * changelog Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- examples/rev_gnn.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb46ff9b5071..66d0a8041322 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) -- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701)) +- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) diff --git a/examples/rev_gnn.py b/examples/rev_gnn.py index 22b8476b29c5..c0a88b69217b 100644 --- a/examples/rev_gnn.py +++ b/examples/rev_gnn.py @@ -20,7 +20,7 @@ class GNNBlock(torch.nn.Module): def __init__(self, in_channels, out_channels): - super().__init__(in_channels) + super().__init__() self.norm = LayerNorm(in_channels, elementwise_affine=True) self.conv = SAGEConv(in_channels, out_channels) @@ -48,7 +48,7 @@ def __init__(self, in_channels, hidden_channels, out_channels, num_layers, assert hidden_channels % num_groups == 0 self.convs = torch.nn.ModuleList() - for _ in range(self.num_layers): + for _ in range(num_layers): conv = GNNBlock( hidden_channels // num_groups, hidden_channels // num_groups, @@ -63,6 +63,8 @@ def reset_parameters(self): conv.reset_parameters() def forward(self, x, edge_index): + x = self.lin1(x) + # Generate a dropout mask which will be shared across GNN blocks: mask = None if self.training and self.dropout > 0: @@ -70,7 +72,6 @@ def forward(self, x, edge_index): mask = mask.requires_grad_(False) mask = mask / (1 - self.dropout) - x = self.lin1(x) for conv in self.convs: x = conv(x, edge_index, mask) x = self.norm(x).relu() From cb92831649fdb63057d6488a24462d07042cb207 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 25 May 2022 20:22:58 +0200 Subject: [PATCH 0076/2432] `torch_geometric.nn.aggr` package with base class (#4687) * initial commit * update * changelog * Added basic aggrs, gen aggrs and pna aggrs * Formatted * Formatted * Added test for aggr class * Formatted * update * update * update * update * update * docstring * typo Co-authored-by: lightaime --- CHANGELOG.md | 1 + test/nn/aggr/test_basic.py | 51 ++++++++++ torch_geometric/nn/__init__.py | 1 + torch_geometric/nn/aggr/__init__.py | 23 +++++ torch_geometric/nn/aggr/base.py | 62 ++++++++++++ torch_geometric/nn/aggr/basic.py | 99 ++++++++++++++++++++ torch_geometric/transforms/base_transform.py | 3 +- 7 files changed, 239 insertions(+), 1 deletion(-) create mode 100644 test/nn/aggr/test_basic.py create mode 100644 torch_geometric/nn/aggr/__init__.py create mode 100644 torch_geometric/nn/aggr/base.py create mode 100644 torch_geometric/nn/aggr/basic.py diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 66d0a8041322..29fd4a7864e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715)) diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py new file mode 100644 index 000000000000..1e1dd07a1ed3 --- /dev/null +++ b/test/nn/aggr/test_basic.py @@ -0,0 +1,51 @@ +import pytest +import torch + +from torch_geometric.nn import ( + MaxAggregation, + MeanAggregation, + MinAggregation, + PowerMeanAggregation, + SoftmaxAggregation, + StdAggregation, + SumAggregation, + VarAggregation, +) + + +@pytest.mark.parametrize('Aggregation', [ + MeanAggregation, SumAggregation, MaxAggregation, MinAggregation, + VarAggregation, StdAggregation +]) +def test_basic_aggregation(Aggregation): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + ptr = torch.tensor([0, 2, 5, 6]) + + aggr = Aggregation() + assert str(aggr) == f'{Aggregation.__name__}()' + + out = aggr(x, index) + assert out.size() == (3, x.size(1)) + assert torch.allclose(out, aggr(x, ptr=ptr)) + + +@pytest.mark.parametrize('Aggregation', + [SoftmaxAggregation, PowerMeanAggregation]) +@pytest.mark.parametrize('learn', [True, False]) +def test_gen_aggregation(Aggregation, learn): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + ptr = torch.tensor([0, 2, 5, 6]) + + aggr = Aggregation(learn=learn) + assert str(aggr) == f'{Aggregation.__name__}()' + + out = aggr(x, index) + assert out.size() == (3, x.size(1)) + assert torch.allclose(out, aggr(x, ptr=ptr)) + + if learn: + out.mean().backward() + for param in aggr.parameters(): + assert not torch.isnan(param.grad).any() diff --git a/torch_geometric/nn/__init__.py b/torch_geometric/nn/__init__.py index 0550d4c07b37..dd66031497f2 100644 --- a/torch_geometric/nn/__init__.py +++ b/torch_geometric/nn/__init__.py @@ -4,6 +4,7 @@ from .data_parallel import DataParallel from .to_hetero_transformer import to_hetero from .to_hetero_with_bases_transformer import to_hetero_with_bases +from .aggr import * # noqa from .conv import * # noqa from .norm import * # noqa from .glob import * # noqa diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py new file mode 100644 index 000000000000..dbe1e4f086db --- /dev/null +++ b/torch_geometric/nn/aggr/__init__.py @@ -0,0 +1,23 @@ +from .base import Aggregation +from .basic import ( + MeanAggregation, + SumAggregation, + MaxAggregation, + MinAggregation, + VarAggregation, + StdAggregation, + SoftmaxAggregation, + PowerMeanAggregation, +) + +__all__ = classes = [ + 'Aggregation', + 'MeanAggregation', + 'SumAggregation', + 'MaxAggregation', + 'MinAggregation', + 'VarAggregation', + 'StdAggregation', + 'SoftmaxAggregation', + 
'PowerMeanAggregation', +] diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py new file mode 100644 index 000000000000..f36efc7bd09a --- /dev/null +++ b/torch_geometric/nn/aggr/base.py @@ -0,0 +1,62 @@ +from abc import ABC, abstractmethod +from typing import Optional + +import torch +from torch import Tensor +from torch_scatter import scatter, segment_csr + + +class Aggregation(torch.nn.Module, ABC): + r"""An abstract base class for implementing custom aggregations.""" + @abstractmethod + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + r""" + Args: + x (torch.Tensor): The source tensor. + index (torch.LongTensor, optional): The indices of elements for + applying the aggregation. + One of :obj:`index` or `ptr` must be defined. + (default: :obj:`None`) + ptr (torch.LongTensor, optional): If given, computes the + aggregation based on sorted inputs in CSR representation. + One of :obj:`index` or `ptr` must be defined. + (default: :obj:`None`) + dim_size (int, optional): The size of the output tensor at + dimension :obj:`dim` after aggregation. (default: :obj:`None`) + dim (int, optional): The dimension in which to aggregate. + (default: :obj:`-2`) + """ + pass + + def reset_parameters(self): + pass + + def reduce(self, x: Tensor, index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2, reduce: str = 'add') -> Tensor: + + assert index is not None or ptr is not None + + if ptr is not None: + ptr = expand_left(ptr, dim, dims=x.dim()) + return segment_csr(x, ptr, reduce=reduce) + + if index is not None: + return scatter(x, index, dim=dim, dim_size=dim_size, reduce=reduce) + + raise ValueError(f"Error in '{self.__class__.__name__}': " + f"One of 'index' or 'ptr' must be defined") + + def __repr__(self) -> str: + return f'{self.__class__.__name__}()' + + +############################################################################### + + +def expand_left(ptr: Tensor, dim: int, dims: int) -> Tensor: + for _ in range(dims + dim if dim < 0 else dim): + ptr = ptr.unsqueeze(0) + return ptr diff --git a/torch_geometric/nn/aggr/basic.py b/torch_geometric/nn/aggr/basic.py new file mode 100644 index 000000000000..3b52fc225fad --- /dev/null +++ b/torch_geometric/nn/aggr/basic.py @@ -0,0 +1,99 @@ +from typing import Optional + +import torch +from torch import Tensor +from torch.nn import Parameter + +from torch_geometric.nn.aggr import Aggregation +from torch_geometric.utils import softmax + + +class MeanAggregation(Aggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + return self.reduce(x, index, ptr, dim_size, dim, reduce='mean') + + +class SumAggregation(Aggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + return self.reduce(x, index, ptr, dim_size, dim, reduce='sum') + + +class MaxAggregation(Aggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + return self.reduce(x, index, ptr, dim_size, dim, reduce='max') + + +class MinAggregation(Aggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: 
Optional[int] = None, + dim: int = -2) -> Tensor: + return self.reduce(x, index, ptr, dim_size, dim, reduce='min') + + +class VarAggregation(Aggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + mean = self.reduce(x, index, ptr, dim_size, dim, reduce='mean') + mean_2 = self.reduce(x * x, index, ptr, dim_size, dim, reduce='mean') + return mean_2 - mean * mean + + +class StdAggregation(VarAggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + var = super().forward(x, index, ptr=ptr, dim_size=dim_size, dim=dim) + return torch.sqrt(var.relu() + 1e-5) + + +class SoftmaxAggregation(Aggregation): + def __init__(self, t: float = 1.0, learn: bool = False): + # TODO Learn distinct `t` per channel. + super().__init__() + self._init_t = t + self.t = Parameter(torch.Tensor(1)) if learn else t + self.reset_parameters() + + def reset_parameters(self): + if isinstance(self.t, Tensor): + self.t.data.fill_(self._init_t) + + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + alpha = x + if not isinstance(self.t, (int, float)) or self.t != 1: + alpha = x * self.t + alpha = softmax(alpha, index, ptr, dim_size, dim) + return self.reduce(x * alpha, index, ptr, dim_size, dim, reduce='sum') + + +class PowerMeanAggregation(Aggregation): + def __init__(self, p: float = 1.0, learn: bool = False): + # TODO Learn distinct `p` per channel. + super().__init__() + self._init_p = p + self.p = Parameter(torch.Tensor(1)) if learn else p + self.reset_parameters() + + if isinstance(self.p, Tensor): + self.p.data.fill_(self._init_p) + + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + out = self.reduce(x, index, ptr, dim_size, dim, reduce='mean') + if isinstance(self.p, (int, float)) and self.p == 1: + return out + return out.clamp_(min=0, max=100).pow(1. / self.p) diff --git a/torch_geometric/transforms/base_transform.py b/torch_geometric/transforms/base_transform.py index 56f12fcfc9e7..8a3041c2cd7e 100644 --- a/torch_geometric/transforms/base_transform.py +++ b/torch_geometric/transforms/base_transform.py @@ -1,7 +1,8 @@ +from abc import ABC from typing import Any -class BaseTransform: +class BaseTransform(ABC): r"""An abstract base class for writing transforms. Transforms are a general way to modify and customize From 2bd383554e3e1fae08affb2d3f7e3174cbbffe75 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 26 May 2022 15:17:20 +0200 Subject: [PATCH 0077/2432] Fix `protobuf` version (#4719) * fix protobuf * update --- CHANGELOG.md | 1 + setup.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29fd4a7864e7..c54fe572405c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed `protobuf` version ([#4719](https://github.com/pyg-team/pytorch_geometric/pull/4719)) - Fixed the ranking protocol bug in the RGCN link prediction example ([#4688](https://github.com/pyg-team/pytorch_geometric/pull/4688)) - Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) - Allow for `setter` properties in `Data` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682), [#4686](https://github.com/pyg-team/pytorch_geometric/pull/4686)) diff --git a/setup.py b/setup.py index 6ada4afcde8a..f1097d1db352 100644 --- a/setup.py +++ b/setup.py @@ -16,6 +16,7 @@ graphgym_requires = [ 'yacs', 'hydra-core', + 'protobuf<4.21', 'pytorch-lightning', ] @@ -36,6 +37,7 @@ ] benchmark_requires = [ + 'protobuf<4.21', 'wandb', ] From 590a3520d4498bbca5e5ae9d276b385bb91dfe7d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 26 May 2022 15:32:35 +0200 Subject: [PATCH 0078/2432] Validate `Aggregation` inputs (#4721) * validate * Add test --- CHANGELOG.md | 2 +- test/nn/aggr/test_basic.py | 14 ++++++++++++++ torch_geometric/nn/aggr/base.py | 15 +++++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c54fe572405c..f06e371abcf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715)) diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py index 1e1dd07a1ed3..19effd874301 100644 --- a/test/nn/aggr/test_basic.py +++ b/test/nn/aggr/test_basic.py @@ -13,6 +13,20 @@ ) +def test_validate(): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + ptr = torch.tensor([0, 2, 5, 6]) + + aggr = MeanAggregation() + + with pytest.raises(ValueError, match='invalid dimension'): + aggr(x, index, dim=-3) + + with pytest.raises(ValueError, match='mismatch between'): + aggr(x, ptr=ptr, dim_size=2) + + @pytest.mark.parametrize('Aggregation', [ MeanAggregation, SumAggregation, MaxAggregation, MinAggregation, VarAggregation, StdAggregation diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index f36efc7bd09a..00bf8ea27197 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -33,6 +33,21 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *, def reset_parameters(self): pass + def __call__(self, 
x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + if dim >= x.dim() or dim < -x.dim(): + raise ValueError(f"Encountered invalid dimension '{dim}' of " + f"source tensor with {x.dim()} dimensions") + + if (ptr is not None and dim_size is not None + and dim_size != ptr.numel() - 1): + raise ValueError(f"Encountered mismatch between 'dim_size' (got " + f"'{dim_size}') and 'ptr' (got '{ptr.size(0)}')") + + return super().__call__(x, index, ptr=ptr, dim_size=dim_size, dim=dim) + def reduce(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2, reduce: str = 'add') -> Tensor: From 282c4f90565d39d1f5fb2c71457f588a6e423525 Mon Sep 17 00:00:00 2001 From: Maria Kadukova Date: Fri, 27 May 2022 13:25:40 +0100 Subject: [PATCH 0079/2432] Automatically construct `ptr` for each graph type when doing "advanced mini-batching" (#4723) * creating ptrs per additional graph types in collate * dataloader test updated respectively * test_batch test updated with a ptr for a followed batch * formatting fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changelog Co-authored-by: mkadukova Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_batch.py | 7 ++++--- test/loader/test_dataloader.py | 2 +- torch_geometric/data/collate.py | 1 + 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f06e371abcf7..ef6b77b86a6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) - Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 9c383064ac3e..098710a8d7b4 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -70,10 +70,10 @@ def test_batch(): assert str(batch) == ('DataBatch(x=[9], edge_index=[2, 12], y=[3], ' 'x_sp=[9, 1, nnz=9], adj=[9, 9, nnz=12], s=[3], ' - 's_batch=[3], array=[3], num_nodes=9, batch=[9], ' - 'ptr=[4])') + 's_batch=[3], s_ptr=[4], array=[3], num_nodes=9, ' + 'batch=[9], ptr=[4])') assert batch.num_graphs == 3 - assert len(batch) == 11 + assert len(batch) == 12 assert batch.x.tolist() == [1, 2, 3, 1, 2, 1, 2, 3, 4] assert batch.y.tolist() == [1, 2, 3] assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() @@ -83,6 +83,7 @@ def test_batch(): assert edge_index.tolist() == batch.edge_index.tolist() assert batch.s == ['1', '2', '3'] assert batch.s_batch.tolist() == [0, 1, 2] + assert batch.s_ptr.tolist() == [0, 1, 2, 3] assert batch.array == [['1', '2'], ['3', '4', '5'], ['6', '7', '8', '9']] assert batch.num_nodes == 9 assert batch.batch.tolist() == [0, 0, 0, 1, 1, 2, 2, 2, 2] diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index 6e21a8cb1952..a793066c95de 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -58,7 +58,7 @@ def test_dataloader(num_workers): assert len(loader) == 2 for batch in loader: - assert len(batch) == 9 + assert len(batch) == 10 assert batch.edge_index_batch.tolist() == [0, 0, 0, 0, 1, 1, 1, 1] diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index e3f8ca1feb25..beddcee4afef 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -101,6 +101,7 @@ def collate( repeats = slices[1:] - slices[:-1] batch = repeat_interleave(repeats.tolist(), device=device) out_store[f'{attr}_batch'] = batch + out_store[f'{attr}_ptr'] = cumsum(repeats.to(device)) # In case the storage holds node, we add a top-level batch vector it: if (add_batch and isinstance(stores[0], NodeStorage) From 243b7078618077b7e8d0356f9cb6b5d9f3ee23c9 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Fri, 27 May 2022 15:36:09 +0300 Subject: [PATCH 0080/2432] Add results with 10 runs for RevGNN (#4730) * Add results with 10 runs * changelog Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- examples/rev_gnn.py | 24 +++++++++++++++++------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ef6b77b86a6c..629e2b443a6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) -- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715)) +- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) - Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) - Confirm that `to_hetero()` works with custom functions, *e.g.*, `dropout_adj` ([4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) diff --git a/examples/rev_gnn.py b/examples/rev_gnn.py index c0a88b69217b..79f1911b4088 100644 --- a/examples/rev_gnn.py +++ b/examples/rev_gnn.py @@ -1,8 +1,8 @@ -# Model Paramters: 206,607 -# Peak GPU memory usage: 1.57 G -# RevGNN with 7 layers and 160 channels reaches around 0.8200 test accuracy. -# Final Train: 0.9373, Highest Val: 0.9230, Final Test: 0.8200. -# Training longer should produces better results. 
+# Peak GPU memory usage is around 1.57 G +# | RevGNN Models | Test Acc | Val Acc | +# |-------------------------|-----------------|-----------------| +# | 112 layers 160 channels | 0.8307 ± 0.0030 | 0.9290 ± 0.0007 | +# | 7 layers 160 channels | 0.8276 ± 0.0027 | 0.9272 ± 0.0006 | import os.path as osp @@ -93,7 +93,7 @@ def forward(self, x, edge_index): train_loader = RandomNodeSampler(data, num_parts=10, shuffle=True, num_workers=5) -# Increase the num_parts of the test loader if you cannot have fix +# Increase the num_parts of the test loader if you cannot fit # the full batch graph into your GPU: test_loader = RandomNodeSampler(data, num_parts=1, num_workers=5) @@ -180,8 +180,18 @@ def test(epoch): return train_acc, valid_acc, test_acc -for epoch in range(1, 501): +best_val = 0.0 +final_train = 0.0 +final_test = 0.0 +for epoch in range(1, 1001): loss = train(epoch) train_acc, val_acc, test_acc = test(epoch) + if val_acc > best_val: + best_val = val_acc + final_train = train_acc + final_test = test_acc print(f'Loss: {loss:.4f}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' f'Test: {test_acc:.4f}') + +print(f'Final Train: {final_train:.4f}, Best Val: {best_val:.4f}, ' + f'Final Test: {final_test:.4f}') From 70a3760c6af529bec7121f7bdd288cb6fc1bb93c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 29 May 2022 09:50:43 +0200 Subject: [PATCH 0081/2432] `LSTMAggregation` (#4731) * impl * update * changelog * docstring * warn * update * update asserts --- CHANGELOG.md | 2 +- test/nn/aggr/test_basic.py | 7 +++- test/nn/aggr/test_lstm.py | 20 ++++++++++ torch_geometric/nn/aggr/__init__.py | 2 + torch_geometric/nn/aggr/base.py | 21 +++++++---- torch_geometric/nn/aggr/lstm.py | 57 +++++++++++++++++++++++++++++ 6 files changed, 99 insertions(+), 10 deletions(-) create mode 100644 test/nn/aggr/test_lstm.py create mode 100644 torch_geometric/nn/aggr/lstm.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 629e2b443a6b..6c562d51dddb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py index 19effd874301..7214742eb56d 100644 --- a/test/nn/aggr/test_basic.py +++ b/test/nn/aggr/test_basic.py @@ -20,10 +20,13 @@ def test_validate(): aggr = MeanAggregation() - with pytest.raises(ValueError, match='invalid dimension'): + with pytest.raises(ValueError, match="either 'index' or 'ptr'"): + aggr(x) + + with pytest.raises(ValueError, match="invalid dimension"): aggr(x, index, dim=-3) - with pytest.raises(ValueError, match='mismatch between'): + with pytest.raises(ValueError, match="mismatch between"): aggr(x, ptr=ptr, dim_size=2) diff --git a/test/nn/aggr/test_lstm.py b/test/nn/aggr/test_lstm.py new file mode 100644 index 000000000000..216bc8c377e4 --- /dev/null +++ b/test/nn/aggr/test_lstm.py @@ -0,0 +1,20 @@ +import pytest +import torch + +from torch_geometric.nn import LSTMAggregation + + +def test_lstm_aggregation(): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + + aggr = LSTMAggregation(16, 32) + assert str(aggr) == 'LSTMAggregation(16, 32)' + + aggr.reset_parameters() + + with pytest.raises(ValueError, match="is not sorted"): + aggr(x, torch.tensor([0, 1, 0, 1, 2, 1])) + + out = aggr(x, index) + assert out.size() == (3, 32) diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py index dbe1e4f086db..dc43e7bbda21 100644 --- a/torch_geometric/nn/aggr/__init__.py +++ b/torch_geometric/nn/aggr/__init__.py @@ -9,6 +9,7 @@ SoftmaxAggregation, PowerMeanAggregation, ) +from .lstm import LSTMAggregation __all__ = classes = [ 'Aggregation', @@ -20,4 +21,5 @@ 'StdAggregation', 'SoftmaxAggregation', 'PowerMeanAggregation', + 'LSTMAggregation', ] diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 00bf8ea27197..2c721fcda2a6 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -8,6 +8,8 @@ class Aggregation(torch.nn.Module, ABC): r"""An abstract base class for implementing custom aggregations.""" + requires_sorted_index = False + @abstractmethod def forward(self, x: Tensor, index: Optional[Tensor] = None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, @@ -37,6 +39,16 @@ def __call__(self, x: Tensor, index: Optional[Tensor] 
= None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: + if index is None and ptr is None: + raise ValueError(f"Expected that either 'index' or 'ptr' is " + f"passed to '{self.__class__.__name__}'") + + if (self.requires_sorted_index and index is not None + and not torch.all(index[:-1] <= index[1:])): + raise ValueError(f"Can not perform aggregation inside " + f"'{self.__class__.__name__}' since the " + f"'index' tensor is not sorted") + if dim >= x.dim() or dim < -x.dim(): raise ValueError(f"Encountered invalid dimension '{dim}' of " f"source tensor with {x.dim()} dimensions") @@ -52,17 +64,12 @@ def reduce(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2, reduce: str = 'add') -> Tensor: - assert index is not None or ptr is not None - if ptr is not None: ptr = expand_left(ptr, dim, dims=x.dim()) return segment_csr(x, ptr, reduce=reduce) - if index is not None: - return scatter(x, index, dim=dim, dim_size=dim_size, reduce=reduce) - - raise ValueError(f"Error in '{self.__class__.__name__}': " - f"One of 'index' or 'ptr' must be defined") + assert index is not None + return scatter(x, index, dim=dim, dim_size=dim_size, reduce=reduce) def __repr__(self) -> str: return f'{self.__class__.__name__}()' diff --git a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py new file mode 100644 index 000000000000..a617d2c49daa --- /dev/null +++ b/torch_geometric/nn/aggr/lstm.py @@ -0,0 +1,57 @@ +from typing import Optional + +from torch import Tensor +from torch.nn import LSTM + +from torch_geometric.nn.aggr import Aggregation +from torch_geometric.utils import to_dense_batch + + +class LSTMAggregation(Aggregation): + r"""Performs LSTM-style aggregation in which the elements to aggregate are + interpreted as a sequence. + + .. warn:: + :class:`LSTMAggregation` is not permutation-invariant. + + .. note:: + :class:`LSTMAggregation` requires sorted indices. + + Args: + in_channels (int): Size of each input sample. + out_channels (int): Size of each output sample. + **kwargs (optional): Additional arguments of :class:`torch.nn.LSTM`. 
+ """ + requires_sorted_index = True + + def __init__(self, in_channels: int, out_channels: int, **kwargs): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.lstm = LSTM(in_channels, out_channels, batch_first=True, **kwargs) + + def reset_parameters(self): + self.lstm.reset_parameters() + + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + if index is None: # TODO + raise NotImplementedError(f"'{self.__class__.__name__}' with " + f"'ptr' not yet supported") + + if x.dim() != 2: + raise ValueError(f"'{self.__class__.__name__}' requires " + f"two-dimensional inputs (got '{x.dim()}')") + + if dim not in [-2, 0]: + raise ValueError(f"'{self.__class__.__name__}' needs to perform " + f"aggregation in first dimension (got '{dim}')") + + x, _ = to_dense_batch(x, index, batch_size=dim_size) + return self.lstm(x)[0][:, -1] + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.out_channels})') From 5a4f8687f628fb4deda1d8708a7e84831c611bc2 Mon Sep 17 00:00:00 2001 From: Aniket Maurya Date: Mon, 30 May 2022 11:40:46 +0530 Subject: [PATCH 0082/2432] Lightning Trainer integration in GraphGym (#4689) * add trainer * add datamodule * fix tests * fix tests * fixes * soft import * revert * update * fix tests * reformat * remove test split * remove skip_train_eval * add soft import * update * update * update * flake Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- graphgym/main.py | 19 +-- test/graphgym/test_graphgym.py | 31 ++--- torch_geometric/graphgym/config.py | 3 - torch_geometric/graphgym/imports.py | 15 +++ torch_geometric/graphgym/logger.py | 10 +- torch_geometric/graphgym/model_builder.py | 11 +- torch_geometric/graphgym/train.py | 139 +++++++-------------- torch_geometric/graphgym/utils/agg_runs.py | 6 +- 9 files changed, 89 insertions(+), 147 deletions(-) create mode 100644 torch_geometric/graphgym/imports.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c562d51dddb..d41de565317d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
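A minimal usage sketch of the `LSTMAggregation` operator introduced in `torch_geometric/nn/aggr/lstm.py` above; it mirrors the accompanying `test/nn/aggr/test_lstm.py` and assumes the export added to `torch_geometric/nn/aggr/__init__.py` is in place (an illustrative sketch, not part of the patch itself):

```python
import torch

from torch_geometric.nn.aggr import LSTMAggregation

x = torch.randn(6, 16)                    # six elements with 16 features each
index = torch.tensor([0, 0, 1, 1, 1, 2])  # group assignment; must be sorted

aggr = LSTMAggregation(in_channels=16, out_channels=32)
out = aggr(x, index)  # each group is fed to the LSTM as a sequence
assert out.size() == (3, 32)
```

Passing an unsorted `index` (or neither `index` nor `ptr`) raises a `ValueError`, as enforced by the validation added to `Aggregation.__call__` above.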
- Added `HeteroData.subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) - Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) - Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) -- Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531)) +- Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531), [#4689](https://github.com/pyg-team/pytorch_geometric/pull/4689)) - Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) - Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620), [#4702](https://github.com/pyg-team/pytorch_geometric/pull/4702)) - Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) diff --git a/graphgym/main.py b/graphgym/main.py index 940c5807efaf..2bb62a7ab5a9 100644 --- a/graphgym/main.py +++ b/graphgym/main.py @@ -13,11 +13,9 @@ set_out_dir, set_run_dir, ) -from torch_geometric.graphgym.loader import create_loader -from torch_geometric.graphgym.logger import create_logger, set_printing +from torch_geometric.graphgym.loader import GraphGymDataModule +from torch_geometric.graphgym.logger import set_printing from torch_geometric.graphgym.model_builder import create_model -from torch_geometric.graphgym.optim import create_optimizer, create_scheduler -from torch_geometric.graphgym.register import train_dict from torch_geometric.graphgym.train import train from torch_geometric.graphgym.utils.agg_runs import agg_runs from torch_geometric.graphgym.utils.comp_budget import params_count @@ -41,22 +39,15 @@ seed_everything(cfg.seed) auto_select_device() # Set machine learning pipeline - loaders = create_loader() - loggers = create_logger() + datamodule = GraphGymDataModule() model = create_model() - optimizer = create_optimizer(model.parameters(), cfg.optim) - scheduler = create_scheduler(optimizer, cfg.optim) # Print model info logging.info(model) logging.info(cfg) cfg.params = params_count(model) logging.info('Num parameters: %s', cfg.params) - # Start training - if cfg.train.mode == 'standard': - train(loggers, loaders, model, optimizer, scheduler) - else: - train_dict[cfg.train.mode](loggers, loaders, model, optimizer, - scheduler) + train(model, datamodule, logger=True) + # Aggregate results from different seeds agg_runs(cfg.out_dir, cfg.metric_best) # When being launched in batch mode, mark a yaml as done diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py index b19c8dd0c44b..aa518443add0 100644 --- a/test/graphgym/test_graphgym.py +++ b/test/graphgym/test_graphgym.py @@ -18,16 +18,11 @@ set_run_dir, ) from torch_geometric.graphgym.loader import create_loader -from torch_geometric.graphgym.logger import ( - LoggerCallback, - create_logger, - set_printing, -) +from torch_geometric.graphgym.logger import LoggerCallback, set_printing from torch_geometric.graphgym.model_builder import create_model from torch_geometric.graphgym.models.gnn import FeatureEncoder, GNNStackStage from torch_geometric.graphgym.models.head import GNNNodeHead -from torch_geometric.graphgym.optim import create_optimizer, create_scheduler -from torch_geometric.graphgym.train 
import train +from torch_geometric.graphgym.train import GraphGymDataModule, train from torch_geometric.graphgym.utils import ( agg_runs, auto_select_device, @@ -85,11 +80,8 @@ def test_run_single_graphgym(auto_resume, skip_train_eval, use_trivial_metric): cfg.metric_best = 'auto' cfg.custom_metrics = [] - loaders = create_loader() - assert len(loaders) == 3 - - loggers = create_logger() - assert len(loggers) == 3 + datamodule = GraphGymDataModule() + assert len(datamodule.loaders) == 3 model = create_model() assert isinstance(model, torch.nn.Module) @@ -98,20 +90,15 @@ def test_run_single_graphgym(auto_resume, skip_train_eval, use_trivial_metric): assert isinstance(model.post_mp, GNNNodeHead) assert len(list(model.pre_mp.children())) == cfg.gnn.layers_pre_mp - optimizer = create_optimizer(model.parameters(), cfg.optim) - assert isinstance(optimizer, torch.optim.Adam) - - scheduler = create_scheduler(optimizer, cfg.optim) - assert isinstance(scheduler, torch.optim.lr_scheduler.CosineAnnealingLR) + optimizer, scheduler = model.configure_optimizers() + assert isinstance(optimizer[0], torch.optim.Adam) + assert isinstance(scheduler[0], torch.optim.lr_scheduler.CosineAnnealingLR) cfg.params = params_count(model) assert cfg.params == 23880 - train(loggers, loaders, model, optimizer, scheduler) - - if use_trivial_metric: - # 6 total epochs, 4 eval epochs, 3 splits (1 training split) - assert num_trivial_metric_calls == 12 if skip_train_eval else 14 + train(model, datamodule, logger=True, + trainer_config={"enable_progress_bar": False}) assert osp.isdir(get_ckpt_dir()) is cfg.train.enable_ckpt diff --git a/torch_geometric/graphgym/config.py b/torch_geometric/graphgym/config.py index 34909d4e1096..a49308a54dc8 100644 --- a/torch_geometric/graphgym/config.py +++ b/torch_geometric/graphgym/config.py @@ -224,9 +224,6 @@ def set_cfg(cfg): # ----------------------------------------------------------------------- # cfg.train = CN() - # Training (and validation) pipeline mode - cfg.train.mode = 'standard' - # Total graph mini-batch size cfg.train.batch_size = 16 diff --git a/torch_geometric/graphgym/imports.py b/torch_geometric/graphgym/imports.py new file mode 100644 index 000000000000..c0fe01012e75 --- /dev/null +++ b/torch_geometric/graphgym/imports.py @@ -0,0 +1,15 @@ +import warnings + +import torch + +try: + import pytorch_lightning as pl + from pytorch_lightning import Callback, LightningModule +except ImportError: + # define fallbacks + pl = object + LightningModule = torch.nn.Module + Callback = object + + warnings.warn("Please install 'pytorch_lightning' for using the GraphGym " + "experiment manager via 'pip install pytorch_lightning'") diff --git a/torch_geometric/graphgym/logger.py b/torch_geometric/graphgym/logger.py index d50f459a8ef7..61dd67a8c49a 100644 --- a/torch_geometric/graphgym/logger.py +++ b/torch_geometric/graphgym/logger.py @@ -276,6 +276,10 @@ def val_logger(self) -> Any: def test_logger(self) -> Any: return self._logger[2] + def close(self): + for logger in self._logger: + logger.close() + def _get_stats( self, epoch_start_time: int, @@ -354,7 +358,6 @@ def on_train_epoch_end( pl_module: 'pl.LightningModule', ): self.train_logger.write_epoch(trainer.current_epoch) - self.train_logger.close() def on_validation_epoch_end( self, @@ -362,7 +365,6 @@ def on_validation_epoch_end( pl_module: 'pl.LightningModule', ): self.val_logger.write_epoch(trainer.current_epoch) - self.val_logger.close() def on_test_epoch_end( self, @@ -370,4 +372,6 @@ def on_test_epoch_end( pl_module: 
'pl.LightningModule', ): self.test_logger.write_epoch(trainer.current_epoch) - self.test_logger.close() + + def on_fit_end(self, trainer, pl_module): + self.close() diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index 96c97d5df720..a596648b38da 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -1,22 +1,15 @@ import time -import warnings from typing import Any, Dict, Tuple import torch from torch_geometric.graphgym.config import cfg +from torch_geometric.graphgym.imports import LightningModule from torch_geometric.graphgym.loss import compute_loss from torch_geometric.graphgym.models.gnn import GNN from torch_geometric.graphgym.optim import create_optimizer, create_scheduler from torch_geometric.graphgym.register import network_dict, register_network -try: - from pytorch_lightning import LightningModule -except ImportError: - LightningModule = torch.nn.Module - warnings.warn("Please install 'pytorch_lightning' for using the GraphGym " - "experiment manager via 'pip install pytorch_lightning'") - register_network('gnn', GNN) @@ -69,7 +62,7 @@ def pre_mp(self) -> torch.nn.Module: return self.model.pre_mp -def create_model(to_device=True, dim_in=None, dim_out=None): +def create_model(to_device=True, dim_in=None, dim_out=None) -> GraphGymModule: r"""Create model for graph machine learning. Args: diff --git a/torch_geometric/graphgym/train.py b/torch_geometric/graphgym/train.py index 0ced7118479f..da4bfd33f70c 100644 --- a/torch_geometric/graphgym/train.py +++ b/torch_geometric/graphgym/train.py @@ -1,95 +1,50 @@ -import logging -import time +from typing import Optional -import torch +from torch.utils.data import DataLoader -from torch_geometric.graphgym.checkpoint import ( - clean_ckpt, - load_ckpt, - save_ckpt, -) +from torch_geometric.data.lightning_datamodule import LightningDataModule +from torch_geometric.graphgym import create_loader +from torch_geometric.graphgym.checkpoint import get_ckpt_dir from torch_geometric.graphgym.config import cfg -from torch_geometric.graphgym.loss import compute_loss -from torch_geometric.graphgym.utils.epoch import ( - is_ckpt_epoch, - is_eval_epoch, - is_train_eval_epoch, -) - - -def train_epoch(logger, loader, model, optimizer, scheduler): - model.train() - time_start = time.time() - for batch in loader: - batch.split = 'train' - optimizer.zero_grad() - batch.to(torch.device(cfg.device)) - pred, true = model(batch) - loss, pred_score = compute_loss(pred, true) - loss.backward() - optimizer.step() - logger.update_stats(true=true.detach().cpu(), - pred=pred_score.detach().cpu(), loss=loss.item(), - lr=scheduler.get_last_lr()[0], - time_used=time.time() - time_start, - params=cfg.params) - time_start = time.time() - scheduler.step() - - -@torch.no_grad() -def eval_epoch(logger, loader, model, split='val'): - model.eval() - time_start = time.time() - for batch in loader: - batch.split = split - batch.to(torch.device(cfg.device)) - pred, true = model(batch) - loss, pred_score = compute_loss(pred, true) - logger.update_stats(true=true.detach().cpu(), - pred=pred_score.detach().cpu(), loss=loss.item(), - lr=0, time_used=time.time() - time_start, - params=cfg.params) - time_start = time.time() - - -def train(loggers, loaders, model, optimizer, scheduler): - """ - The core training pipeline - - Args: - loggers: List of loggers - loaders: List of loaders - model: GNN model - optimizer: PyTorch optimizer - scheduler: PyTorch learning rate scheduler - - """ - 
start_epoch = 0 - if cfg.train.auto_resume: - start_epoch = load_ckpt(model, optimizer, scheduler, - cfg.train.epoch_resume) - if start_epoch == cfg.optim.max_epoch: - logging.info('Checkpoint found, Task already done') - else: - logging.info('Start from epoch {}'.format(start_epoch)) - - num_splits = len(loggers) - split_names = ['val', 'test'] - for cur_epoch in range(start_epoch, cfg.optim.max_epoch): - train_epoch(loggers[0], loaders[0], model, optimizer, scheduler) - if is_train_eval_epoch(cur_epoch): - loggers[0].write_epoch(cur_epoch) - if is_eval_epoch(cur_epoch): - for i in range(1, num_splits): - eval_epoch(loggers[i], loaders[i], model, - split=split_names[i - 1]) - loggers[i].write_epoch(cur_epoch) - if is_ckpt_epoch(cur_epoch) and cfg.train.enable_ckpt: - save_ckpt(model, optimizer, scheduler, cur_epoch) - for logger in loggers: - logger.close() - if cfg.train.ckpt_clean: - clean_ckpt() - - logging.info('Task done, results saved in {}'.format(cfg.run_dir)) +from torch_geometric.graphgym.imports import pl +from torch_geometric.graphgym.logger import LoggerCallback +from torch_geometric.graphgym.model_builder import GraphGymModule + + +class GraphGymDataModule(LightningDataModule): + def __init__(self): + self.loaders = create_loader() + super().__init__(has_val=True, has_test=True) + + def train_dataloader(self) -> DataLoader: + return self.loaders[0] + + def val_dataloader(self) -> DataLoader: + # better way would be to test after fit. + # First call trainer.fit(...) then trainer.test(...) + return self.loaders[1] + + def test_dataloader(self) -> DataLoader: + return self.loaders[2] + + +def train(model: GraphGymModule, datamodule, logger: bool = True, + trainer_config: Optional[dict] = None): + callbacks = [] + if logger: + callbacks.append(LoggerCallback()) + if cfg.train.enable_ckpt: + ckpt_cbk = pl.callbacks.ModelCheckpoint(dirpath=get_ckpt_dir()) + callbacks.append(ckpt_cbk) + + trainer_config = trainer_config or {} + trainer = pl.Trainer( + **trainer_config, + enable_checkpointing=cfg.train.enable_ckpt, + callbacks=callbacks, + default_root_dir=cfg.out_dir, + max_epochs=cfg.optim.max_epoch, + ) + + trainer.fit(model, datamodule=datamodule) + trainer.test(model, datamodule=datamodule) diff --git a/torch_geometric/graphgym/utils/agg_runs.py b/torch_geometric/graphgym/utils/agg_runs.py index fe11f62c71fd..695f8a0efd37 100644 --- a/torch_geometric/graphgym/utils/agg_runs.py +++ b/torch_geometric/graphgym/utils/agg_runs.py @@ -28,7 +28,7 @@ def is_seed(s): def is_split(s): - if s in ['train', 'val', 'test']: + if s in ['train', 'val']: return True else: return False @@ -86,8 +86,8 @@ def agg_runs(dir, metric_best='auto'): validation performance. Options: auto, accuracy, auc. 
''' - results = {'train': None, 'val': None, 'test': None} - results_best = {'train': None, 'val': None, 'test': None} + results = {'train': None, 'val': None} + results_best = {'train': None, 'val': None} for seed in os.listdir(dir): if is_seed(seed): dir_seed = os.path.join(dir, seed) From 9a8ae988a79b38b98e11924fbcd40871c174d06c Mon Sep 17 00:00:00 2001 From: LingxiaoShawn <562998710@qq.com> Date: Mon, 30 May 2022 05:03:38 -0400 Subject: [PATCH 0083/2432] Add `RootedSubgraph` transform and implementations `RootedEgoNets` and `RootedRWSubgraph` (#3926) * add RootedSubgraph trasform [GNNAsKernel] * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update torch_geometric/transforms/rooted_subgraphs.py Co-authored-by: Matthias Fey * Update torch_geometric/transforms/rooted_subgraphs.py Co-authored-by: Matthias Fey * revise * Update torch_geometric/transforms/rooted_subgraphs.py Co-authored-by: Padarn Wilson * Update torch_geometric/transforms/rooted_subgraphs.py Co-authored-by: Padarn Wilson * Update torch_geometric/transforms/rooted_subgraphs.py Co-authored-by: Padarn Wilson * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup suggestions * remove init * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merge master changes * test * test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tests * test * update formatting * further formatting * fix changelog * add withPackage to test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix test mark * Update CHANGELOG.md Co-authored-by: Matthias Fey * refactor rooted subgraph data * refactor rooted subgraph data * fix test * Update torch_geometric/transforms/rooted_subgraphs.py Co-authored-by: Matthias Fey * precompute mapped edge_index * update * typo * flake8 * fix * typo Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey Co-authored-by: Padarn Wilson --- CHANGELOG.md | 1 + README.md | 1 + test/transforms/test_rooted_subgraph.py | 89 ++++++++++ torch_geometric/transforms/__init__.py | 3 + torch_geometric/transforms/rooted_subgraph.py | 168 ++++++++++++++++++ 5 files changed, 262 insertions(+) create mode 100644 test/transforms/test_rooted_subgraph.py create mode 100644 torch_geometric/transforms/rooted_subgraph.py diff --git a/CHANGELOG.md b/CHANGELOG.md index d41de565317d..9b21bc951843 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
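For orientation, a condensed sketch of how the refactored GraphGym training pipeline above is driven end to end, following the updated `graphgym/main.py` and `test/graphgym/test_graphgym.py`; it assumes the global `cfg` has already been populated from a YAML config (a sketch only, not part of the patch):

```python
from torch_geometric.graphgym.model_builder import create_model
from torch_geometric.graphgym.train import GraphGymDataModule, train

# `cfg` is assumed to be configured beforehand (e.g. via load_cfg in main.py).
datamodule = GraphGymDataModule()  # wraps the train/val/test loaders from create_loader()
model = create_model()             # a GraphGymModule, i.e. a LightningModule

# Runs trainer.fit(...) followed by trainer.test(...) under the hood.
train(model, datamodule, logger=True,
      trainer_config={"enable_progress_bar": False})
```

The `trainer_config` dictionary is forwarded to `pytorch_lightning.Trainer`, so any Lightning trainer option can be passed through it.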
## [2.0.5] - 2022-MM-DD ### Added +- Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) - Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) diff --git a/README.md b/README.md index a9e738ca0ada..55b2ca9c896f 100644 --- a/README.md +++ b/README.md @@ -323,6 +323,7 @@ They follow an extensible design: It is easy to apply these operators and graph * **[Local Degree Profile](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.LocalDegreeProfile)** from Cai and Wang: [A Simple yet Effective Baseline for Non-attribute Graph Classification](https://arxiv.org/abs/1811.03508) (CoRR 2018) * **[CorrectAndSmooth](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.models.CorrectAndSmooth)** from Huang *et al.*: [Combining Label Propagation And Simple Models Out-performs Graph Neural Networks](https://arxiv.org/abs/2010.13993) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/correct_and_smooth.py)] * **[Gini](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.functional.gini)** and **[BRO](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.functional.bro)** regularization from Henderson *et al.*: [Improving Molecular Graph Neural Network Explainability with Orthonormalization and Induced Sparsity](https://arxiv.org/abs/2105.04854) (ICML 2021) +* **[RootedEgoNets](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.RootedEgoNets)** and **[RootedRWSubgraph](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.RootedRWSubgraph)** from Zhao *et al.*: [From Stars to Subgraphs: Uplifting Any GNN with Local Structure Awareness](https://arxiv.org/abs/2110.03753) (ICLR 2022) **Scalable GNNs:** diff --git a/test/transforms/test_rooted_subgraph.py b/test/transforms/test_rooted_subgraph.py new file mode 100644 index 000000000000..c1e593da0fb4 --- /dev/null +++ b/test/transforms/test_rooted_subgraph.py @@ -0,0 +1,89 @@ +import torch + +from torch_geometric.data import Data +from torch_geometric.loader import DataLoader +from torch_geometric.testing import withPackage +from torch_geometric.transforms import RootedEgoNets, RootedRWSubgraph + + +def test_rooted_ego_nets(): + x = torch.randn(3, 8) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + edge_attr = torch.randn(4, 8) + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + + transform = RootedEgoNets(num_hops=1) + assert str(transform) == 'RootedEgoNets(num_hops=1)' + + out = transform(data) + assert len(out) == 8 + + assert torch.equal(out.x, data.x) + assert torch.equal(out.edge_index, data.edge_index) + assert torch.equal(out.edge_attr, data.edge_attr) + + assert out.sub_edge_index.tolist() 
== [[0, 1, 2, 3, 3, 4, 5, 6], + [1, 0, 3, 2, 4, 3, 6, 5]] + assert out.n_id.tolist() == [0, 1, 0, 1, 2, 1, 2] + assert out.n_sub_batch.tolist() == [0, 0, 1, 1, 1, 2, 2] + assert out.e_id.tolist() == [0, 1, 0, 1, 2, 3, 2, 3] + assert out.e_sub_batch.tolist() == [0, 0, 1, 1, 1, 1, 2, 2] + + out = out.map_data() + assert len(out) == 4 + + assert torch.allclose(out.x, x[[0, 1, 0, 1, 2, 1, 2]]) + assert out.edge_index.tolist() == [[0, 1, 2, 3, 3, 4, 5, 6], + [1, 0, 3, 2, 4, 3, 6, 5]] + assert torch.allclose(out.edge_attr, edge_attr[[0, 1, 0, 1, 2, 3, 2, 3]]) + assert out.n_sub_batch.tolist() == [0, 0, 1, 1, 1, 2, 2] + + +@withPackage('torch_cluster') +def test_rooted_rw_subgraph(): + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + data = Data(edge_index=edge_index, num_nodes=3) + + transform = RootedRWSubgraph(walk_length=1) + assert str(transform) == 'RootedRWSubgraph(walk_length=1)' + + out = transform(data) + assert len(out) == 7 + + assert out.n_sub_batch.tolist() == [0, 0, 1, 1, 2, 2] + assert out.sub_edge_index.size() == (2, 6) + + out = out.map_data() + assert len(out) == 3 + + assert out.edge_index.size() == (2, 6) + assert out.num_nodes == 6 + assert out.n_sub_batch.tolist() == [0, 0, 1, 1, 2, 2] + + +def test_rooted_subgraph_minibatch(): + x = torch.randn(3, 8) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + edge_attr = torch.randn(4, 8) + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + + transform = RootedEgoNets(num_hops=1) + data = transform(data) + + loader = DataLoader([data, data], batch_size=2) + batch = next(iter(loader)) + batch = batch.map_data() + assert len(batch) == 6 + + assert batch.x.size() == (14, 8) + assert batch.edge_index.size() == (2, 16) + assert batch.edge_attr.size() == (16, 8) + assert batch.n_sub_batch.size() == (14, ) + assert batch.batch.size() == (14, ) + assert batch.ptr.size() == (3, ) + + assert batch.edge_index.min() == 0 + assert batch.edge_index.max() == 13 + + assert batch.n_sub_batch.min() == 0 + assert batch.n_sub_batch.max() == 5 diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index fbd52180fdb8..e9173b6227ae 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -46,6 +46,7 @@ from .random_node_split import RandomNodeSplit from .random_link_split import RandomLinkSplit from .add_metapaths import AddMetaPaths +from .rooted_subgraph import RootedEgoNets, RootedRWSubgraph from .largest_connected_components import LargestConnectedComponents from .virtual_node import VirtualNode from .add_positional_encoding import AddLaplacianEigenvectorPE, AddRandomWalkPE @@ -99,6 +100,8 @@ 'RandomNodeSplit', 'RandomLinkSplit', 'AddMetaPaths', + 'RootedEgoNets', + 'RootedRWSubgraph', 'LargestConnectedComponents', 'VirtualNode', 'AddLaplacianEigenvectorPE', diff --git a/torch_geometric/transforms/rooted_subgraph.py b/torch_geometric/transforms/rooted_subgraph.py new file mode 100644 index 000000000000..ca716b6a763d --- /dev/null +++ b/torch_geometric/transforms/rooted_subgraph.py @@ -0,0 +1,168 @@ +import copy +from abc import ABC, abstractmethod +from typing import Any, Tuple + +import torch +from torch import Tensor +from torch_sparse import SparseTensor + +from torch_geometric.data import Data +from torch_geometric.transforms import BaseTransform + + +class RootedSubgraphData(Data): + r"""A data object describing a homogeneous graph together with each node's + rooted subgraph. 
It contains several additional properties that hold the + information to map to batch of every node's rooted subgraph: + + * :obj:`sub_edge_index` (Tensor): The edge indices of all combined rooted + subgraphs. + * :obj:`n_id` (Tensor): The indices of nodes in all combined rooted + subgraphs. + * :obj:`e_id` (Tensor): The indices of edges in all combined rooted + subgraphs. + * :obj:`n_sub_batch` (Tensor): The batch vector to distinguish nodes across + different subgraphs. + * :obj:`e_sub_batch` (Tensor): The batch vector to distinguish edges across + different subgraphs. + """ + def __inc__(self, key, value, *args, **kwargs) -> Any: + if key == 'sub_edge_index': + return self.n_id.size(0) + if key in ['n_sub_batch', 'e_sub_batch']: + return 1 + int(self.n_sub_batch[-1]) + elif key == 'n_id': + return self.num_nodes + elif key == 'e_id': + return self.edge_index.size(1) + return super().__inc__(key, value, *args, **kwargs) + + def map_data(self) -> Data: + # Maps all feature information of the :class:`Data` object to each + # rooted subgraph. + data = copy.copy(self) + + for key, value in self.items(): + if key in ['sub_edge_index', 'n_id', 'e_id', 'e_sub_batch']: + del data[key] + elif key == 'n_sub_batch': + continue + elif key == 'num_nodes': + data.num_nodes = self.n_id.size(0) + elif key == 'edge_index': + data.edge_index = self.sub_edge_index + elif self.is_node_attr(key): + dim = self.__cat_dim__(key, value) + data[key] = value.index_select(dim, self.n_id) + elif self.is_edge_attr(key): + dim = self.__cat_dim__(key, value) + data[key] = value.index_select(dim, self.e_id) + + return data + + +class RootedSubgraph(BaseTransform, ABC): + r"""Base class for implementing rooted subgraph transformations.""" + @abstractmethod + def extract( + self, + data: Data, + ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + # Returns the tuple: + # :obj:`(sub_edge_index, n_id, e_id, n_sub_batch, e_sub_batch)` + # of the :class:`RootedSubgraphData` object. + pass + + def map( + self, + data: Data, + n_mask: Tensor, + ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + + n_sub_batch, n_id = n_mask.nonzero().t() + e_mask = n_mask[:, data.edge_index[0]] & n_mask[:, data.edge_index[1]] + e_sub_batch, e_id = e_mask.nonzero().t() + + sub_edge_index = data.edge_index[:, e_id] + arange = torch.arange(n_id.size(0), device=data.edge_index.device) + node_map = data.edge_index.new_ones(data.num_nodes, data.num_nodes) + node_map[n_sub_batch, n_id] = arange + sub_edge_index += (arange * data.num_nodes)[e_sub_batch] + sub_edge_index = node_map.view(-1)[sub_edge_index] + + return sub_edge_index, n_id, e_id, n_sub_batch, e_sub_batch + + def __call__(self, data: Data) -> RootedSubgraphData: + out = self.extract(data) + d = RootedSubgraphData.from_dict(data.to_dict()) + d.sub_edge_index, d.n_id, d.e_id, d.n_sub_batch, d.e_sub_batch = out + return d + + +class RootedEgoNets(RootedSubgraph): + r"""Collects rooted :math:`k`-hop EgoNets for each node in the graph, as + described in the `"From Stars to Subgraphs: Uplifting Any GNN with Local + Structure Awareness" `_ paper. + + Args: + num_hops (int): the number of hops :math:`k`. 
+ """ + def __init__(self, num_hops: int): + super().__init__() + self.num_hops = num_hops + + def extract( + self, + data: Data, + ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + + adj_t = SparseTensor.from_edge_index( + data.edge_index, + sparse_sizes=(data.num_nodes, data.num_nodes), + ).t() + + n_mask = torch.eye(data.num_nodes, device=data.edge_index.device) + for _ in range(self.num_hops): + n_mask += adj_t @ n_mask + + return self.map(data, n_mask > 0) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}(num_hops={self.num_hops})' + + +class RootedRWSubgraph(RootedSubgraph): + """Collects rooted random-walk based subgraphs for each node in the graph, + as described in the `"From Stars to Subgraphs: Uplifting Any GNN with Local + Structure Awareness" `_ paper. + + Args: + walk_length (int): the length of the random walk. + repeat (int, optional): The number of times of repeating the random + walk to reduce randomness. (default: :obj:`1`) + """ + def __init__(self, walk_length: int, repeat: int = 1): + super().__init__() + self.walk_length = walk_length + self.repeat = repeat + + def extract( + self, + data: Data, + ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + from torch_cluster import random_walk + + start = torch.arange(data.num_nodes, device=data.edge_index.device) + start = start.view(-1, 1).repeat(1, self.repeat).view(-1) + walk = random_walk(data.edge_index[0], data.edge_index[1], start, + self.walk_length, num_nodes=data.num_nodes) + + n_mask = torch.zeros((data.num_nodes, data.num_nodes), + dtype=torch.bool, device=walk.device) + start = start.view(-1, 1).repeat(1, (self.walk_length + 1)).view(-1) + n_mask[start, walk.view(-1)] = True + + return self.map(data, n_mask) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}(walk_length={self.walk_length})' From 934e880d2376f6d97878cf8614f68993f99ff25d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 2 Jun 2022 09:19:45 +0200 Subject: [PATCH 0084/2432] CI: Fix versions of `checkout` and `setup-python` (#4751) * initial commit * changelog * Update CHANGELOG.md --- .github/workflows/building_pyg_conda.yml | 2 +- .github/workflows/building_rusty1s_conda.yml | 2 +- .github/workflows/changelog.yml | 2 +- .github/workflows/codeql_analysis.yml | 2 +- .github/workflows/documentation.yml | 4 ++-- .github/workflows/examples.yml | 4 ++-- .github/workflows/full_testing.yml | 4 ++-- .github/workflows/install.yml | 4 ++-- .github/workflows/linting.yml | 8 ++++---- .github/workflows/nightly.yml | 4 ++-- .github/workflows/testing.yml | 4 ++-- CHANGELOG.md | 1 + 12 files changed, 21 insertions(+), 20 deletions(-) diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml index 33b1294eebe2..0b366543ec21 100644 --- a/.github/workflows/building_pyg_conda.yml +++ b/.github/workflows/building_pyg_conda.yml @@ -35,7 +35,7 @@ jobs: cuda-version: 'cu115' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Conda for Python ${{ matrix.python-version }} uses: conda-incubator/setup-miniconda@v2 with: diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml index ff772f532866..87c9d22cb0f5 100644 --- a/.github/workflows/building_rusty1s_conda.yml +++ b/.github/workflows/building_rusty1s_conda.yml @@ -35,7 +35,7 @@ jobs: cuda-version: 'cu115' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Conda for Python ${{ matrix.python-version }} uses: 
conda-incubator/setup-miniconda@v2 with: diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index a968e49f88d6..b67f79db916f 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -9,5 +9,5 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: dangoslen/changelog-enforcer@v3 diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml index 04b5ed96cf72..a656daaa451e 100644 --- a/.github/workflows/codeql_analysis.yml +++ b/.github/workflows/codeql_analysis.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Initialize CodeQL uses: github/codeql-action/init@v1 diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 9cf113178039..487eb2b051a4 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -18,9 +18,9 @@ jobs: torch-version: [1.11.0] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 93606f35e3fe..fe4a2733322f 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -22,9 +22,9 @@ jobs: torchvision-version: 0.12.0 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 1d63e1bf7e79..074d3463cfdd 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -22,9 +22,9 @@ jobs: torchvision-version: 0.11.1 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml index 59b562e90361..5a0a00bbd843 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/install.yml @@ -18,9 +18,9 @@ jobs: torch-version: [1.11.0] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 7c14b8953f49..d218849aeed1 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -11,10 +11,10 @@ jobs: pylint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: 3.9 @@ -28,10 +28,10 @@ jobs: mypy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: 3.9 diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 777b98267b1e..292adfe79f11 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml 
@@ -11,10 +11,10 @@ jobs: if: github.repository == 'pyg-team/pytorch_geometric' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: 3.9 diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index a08037c3e99a..00aefbc30867 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -24,9 +24,9 @@ jobs: torchvision-version: 0.12.0 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b21bc951843..430c6fe752bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed versions of `checkout` and `setup-python` in CI ([#4751](https://github.com/pyg-team/pytorch_geometric/pull/4751)) - Fixed `protobuf` version ([#4719](https://github.com/pyg-team/pytorch_geometric/pull/4719)) - Fixed the ranking protocol bug in the RGCN link prediction example ([#4688](https://github.com/pyg-team/pytorch_geometric/pull/4688)) - Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) From 4ea65bdd7850846f9dc79a8a570d3817a9f4a408 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 2 Jun 2022 11:05:32 +0200 Subject: [PATCH 0085/2432] Fix `HANConv` propagation (#4753) * fix HanConv * changelog --- CHANGELOG.md | 1 + torch_geometric/nn/conv/han_conv.py | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 430c6fe752bf..50445d119ebd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed a bug in `HANConv` in which destination node features rather than source node features were propagated ([#4753](https://github.com/pyg-team/pytorch_geometric/pull/4753)) - Fixed versions of `checkout` and `setup-python` in CI ([#4751](https://github.com/pyg-team/pytorch_geometric/pull/4751)) - Fixed `protobuf` version ([#4719](https://github.com/pyg-team/pytorch_geometric/pull/4719)) - Fixed the ranking protocol bug in the RGCN link prediction example ([#4688](https://github.com/pyg-team/pytorch_geometric/pull/4688)) diff --git a/torch_geometric/nn/conv/han_conv.py b/torch_geometric/nn/conv/han_conv.py index b3144bb22965..616040ca5f62 100644 --- a/torch_geometric/nn/conv/han_conv.py +++ b/torch_geometric/nn/conv/han_conv.py @@ -135,13 +135,13 @@ def forward( edge_type = '__'.join(edge_type) lin_src = self.lin_src[edge_type] lin_dst = self.lin_dst[edge_type] + x_src = x_node_dict[src_type] x_dst = x_node_dict[dst_type] - alpha_src = (x_node_dict[src_type] * lin_src).sum(dim=-1) + alpha_src = (x_src * lin_src).sum(dim=-1) alpha_dst = (x_dst * lin_dst).sum(dim=-1) - alpha = (alpha_src, alpha_dst) - # propagate_type: (x_dst: Tensor, alpha: PairTensor) - out = self.propagate(edge_index, x_dst=x_dst, alpha=alpha, - size=None) + # propagate_type: (x_dst: PairTensor, alpha: PairTensor) + out = self.propagate(edge_index, x=(x_src, x_dst), + alpha=(alpha_src, alpha_dst), size=None) out = F.relu(out) out_dict[dst_type].append(out) @@ -157,7 +157,7 @@ def forward( return out_dict - def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor, + def message(self, x_j: Tensor, alpha_i: Tensor, alpha_j: Tensor, index: Tensor, ptr: Optional[Tensor], size_i: Optional[int]) -> Tensor: @@ -165,7 +165,7 @@ def message(self, x_dst_i: Tensor, alpha_i: Tensor, alpha_j: Tensor, alpha = F.leaky_relu(alpha, self.negative_slope) alpha = softmax(alpha, index, ptr, size_i) alpha = F.dropout(alpha, p=self.dropout, training=self.training) - out = x_dst_i * alpha.view(-1, self.heads, 1) + out = x_j * alpha.view(-1, self.heads, 1) return out.view(-1, self.out_channels) def __repr__(self) -> str: From e8de86e397ddac07db7db5e475e12933519f99c6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 2 Jun 2022 11:34:23 +0200 Subject: [PATCH 0086/2432] Tutorial: Add the `bias` vector to the `GCN` model definition (#4755) * add bias term * changelog * typo --- CHANGELOG.md | 1 + docs/source/notes/create_gnn.rst | 23 +++++++++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50445d119ebd..b190a66e1ee9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) - Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731)) diff --git a/docs/source/notes/create_gnn.rst b/docs/source/notes/create_gnn.rst index 2c5255768950..7eec25289af5 100644 --- a/docs/source/notes/create_gnn.rst +++ b/docs/source/notes/create_gnn.rst @@ -44,9 +44,10 @@ The `GCN layer `_ is mathematically defined as .. math:: - \mathbf{x}_i^{(k)} = \sum_{j \in \mathcal{N}(i) \cup \{ i \}} \frac{1}{\sqrt{\deg(i)} \cdot \sqrt{\deg(j)}} \cdot \left( \mathbf{\Theta}^{\top} \cdot \mathbf{x}_j^{(k-1)} \right), + \mathbf{x}_i^{(k)} = \sum_{j \in \mathcal{N}(i) \cup \{ i \}} \frac{1}{\sqrt{\deg(i)} \cdot \sqrt{\deg(j)}} \cdot \left( \mathbf{W}^{\top} \cdot \mathbf{x}_j^{(k-1)} \right) + \mathbf{b}, -where neighboring node features are first transformed by a weight matrix :math:`\mathbf{\Theta}`, normalized by their degree, and finally summed up. +where neighboring node features are first transformed by a weight matrix :math:`\mathbf{W}`, normalized by their degree, and finally summed up. +Lastly, we apply the bias vector :math:`\mathbf{b}` to the aggregated output. This formula can be divided into the following steps: 1. Add self-loops to the adjacency matrix. @@ -54,6 +55,7 @@ This formula can be divided into the following steps: 3. Compute normalization coefficients. 4. Normalize node features in :math:`\phi`. 5. Sum up neighboring node features (:obj:`"add"` aggregation). +6. Apply a final bias vector. Steps 1-3 are typically computed before message passing takes place. Steps 4-5 can be easily processed using the :class:`~torch_geometric.nn.conv.message_passing.MessagePassing` base class. @@ -62,13 +64,21 @@ The full layer implementation is shown below: .. code-block:: python import torch + from torch.nn import Linear, Parameter from torch_geometric.nn import MessagePassing from torch_geometric.utils import add_self_loops, degree class GCNConv(MessagePassing): def __init__(self, in_channels, out_channels): super().__init__(aggr='add') # "Add" aggregation (Step 5). - self.lin = torch.nn.Linear(in_channels, out_channels) + self.lin = Linear(in_channels, out_channels, bias=False) + self.bias = Parameter(torch.Tensor(out_channels)) + + self.reset_parameters() + + def reset_parameters(self): + self.lin.reset_parameters() + self.bias.data.zero_() def forward(self, x, edge_index): # x has shape [N, in_channels] @@ -88,7 +98,12 @@ The full layer implementation is shown below: norm = deg_inv_sqrt[row] * deg_inv_sqrt[col] # Step 4-5: Start propagating messages. - return self.propagate(edge_index, x=x, norm=norm) + out = self.propagate(edge_index, x=x, norm=norm) + + # Step 6: Apply a final bias vector. 
+ out += self.bias + + return out def message(self, x_j, norm): # x_j has shape [E, out_channels] From ef78db3732d459848d865947e4373fb13628d52b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 2 Jun 2022 11:34:45 +0200 Subject: [PATCH 0087/2432] Fix numerical instability in `GeneralConv` and `neighbor_sample` tests (#4754) * fix full testing * changelog * update * changelog * reset * update --- CHANGELOG.md | 1 + test/loader/test_neighbor_sampler.py | 4 ++-- test/nn/conv/test_general_conv.py | 16 ++++++++-------- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b190a66e1ee9..48a758542308 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed a numerical instability in the `GeneralConv` and `neighbor_sample` tests ([#4754](https://github.com/pyg-team/pytorch_geometric/pull/4754)) - Fixed a bug in `HANConv` in which destination node features rather than source node features were propagated ([#4753](https://github.com/pyg-team/pytorch_geometric/pull/4753)) - Fixed versions of `checkout` and `setup-python` in CI ([#4751](https://github.com/pyg-team/pytorch_geometric/pull/4751)) - Fixed `protobuf` version ([#4719](https://github.com/pyg-team/pytorch_geometric/pull/4719)) diff --git a/test/loader/test_neighbor_sampler.py b/test/loader/test_neighbor_sampler.py index ad89422b6f4e..7306f6bf4a17 100644 --- a/test/loader/test_neighbor_sampler.py +++ b/test/loader/test_neighbor_sampler.py @@ -63,7 +63,7 @@ def full(self, x, edge_index): _, n_id, adjs = next(iter(loader)) out1 = model.batch(data.x[n_id], adjs) out2 = model.full(data.x, data.edge_index)[batch] - assert torch.allclose(out1, out2) + assert torch.allclose(out1, out2, atol=1e-7) class GAT(torch.nn.Module): def __init__(self, in_channels, out_channels): @@ -88,4 +88,4 @@ def full(self, x, edge_index): _, n_id, adjs = next(iter(loader)) out1 = model.batch(data.x[n_id], adjs) out2 = model.full(data.x, data.edge_index)[batch] - assert torch.allclose(out1, out2) + assert torch.allclose(out1, out2, atol=1e-7) diff --git a/test/nn/conv/test_general_conv.py b/test/nn/conv/test_general_conv.py index 51b427f74f41..62b1e45cff35 100644 --- a/test/nn/conv/test_general_conv.py +++ b/test/nn/conv/test_general_conv.py @@ -12,47 +12,47 @@ def test_general_conv(): assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, skip_linear=True) assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, directed_msg=False) assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, 
heads=3) assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, attention=True) assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, heads=3, attention=True) assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, heads=3, attention=True, attention_type='dot_product') assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) conv = GeneralConv(8, 32, 16, l2_normalize=True) assert conv.__repr__() == 'GeneralConv(8, 32)' out = conv(x1, edge_index, edge_attr=e1) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4), edge_attr=e1), out) + assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) From cf2010b97930f90f3f67e7079f91723b99e7e9c1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 2 Jun 2022 11:54:49 +0200 Subject: [PATCH 0088/2432] Test `HANConv` with empty tensors (#4756) * initial commit * changelog --- CHANGELOG.md | 1 + test/nn/conv/test_han_conv.py | 32 +++++++++++++++++++++++------ torch_geometric/nn/conv/han_conv.py | 5 ++--- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 48a758542308..50ac495d065f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756)) - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) diff --git a/test/nn/conv/test_han_conv.py b/test/nn/conv/test_han_conv.py index a40223698f1e..fdbd3b475a8d 100644 --- a/test/nn/conv/test_han_conv.py +++ b/test/nn/conv/test_han_conv.py @@ -5,7 +5,6 @@ def test_han_conv(): - x_dict = { 'author': torch.randn(6, 16), 'paper': torch.randn(5, 12), @@ -16,8 +15,8 @@ def test_han_conv(): edge3 = torch.randint(0, 3, (2, 5), dtype=torch.long) edge_index_dict = { ('author', 'metapath0', 'author'): edge1, - ('paper', 'matapath1', 'paper'): edge2, - ('paper', 'matapath2', 'paper'): edge3, + ('paper', 'metapath1', 'paper'): edge2, + ('paper', 'metapath2', 'paper'): edge3, } adj_t_dict = {} @@ -57,7 +56,6 @@ def test_han_conv(): def test_han_conv_lazy(): - x_dict = { 'author': torch.randn(6, 16), 'paper': torch.randn(5, 12), @@ -65,8 +63,8 @@ def test_han_conv_lazy(): edge1 = torch.randint(0, 6, (2, 8), dtype=torch.long) edge2 = torch.randint(0, 5, (2, 6), dtype=torch.long) edge_index_dict = { - ('author', 'metapath0', 'author'): edge1, - ('paper', 'metapath1', 'paper'): edge2, + ('author', 'to', 'author'): edge1, + ('paper', 'to', 'paper'): edge2, } adj_t_dict = {} @@ -90,3 +88,25 @@ def test_han_conv_lazy(): for node_type in out_dict1.keys(): assert torch.allclose(out_dict1[node_type], out_dict2[node_type], atol=1e-6) + + +def test_han_conv_empty_tensor(): + x_dict = { + 'author': torch.randn(6, 16), + 'paper': torch.empty(0, 12), + } + edge_index_dict = { + ('paper', 'to', 'author'): torch.empty((2, 0), dtype=torch.long), + ('author', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), + ('paper', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), + } + + metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) + in_channels = {'author': 16, 'paper': 12} + conv = HANConv(in_channels, 16, metadata, heads=2) + + out_dict = conv(x_dict, edge_index_dict) + assert len(out_dict) == 2 + assert out_dict['author'].size() == (6, 16) + assert torch.all(out_dict['author'] == 0) + assert out_dict['paper'].size() == (0, 16) diff --git a/torch_geometric/nn/conv/han_conv.py b/torch_geometric/nn/conv/han_conv.py index 616040ca5f62..54fc0cbbd7a2 100644 --- a/torch_geometric/nn/conv/han_conv.py +++ b/torch_geometric/nn/conv/han_conv.py @@ -124,9 +124,8 @@ def forward( x_node_dict, out_dict = {}, {} # Iterate over node types: - for node_type, x_node in x_dict.items(): - x_node_dict[node_type] = self.proj[node_type](x_node).view( - -1, H, D) + for node_type, x in x_dict.items(): + x_node_dict[node_type] = self.proj[node_type](x).view(-1, H, D) out_dict[node_type] = [] # Iterate over edge types: From 09f25e9369b7f6f54e4a43413da697ddc6a77ac0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 2 Jun 2022 12:03:56 +0200 Subject: [PATCH 0089/2432] `RandomLinkSplit`: Allow `edge_type == rev_edge_type` (#4757) * initial commit * changelog --- CHANGELOG.md | 1 + test/transforms/test_random_link_split.py | 16 ++++++++++++++++ 
torch_geometric/transforms/random_link_split.py | 3 ++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50ac495d065f..d9d7a922ae08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Allow `edge_type == rev_edge_type` argument in `RandomLinkSplit` ([#4757](https://github.com/pyg-team/pytorch_geometric/pull/4757)) - Fixed a numerical instability in the `GeneralConv` and `neighbor_sample` tests ([#4754](https://github.com/pyg-team/pytorch_geometric/pull/4754)) - Fixed a bug in `HANConv` in which destination node features rather than source node features were propagated ([#4753](https://github.com/pyg-team/pytorch_geometric/pull/4753)) - Fixed versions of `checkout` and `setup-python` in CI ([#4751](https://github.com/pyg-team/pytorch_geometric/pull/4751)) diff --git a/test/transforms/test_random_link_split.py b/test/transforms/test_random_link_split.py index 113a083daa4b..032abe081af3 100644 --- a/test/transforms/test_random_link_split.py +++ b/test/transforms/test_random_link_split.py @@ -175,3 +175,19 @@ def test_random_link_split_on_hetero_data(): train_data['p', 'p'].edge_attr) assert train_data['p', 'a'].edge_index.size() == (2, 600) assert train_data['a', 'p'].edge_index.size() == (2, 600) + + +def test_random_link_split_on_undirected_hetero_data(): + data = HeteroData() + data['p'].x = torch.arange(100) + data['p', 'p'].edge_index = get_edge_index(100, 100, 500) + data['p', 'p'].edge_index = to_undirected(data['p', 'p'].edge_index) + + transform = RandomLinkSplit(is_undirected=True, edge_types=('p', 'p')) + train_data, val_data, test_data = transform(data) + assert train_data['p', 'p'].is_undirected() + + transform = RandomLinkSplit(is_undirected=True, edge_types=('p', 'p'), + rev_edge_types=('p', 'p')) + train_data, val_data, test_data = transform(data) + assert train_data['p', 'p'].is_undirected() diff --git a/torch_geometric/transforms/random_link_split.py b/torch_geometric/transforms/random_link_split.py index eff40bac42ee..6aaca8c78e1b 100644 --- a/torch_geometric/transforms/random_link_split.py +++ b/torch_geometric/transforms/random_link_split.py @@ -146,7 +146,8 @@ def __call__(self, data: Union[Data, HeteroData]): is_undirected = self.is_undirected is_undirected &= not store.is_bipartite() - is_undirected &= rev_edge_type is None + is_undirected &= (rev_edge_type is None + or store._key == data[rev_edge_type]._key) edge_index = store.edge_index if is_undirected: From 8bd9ae484369192af62a040540d53d6bfc881001 Mon Sep 17 00:00:00 2001 From: Rex Ying Date: Fri, 3 Jun 2022 02:02:31 -0700 Subject: [PATCH 0090/2432] Sampling according to `max_sample` within `AddMetaPaths` (#4750) * sampling according to max_sample * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * debugging prints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * remove coalesce * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use sparse adj for sampling * doc * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * rename * [pre-commit.ci] auto 
fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix test * update * linting * update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/transforms/test_add_metapaths.py | 124 ++++++++++++-------- torch_geometric/transforms/add_metapaths.py | 33 +++++- 3 files changed, 105 insertions(+), 53 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9d7a922ae08..ddd3bcbeb156 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added a `max_sample` argument to `AddMetaPaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) - Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756)) - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) diff --git a/test/transforms/test_add_metapaths.py b/test/transforms/test_add_metapaths.py index 2b5c44c9a524..7895ff3c721d 100644 --- a/test/transforms/test_add_metapaths.py +++ b/test/transforms/test_add_metapaths.py @@ -1,61 +1,89 @@ +import copy + import torch +from torch import tensor from torch_geometric.data import HeteroData from torch_geometric.transforms import AddMetaPaths def test_add_metapaths(): - dblp = HeteroData() - dblp['paper'].x = torch.ones(5) - dblp['author'].x = torch.ones(6) - dblp['conference'].x = torch.ones(3) - dblp['paper', 'cites', 'paper'].edge_index = torch.tensor([[0, 1, 2, 3], - [1, 2, 4, 2]]) - dblp['paper', 'author'].edge_index = torch.tensor([[0, 1, 2, 3, 4], - [2, 2, 5, 2, 5]]) - dblp['author', 'paper'].edge_index = dblp['paper', - 'author'].edge_index[[1, 0]] - dblp['conference', 'paper'].edge_index = torch.tensor([[0, 0, 1, 2, 2], - [0, 1, 2, 3, 4]]) - dblp['paper', 'conference'].edge_index = dblp['conference', - 'paper'].edge_index[[1, 0]] + data = HeteroData() + data['p'].x = torch.ones(5) + data['a'].x = torch.ones(6) + data['c'].x = torch.ones(3) + data['p', 'p'].edge_index = tensor([[0, 1, 2, 3], [1, 2, 4, 2]]) + data['p', 'a'].edge_index = tensor([[0, 1, 2, 3, 4], [2, 2, 5, 2, 5]]) + data['a', 'p'].edge_index = data['p', 'a'].edge_index.flip([0]) + data['c', 'p'].edge_index = tensor([[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]]) + data['p', 'c'].edge_index = data['c', 'p'].edge_index.flip([0]) # Test transform options: - orig_edge_type = dblp.edge_types - metapaths = [[('paper', 'conference'), ('conference', 'paper')]] - meta1 = AddMetaPaths(metapaths)(dblp.clone()) - meta2 = AddMetaPaths(metapaths, drop_orig_edges=True)(dblp.clone()) - meta3 = AddMetaPaths(metapaths, drop_orig_edges=True, - keep_same_node_type=True)(dblp.clone()) - meta4 = AddMetaPaths(metapaths, drop_orig_edges=True, - keep_same_node_type=True, - drop_unconnected_nodes=True)(dblp.clone()) - - assert meta1['paper', 'metapath_0', 'paper'].edge_index.shape[-1] == 9 - assert meta2['paper', 'metapath_0', 'paper'].edge_index.shape[-1] == 9 - assert meta3['paper', 'metapath_0', 'paper'].edge_index.shape[-1] == 9 - assert meta4['paper', 'metapath_0', 'paper'].edge_index.shape[-1] == 9 - - assert all([i in meta1.edge_types 
for i in orig_edge_type]) - assert meta2.edge_types == [('paper', 'metapath_0', 'paper')] - assert meta3.edge_types == [('paper', 'cites', 'paper'), - ('paper', 'metapath_0', 'paper')] - assert meta4.edge_types == [('paper', 'cites', 'paper'), - ('paper', 'metapath_0', 'paper')] - - assert meta3.node_types == ['paper', 'author', 'conference'] - assert meta4.node_types == ['paper'] + metapaths = [[('p', 'c'), ('c', 'p')]] + + transform = AddMetaPaths(metapaths) + assert str(transform) == 'AddMetaPaths()' + meta1 = transform(copy.copy(data)) + + transform = AddMetaPaths(metapaths, drop_orig_edges=True) + assert str(transform) == 'AddMetaPaths()' + meta2 = transform(copy.copy(data)) + + transform = AddMetaPaths(metapaths, drop_orig_edges=True, + keep_same_node_type=True) + assert str(transform) == 'AddMetaPaths()' + meta3 = transform(copy.copy(data)) + + transform = AddMetaPaths(metapaths, drop_orig_edges=True, + keep_same_node_type=True, + drop_unconnected_nodes=True) + assert str(transform) == 'AddMetaPaths()' + meta4 = transform(copy.copy(data)) + + assert meta1['metapath_0'].edge_index.size() == (2, 9) + assert meta2['metapath_0'].edge_index.size() == (2, 9) + assert meta3['metapath_0'].edge_index.size() == (2, 9) + assert meta4['metapath_0'].edge_index.size() == (2, 9) + + assert all([i in meta1.edge_types for i in data.edge_types]) + assert meta2.edge_types == [('p', 'metapath_0', 'p')] + assert meta3.edge_types == [('p', 'to', 'p'), ('p', 'metapath_0', 'p')] + assert meta4.edge_types == [('p', 'to', 'p'), ('p', 'metapath_0', 'p')] + + assert meta3.node_types == ['p', 'a', 'c'] + assert meta4.node_types == ['p'] # Test 4-hop metapath: - metapaths = [[('author', 'paper'), ('paper', 'conference')], - [('author', 'paper'), ('paper', 'conference'), - ('conference', 'paper'), ('paper', 'author')]] - meta1 = AddMetaPaths(metapaths)(dblp.clone()) - new_edge_types = [('author', 'metapath_0', 'conference'), - ('author', 'metapath_1', 'author')] - assert meta1[new_edge_types[0]].edge_index.shape[-1] == 4 - assert meta1[new_edge_types[1]].edge_index.shape[-1] == 4 + metapaths = [ + [('a', 'p'), ('p', 'c')], + [('a', 'p'), ('p', 'c'), ('c', 'p'), ('p', 'a')], + ] + transform = AddMetaPaths(metapaths) + meta = transform(copy.copy(data)) + new_edge_types = [('a', 'metapath_0', 'c'), ('a', 'metapath_1', 'a')] + assert meta['metapath_0'].edge_index.size() == (2, 4) + assert meta['metapath_1'].edge_index.size() == (2, 4) # Test `metapath_dict` information: - assert list(meta1.metapath_dict.values()) == metapaths - assert list(meta1.metapath_dict.keys()) == new_edge_types + assert list(meta.metapath_dict.values()) == metapaths + assert list(meta.metapath_dict.keys()) == new_edge_types + + +def test_add_metapaths_max_sample(): + torch.manual_seed(12345) + + data = HeteroData() + data['p'].x = torch.ones(5) + data['a'].x = torch.ones(6) + data['c'].x = torch.ones(3) + data['p', 'p'].edge_index = tensor([[0, 1, 2, 3], [1, 2, 4, 2]]) + data['p', 'a'].edge_index = tensor([[0, 1, 2, 3, 4], [2, 2, 5, 2, 5]]) + data['a', 'p'].edge_index = data['p', 'a'].edge_index.flip([0]) + data['c', 'p'].edge_index = tensor([[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]]) + data['p', 'c'].edge_index = data['c', 'p'].edge_index.flip([0]) + + metapaths = [[('p', 'c'), ('c', 'p')]] + transform = AddMetaPaths(metapaths, max_sample=1) + + meta = transform(data) + assert meta['metapath_0'].edge_index.size(1) < 9 diff --git a/torch_geometric/transforms/add_metapaths.py b/torch_geometric/transforms/add_metapaths.py index 
e9e213dec42c..8f68bfe1c7a4 100644 --- a/torch_geometric/transforms/add_metapaths.py +++ b/torch_geometric/transforms/add_metapaths.py @@ -1,4 +1,4 @@ -from typing import List +from typing import List, Optional import torch from torch_sparse import SparseTensor @@ -7,6 +7,7 @@ from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform from torch_geometric.typing import EdgeType +from torch_geometric.utils import degree @functional_transform('add_metapaths') @@ -83,11 +84,18 @@ class AddMetaPaths(BaseTransform): (default: :obj:`False`) drop_unconnected_nodes (bool, optional): If set to :obj:`True` drop node types not connected by any edge type. (default: :obj:`False`) + max_sample (int, optional): If set, will sample at maximum + :obj:`max_sample` neighbors within metapaths. Useful in order to + tackle very dense metapath edges. (default: :obj:`None`) """ - def __init__(self, metapaths: List[List[EdgeType]], - drop_orig_edges: bool = False, - keep_same_node_type: bool = False, - drop_unconnected_nodes: bool = False): + def __init__( + self, + metapaths: List[List[EdgeType]], + drop_orig_edges: bool = False, + keep_same_node_type: bool = False, + drop_unconnected_nodes: bool = False, + max_sample: Optional[int] = None, + ): for path in metapaths: assert len(path) >= 2, f"Invalid metapath '{path}'" @@ -99,6 +107,7 @@ def __init__(self, metapaths: List[List[EdgeType]], self.drop_orig_edges = drop_orig_edges self.keep_same_node_type = keep_same_node_type self.drop_unconnected_nodes = drop_unconnected_nodes + self.max_sample = max_sample def __call__(self, data: HeteroData) -> HeteroData: edge_types = data.edge_types # save original edge types @@ -114,12 +123,19 @@ def __call__(self, data: HeteroData) -> HeteroData: edge_index=data[edge_type].edge_index, sparse_sizes=data[edge_type].size()) + if self.max_sample is not None: + adj1 = self.sample_adj(adj1) + for i, edge_type in enumerate(metapath[1:]): adj2 = SparseTensor.from_edge_index( edge_index=data[edge_type].edge_index, sparse_sizes=data[edge_type].size()) + adj1 = adj1 @ adj2 + if self.max_sample is not None: + adj1 = self.sample_adj(adj1) + row, col, _ = adj1.coo() new_edge_type = (metapath[0][0], f'metapath_{j}', metapath[-1][-1]) data[new_edge_type].edge_index = torch.vstack([row, col]) @@ -145,3 +161,10 @@ def __call__(self, data: HeteroData) -> HeteroData: del data[node] return data + + def sample_adj(self, adj: SparseTensor) -> SparseTensor: + row, col, _ = adj.coo() + deg = degree(row, num_nodes=adj.size(0)) + prob = (self.max_sample * (1. 
/ deg))[row] + mask = torch.rand_like(prob) < prob + return adj.masked_select_nnz(mask, layout='coo') From 893aca527033888df1fbfa7207b6bc34f020ff4a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 5 Jun 2022 09:23:15 +0200 Subject: [PATCH 0091/2432] `nn.aggr.Set2Set` (#4762) * update * update * updatE * update * fix test * update * update * add todo * fix test --- CHANGELOG.md | 2 +- README.md | 2 +- test/nn/aggr/test_basic.py | 5 +- test/nn/aggr/test_lstm.py | 2 - test/nn/{glob => aggr}/test_set2set.py | 0 torch_geometric/nn/aggr/__init__.py | 2 + torch_geometric/nn/aggr/base.py | 76 +++++++++++++++++----- torch_geometric/nn/aggr/lstm.py | 19 +----- torch_geometric/nn/aggr/set2set.py | 67 +++++++++++++++++++ torch_geometric/nn/glob/__init__.py | 10 ++- torch_geometric/nn/glob/set2set.py | 90 -------------------------- 11 files changed, 141 insertions(+), 134 deletions(-) rename test/nn/{glob => aggr}/test_set2set.py (100%) create mode 100644 torch_geometric/nn/aggr/set2set.py delete mode 100644 torch_geometric/nn/glob/set2set.py diff --git a/CHANGELOG.md b/CHANGELOG.md index ddd3bcbeb156..26238f529e92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/README.md b/README.md index 55b2ca9c896f..ec81e8886913 100644 --- a/README.md +++ b/README.md @@ -254,7 +254,7 @@ It is commonly applied to graph-level tasks, which require combining node featur Expand to see all implemented pooling layers... 
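To make the new `max_sample` argument of `AddMetaPaths` concrete, here is a minimal sketch (not part of the patch itself); it reuses the small synthetic paper/conference graph from the unit test above, node counts and edge indices are arbitrary, and the number of surviving metapath edges is stochastic:

import torch
from torch_geometric.data import HeteroData
from torch_geometric.transforms import AddMetaPaths

data = HeteroData()
data['p'].x = torch.ones(5)  # papers
data['c'].x = torch.ones(3)  # conferences
data['c', 'p'].edge_index = torch.tensor([[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]])
data['p', 'c'].edge_index = data['c', 'p'].edge_index.flip([0])

# Without sampling, the ('p', 'c'), ('c', 'p') metapath yields 9 edges here;
# `max_sample` keeps the generated metapath graph sparse on dense inputs:
transform = AddMetaPaths([[('p', 'c'), ('c', 'p')]], max_sample=1)
data = transform(data)
print(data['p', 'metapath_0', 'p'].edge_index.size(1))  # typically fewer than 9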
* **[GlobalAttention](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.GlobalAttention)** from Li *et al.*: [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/global_attention.py)] -* **[Set2Set](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.Set2Set)** from Vinyals *et al.*: [Order Matters: Sequence to Sequence for Sets](https://arxiv.org/abs/1511.06391) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/set2set.py)] +* **[Set2Set](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.aggr.Set2Set)** from Vinyals *et al.*: [Order Matters: Sequence to Sequence for Sets](https://arxiv.org/abs/1511.06391) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/set2set.py)] * **[Sort Pool](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.global_sort_pool)** from Zhang *et al.*: [An End-to-End Deep Learning Architecture for Graph Classification](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf) (AAAI 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/sort_pool.py)] * **[MinCUT Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.dense.mincut_pool.dense_mincut_pool)** from Bianchi *et al.*: [MinCUT Pooling in Graph Neural Networks](https://arxiv.org/abs/1907.00481) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_mincut_pool.py)] * **[DMoN Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.dense.dmon_pool.DMoNPooling)** from Tsitsulin *et al.*: [Graph Clustering with Graph Neural Networks](https://arxiv.org/abs/2006.16904) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_dmon_pool.py)] diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py index 7214742eb56d..24227ed25488 100644 --- a/test/nn/aggr/test_basic.py +++ b/test/nn/aggr/test_basic.py @@ -20,13 +20,10 @@ def test_validate(): aggr = MeanAggregation() - with pytest.raises(ValueError, match="either 'index' or 'ptr'"): - aggr(x) - with pytest.raises(ValueError, match="invalid dimension"): aggr(x, index, dim=-3) - with pytest.raises(ValueError, match="mismatch between"): + with pytest.raises(ValueError, match="invalid 'dim_size'"): aggr(x, ptr=ptr, dim_size=2) diff --git a/test/nn/aggr/test_lstm.py b/test/nn/aggr/test_lstm.py index 216bc8c377e4..0ec27bf7e05f 100644 --- a/test/nn/aggr/test_lstm.py +++ b/test/nn/aggr/test_lstm.py @@ -11,8 +11,6 @@ def test_lstm_aggregation(): aggr = LSTMAggregation(16, 32) assert str(aggr) == 'LSTMAggregation(16, 32)' - aggr.reset_parameters() - with pytest.raises(ValueError, match="is not sorted"): aggr(x, torch.tensor([0, 1, 0, 1, 2, 1])) diff --git a/test/nn/glob/test_set2set.py b/test/nn/aggr/test_set2set.py similarity index 100% rename from test/nn/glob/test_set2set.py rename to test/nn/aggr/test_set2set.py diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py index dc43e7bbda21..adc1e7b67e85 100644 --- a/torch_geometric/nn/aggr/__init__.py +++ b/torch_geometric/nn/aggr/__init__.py @@ -10,6 +10,7 @@ PowerMeanAggregation, ) from .lstm 
import LSTMAggregation +from .set2set import Set2Set __all__ = classes = [ 'Aggregation', @@ -22,4 +23,5 @@ 'SoftmaxAggregation', 'PowerMeanAggregation', 'LSTMAggregation', + 'Set2Set', ] diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 2c721fcda2a6..5857fba75d7f 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -1,15 +1,15 @@ from abc import ABC, abstractmethod -from typing import Optional +from typing import Optional, Tuple import torch from torch import Tensor from torch_scatter import scatter, segment_csr +from torch_geometric.utils import to_dense_batch + class Aggregation(torch.nn.Module, ABC): r"""An abstract base class for implementing custom aggregations.""" - requires_sorted_index = False - @abstractmethod def forward(self, x: Tensor, index: Optional[Tensor] = None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, @@ -39,26 +39,59 @@ def __call__(self, x: Tensor, index: Optional[Tensor] = None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: + if dim >= x.dim() or dim < -x.dim(): + raise ValueError(f"Encountered invalid dimension '{dim}' of " + f"source tensor with {x.dim()} dimensions") + if index is None and ptr is None: - raise ValueError(f"Expected that either 'index' or 'ptr' is " - f"passed to '{self.__class__.__name__}'") + index = x.new_zeros(x.size(dim), dtype=torch.long) - if (self.requires_sorted_index and index is not None - and not torch.all(index[:-1] <= index[1:])): + if ptr is not None: + if dim_size is None: + dim_size = ptr.numel() - 1 + elif dim_size != ptr.numel() - 1: + raise ValueError(f"Encountered invalid 'dim_size' (got " + f"'{dim_size}' but expected " + f"'{ptr.numel() - 1}')") + + if index is not None: + if dim_size is None: + dim_size = int(index.max()) + 1 if index.numel() > 0 else 0 + elif index.numel() > 0 and dim_size <= int(index.max()): + raise ValueError(f"Encountered invalid 'dim_size' (got " + f"'{dim_size}' but expected " + f">= '{int(index.max()) + 1}')") + + return super().__call__(x, index, ptr=ptr, dim_size=dim_size, dim=dim) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}()' + + # Assertions ############################################################## + + def assert_index_present(self, index: Optional[Tensor]): + # TODO Currently, not all aggregators support `ptr`. 
This assert helps + # to ensure that we require `index` to be passed to the computation: + if index is None: + raise NotImplementedError(f"'{self.__class__.__name__}' requires " + f"'index' to be specified") + + def assert_sorted_index(self, index: Optional[Tensor]): + if index is not None and not torch.all(index[:-1] <= index[1:]): raise ValueError(f"Can not perform aggregation inside " f"'{self.__class__.__name__}' since the " f"'index' tensor is not sorted") - if dim >= x.dim() or dim < -x.dim(): - raise ValueError(f"Encountered invalid dimension '{dim}' of " - f"source tensor with {x.dim()} dimensions") + def assert_two_dimensional_input(self, x: Tensor, dim: int): + if x.dim() != 2: + raise ValueError(f"'{self.__class__.__name__}' requires " + f"two-dimensional inputs (got '{x.dim()}')") - if (ptr is not None and dim_size is not None - and dim_size != ptr.numel() - 1): - raise ValueError(f"Encountered mismatch between 'dim_size' (got " - f"'{dim_size}') and 'ptr' (got '{ptr.size(0)}')") + if dim not in [-2, 0]: + raise ValueError(f"'{self.__class__.__name__}' needs to perform " + f"aggregation in first dimension (got '{dim}')") - return super().__call__(x, index, ptr=ptr, dim_size=dim_size, dim=dim) + # Helper methods ########################################################## def reduce(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, @@ -71,8 +104,17 @@ def reduce(self, x: Tensor, index: Optional[Tensor] = None, assert index is not None return scatter(x, index, dim=dim, dim_size=dim_size, reduce=reduce) - def __repr__(self) -> str: - return f'{self.__class__.__name__}()' + def to_dense_batch(self, x: Tensor, index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2) -> Tuple[Tensor, Tensor]: + + # TODO Currently, `to_dense_batch` can only operate on `index`: + self.assert_index_present(index) + self.assert_sorted_index(index) + self.assert_two_dimensional_input(x, dim) + + return to_dense_batch(x, index, batch_size=dim_size) ############################################################################### diff --git a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py index a617d2c49daa..966e4cda1cba 100644 --- a/torch_geometric/nn/aggr/lstm.py +++ b/torch_geometric/nn/aggr/lstm.py @@ -4,7 +4,6 @@ from torch.nn import LSTM from torch_geometric.nn.aggr import Aggregation -from torch_geometric.utils import to_dense_batch class LSTMAggregation(Aggregation): @@ -22,13 +21,12 @@ class LSTMAggregation(Aggregation): out_channels (int): Size of each output sample. **kwargs (optional): Additional arguments of :class:`torch.nn.LSTM`. 
""" - requires_sorted_index = True - def __init__(self, in_channels: int, out_channels: int, **kwargs): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.lstm = LSTM(in_channels, out_channels, batch_first=True, **kwargs) + self.reset_parameters() def reset_parameters(self): self.lstm.reset_parameters() @@ -36,20 +34,7 @@ def reset_parameters(self): def forward(self, x: Tensor, index: Optional[Tensor] = None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: - - if index is None: # TODO - raise NotImplementedError(f"'{self.__class__.__name__}' with " - f"'ptr' not yet supported") - - if x.dim() != 2: - raise ValueError(f"'{self.__class__.__name__}' requires " - f"two-dimensional inputs (got '{x.dim()}')") - - if dim not in [-2, 0]: - raise ValueError(f"'{self.__class__.__name__}' needs to perform " - f"aggregation in first dimension (got '{dim}')") - - x, _ = to_dense_batch(x, index, batch_size=dim_size) + x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim) return self.lstm(x)[0][:, -1] def __repr__(self) -> str: diff --git a/torch_geometric/nn/aggr/set2set.py b/torch_geometric/nn/aggr/set2set.py new file mode 100644 index 000000000000..3c9cd00974af --- /dev/null +++ b/torch_geometric/nn/aggr/set2set.py @@ -0,0 +1,67 @@ +from typing import Optional + +import torch +from torch import Tensor + +from torch_geometric.nn.aggr import Aggregation +from torch_geometric.utils import softmax + + +class Set2Set(Aggregation): + r"""The Set2Set aggregation operator based on iterative content-based + attention, as described in the `"Order Matters: Sequence to sequence for + Sets" `_ paper + + .. math:: + \mathbf{q}_t &= \mathrm{LSTM}(\mathbf{q}^{*}_{t-1}) + + \alpha_{i,t} &= \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t) + + \mathbf{r}_t &= \sum_{i=1}^N \alpha_{i,t} \mathbf{x}_i + + \mathbf{q}^{*}_t &= \mathbf{q}_t \, \Vert \, \mathbf{r}_t, + + where :math:`\mathbf{q}^{*}_T` defines the output of the layer with twice + the dimensionality as the input. + + Args: + in_channels (int): Size of each input sample. + processing_steps (int): Number of iterations :math:`T`. + **kwargs (optional): Additional arguments of :class:`torch.nn.LSTM`. 
+ """ + def __init__(self, in_channels: int, processing_steps: int, **kwargs): + super().__init__() + self.in_channels = in_channels + self.out_channels = 2 * in_channels + self.processing_steps = processing_steps + self.lstm = torch.nn.LSTM(self.out_channels, in_channels, **kwargs) + self.reset_parameters() + + def reset_parameters(self): + self.lstm.reset_parameters() + + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + # TODO Currently, `to_dense_batch` can only operate on `index`: + self.assert_index_present(index) + self.assert_two_dimensional_input(x, dim) + + h = (x.new_zeros((self.lstm.num_layers, dim_size, x.size(-1))), + x.new_zeros((self.lstm.num_layers, dim_size, x.size(-1)))) + q_star = x.new_zeros(dim_size, self.out_channels) + + for _ in range(self.processing_steps): + q, h = self.lstm(q_star.unsqueeze(0), h) + q = q.view(dim_size, self.in_channels) + e = (x * q[index]).sum(dim=-1, keepdim=True) + a = softmax(e, index, ptr, dim_size, dim) + r = self.reduce(a * x, index, ptr, dim_size, dim, reduce='add') + q_star = torch.cat([q, r], dim=-1) + + return q_star + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.out_channels})') diff --git a/torch_geometric/nn/glob/__init__.py b/torch_geometric/nn/glob/__init__.py index 8b911ccf859e..be0b138ffcd2 100644 --- a/torch_geometric/nn/glob/__init__.py +++ b/torch_geometric/nn/glob/__init__.py @@ -2,7 +2,6 @@ from .glob import GlobalPooling from .sort import global_sort_pool from .attention import GlobalAttention -from .set2set import Set2Set from .gmt import GraphMultisetTransformer __all__ = [ @@ -12,8 +11,15 @@ 'GlobalPooling', 'global_sort_pool', 'GlobalAttention', - 'Set2Set', 'GraphMultisetTransformer', ] classes = __all__ + +from torch_geometric.deprecation import deprecated # noqa +from torch_geometric.nn.aggr import Set2Set # noqa + +Set2Set = deprecated( + details="use 'nn.aggr.Set2Set' instead", + func_name='nn.glob.Set2Set', +)(Set2Set) diff --git a/torch_geometric/nn/glob/set2set.py b/torch_geometric/nn/glob/set2set.py deleted file mode 100644 index 383548da7138..000000000000 --- a/torch_geometric/nn/glob/set2set.py +++ /dev/null @@ -1,90 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor -from torch_scatter import scatter_add - -from torch_geometric.utils import softmax - - -class Set2Set(torch.nn.Module): - r"""The global pooling operator based on iterative content-based attention - from the `"Order Matters: Sequence to sequence for sets" - `_ paper - - .. math:: - \mathbf{q}_t &= \mathrm{LSTM}(\mathbf{q}^{*}_{t-1}) - - \alpha_{i,t} &= \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t) - - \mathbf{r}_t &= \sum_{i=1}^N \alpha_{i,t} \mathbf{x}_i - - \mathbf{q}^{*}_t &= \mathbf{q}_t \, \Vert \, \mathbf{r}_t, - - where :math:`\mathbf{q}^{*}_T` defines the output of the layer with twice - the dimensionality as the input. - - Args: - in_channels (int): Size of each input sample. - processing_steps (int): Number of iterations :math:`T`. - num_layers (int, optional): Number of recurrent layers, *.e.g*, setting - :obj:`num_layers=2` would mean stacking two LSTMs together to form - a stacked LSTM, with the second LSTM taking in outputs of the first - LSTM and computing the final results. 
(default: :obj:`1`) - - Shapes: - - **input:** - node features :math:`(|\mathcal{V}|, F)`, - batch vector :math:`(|\mathcal{V}|)` *(optional)* - - **output:** graph features :math:`(|\mathcal{G}|, 2 * F)` where - :math:`|\mathcal{G}|` denotes the number of graphs in the batch - """ - def __init__(self, in_channels: int, processing_steps: int, - num_layers: int = 1): - super().__init__() - - self.in_channels = in_channels - self.out_channels = 2 * in_channels - self.processing_steps = processing_steps - self.num_layers = num_layers - - self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels, - num_layers) - - self.reset_parameters() - - def reset_parameters(self): - self.lstm.reset_parameters() - - def forward(self, x: Tensor, batch: Optional[Tensor] = None, - size: Optional[int] = None) -> Tensor: - r""" - Args: - x (Tensor): The input node features. - batch (LongTensor, optional): A vector that maps each node to its - respective graph identifier. (default: :obj:`None`) - size (int, optional): The number of graphs in the batch. - (default: :obj:`None`) - """ - if batch is None: - batch = x.new_zeros(x.size(0), dtype=torch.int64) - - size = int(batch.max()) + 1 if size is None else size - - h = (x.new_zeros((self.num_layers, size, self.in_channels)), - x.new_zeros((self.num_layers, size, self.in_channels))) - q_star = x.new_zeros(size, self.out_channels) - - for _ in range(self.processing_steps): - q, h = self.lstm(q_star.unsqueeze(0), h) - q = q.view(size, self.in_channels) - e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True) - a = softmax(e, batch, num_nodes=size) - r = scatter_add(a * x, batch, dim=0, dim_size=size) - q_star = torch.cat([q, r], dim=-1) - - return q_star - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels})') From df6110954405744639627f4ffb5dd27b4523239b Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Tue, 7 Jun 2022 14:19:16 +0300 Subject: [PATCH 0092/2432] `MultiAggregation` and `aggregation_resolver` (#4749) * Add MulAggregation and MultiAggregation * Fix import issue * Support torch_geometric.nn.aggr package, note: jit errors to fix * Add tests for MulAggregation, MultiAggregation, aggregation_resolver and message_passing interface * Formatting * Fix __repr for gen aggrs * Move resolver * Fix test for MulAggregation * Add test for new mp interface * Add test for MultiAggregation * Minor fix * Add warming for MulAggregation with 'ptr' * Resolve aggr to Aggregation module, remove aggrs logic * changelog * Fix mul aggregation * update * update * update * update * reset Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/nn/aggr/test_basic.py | 12 ++++++--- test/nn/aggr/test_multi.py | 21 ++++++++++++++++ test/nn/test_resolver.py | 24 +++++++++++++++++- torch_geometric/nn/aggr/__init__.py | 6 +++++ torch_geometric/nn/aggr/basic.py | 21 ++++++++++++++++ torch_geometric/nn/aggr/multi.py | 34 +++++++++++++++++++++++++ torch_geometric/nn/resolver.py | 39 +++++++++++++++++++++-------- 8 files changed, 144 insertions(+), 15 deletions(-) create mode 100644 test/nn/aggr/test_multi.py create mode 100644 torch_geometric/nn/aggr/multi.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 26238f529e92..06ce456eb1e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
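The relocated operator can now be used directly as an aggregation module. A minimal sketch follows (feature sizes and the node-to-graph assignment are arbitrary); the old `torch_geometric.nn.glob.Set2Set` import keeps working but is deprecated:

import torch
from torch_geometric.nn.aggr import Set2Set

x = torch.randn(6, 16)                    # node features
index = torch.tensor([0, 0, 1, 1, 1, 2])  # maps each node to its graph

aggr = Set2Set(in_channels=16, processing_steps=3)
out = aggr(x, index)
print(out.size())  # torch.Size([3, 32]) -- twice the input dimensionality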
- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py index 24227ed25488..f3dc0c51964f 100644 --- a/test/nn/aggr/test_basic.py +++ b/test/nn/aggr/test_basic.py @@ -5,6 +5,7 @@ MaxAggregation, MeanAggregation, MinAggregation, + MulAggregation, PowerMeanAggregation, SoftmaxAggregation, StdAggregation, @@ -29,7 +30,7 @@ def test_validate(): @pytest.mark.parametrize('Aggregation', [ MeanAggregation, SumAggregation, MaxAggregation, MinAggregation, - VarAggregation, StdAggregation + MulAggregation, VarAggregation, StdAggregation ]) def test_basic_aggregation(Aggregation): x = torch.randn(6, 16) @@ -41,7 +42,12 @@ def test_basic_aggregation(Aggregation): out = aggr(x, index) assert out.size() == (3, x.size(1)) - assert torch.allclose(out, aggr(x, ptr=ptr)) + + if isinstance(aggr, MulAggregation): + with pytest.raises(NotImplementedError, match="requires 'index'"): + aggr(x, ptr=ptr) + else: + assert torch.allclose(out, aggr(x, ptr=ptr)) @pytest.mark.parametrize('Aggregation', @@ -53,7 +59,7 @@ def test_gen_aggregation(Aggregation, learn): ptr = torch.tensor([0, 2, 5, 6]) aggr = Aggregation(learn=learn) - assert str(aggr) == f'{Aggregation.__name__}()' + assert str(aggr) == f'{Aggregation.__name__}(learn={learn})' out = aggr(x, index) assert out.size() == (3, x.size(1)) diff --git a/test/nn/aggr/test_multi.py b/test/nn/aggr/test_multi.py new file mode 100644 index 000000000000..255ca3de1e09 --- /dev/null +++ b/test/nn/aggr/test_multi.py @@ -0,0 +1,21 @@ +import torch + +from torch_geometric.nn import MultiAggregation + + +def test_multi_aggr(): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + ptr = torch.tensor([0, 2, 5, 6]) + + 
aggrs = ['mean', 'sum', 'max'] + aggr = MultiAggregation(aggrs) + assert str(aggr) == ('MultiAggregation([\n' + ' MeanAggregation(),\n' + ' SumAggregation(),\n' + ' MaxAggregation()\n' + '])') + + out = aggr(x, index) + assert out.size() == (3, len(aggrs) * x.size(1)) + assert torch.allclose(out, aggr(x, ptr=ptr)) diff --git a/test/nn/test_resolver.py b/test/nn/test_resolver.py index 5c67f95749f9..218381a013ec 100644 --- a/test/nn/test_resolver.py +++ b/test/nn/test_resolver.py @@ -1,6 +1,11 @@ +import pytest import torch -from torch_geometric.nn.resolver import activation_resolver +import torch_geometric +from torch_geometric.nn.resolver import ( + activation_resolver, + aggregation_resolver, +) def test_activation_resolver(): @@ -11,3 +16,20 @@ def test_activation_resolver(): assert isinstance(activation_resolver('elu'), torch.nn.ELU) assert isinstance(activation_resolver('relu'), torch.nn.ReLU) assert isinstance(activation_resolver('prelu'), torch.nn.PReLU) + + +@pytest.mark.parametrize('aggr_tuple', [ + (torch_geometric.nn.aggr.MeanAggregation, 'mean'), + (torch_geometric.nn.aggr.SumAggregation, 'sum'), + (torch_geometric.nn.aggr.MaxAggregation, 'max'), + (torch_geometric.nn.aggr.MinAggregation, 'min'), + (torch_geometric.nn.aggr.MulAggregation, 'mul'), + (torch_geometric.nn.aggr.VarAggregation, 'var'), + (torch_geometric.nn.aggr.StdAggregation, 'std'), + (torch_geometric.nn.aggr.SoftmaxAggregation, 'softmax'), + (torch_geometric.nn.aggr.PowerMeanAggregation, 'powermean'), +]) +def test_aggregation_resolver(aggr_tuple): + aggr_module, aggr_repr = aggr_tuple + assert isinstance(aggregation_resolver(aggr_module()), aggr_module) + assert isinstance(aggregation_resolver(aggr_repr), aggr_module) diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py index adc1e7b67e85..11934d179090 100644 --- a/torch_geometric/nn/aggr/__init__.py +++ b/torch_geometric/nn/aggr/__init__.py @@ -1,9 +1,12 @@ from .base import Aggregation +from .multi import MultiAggregation from .basic import ( MeanAggregation, SumAggregation, + AddAggregation, MaxAggregation, MinAggregation, + MulAggregation, VarAggregation, StdAggregation, SoftmaxAggregation, @@ -14,10 +17,13 @@ __all__ = classes = [ 'Aggregation', + 'MultiAggregation', 'MeanAggregation', 'SumAggregation', + 'AddAggregation', 'MaxAggregation', 'MinAggregation', + 'MulAggregation', 'VarAggregation', 'StdAggregation', 'SoftmaxAggregation', diff --git a/torch_geometric/nn/aggr/basic.py b/torch_geometric/nn/aggr/basic.py index 3b52fc225fad..1e5adf80fa36 100644 --- a/torch_geometric/nn/aggr/basic.py +++ b/torch_geometric/nn/aggr/basic.py @@ -22,6 +22,9 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *, return self.reduce(x, index, ptr, dim_size, dim, reduce='sum') +AddAggregation = SumAggregation # Alias + + class MaxAggregation(Aggregation): def forward(self, x: Tensor, index: Optional[Tensor] = None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, @@ -36,6 +39,15 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *, return self.reduce(x, index, ptr, dim_size, dim, reduce='min') +class MulAggregation(Aggregation): + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + # TODO Currently, `mul` reduction can only operate on `index`: + self.assert_index_present(index) + return self.reduce(x, index, None, dim_size, dim, reduce='mul') + + class VarAggregation(Aggregation): def 
forward(self, x: Tensor, index: Optional[Tensor] = None, *, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, @@ -61,6 +73,7 @@ def __init__(self, t: float = 1.0, learn: bool = False): super().__init__() self._init_t = t self.t = Parameter(torch.Tensor(1)) if learn else t + self.learn = learn self.reset_parameters() def reset_parameters(self): @@ -77,6 +90,9 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *, alpha = softmax(alpha, index, ptr, dim_size, dim) return self.reduce(x * alpha, index, ptr, dim_size, dim, reduce='sum') + def __repr__(self) -> str: + return (f'{self.__class__.__name__}(learn={self.learn})') + class PowerMeanAggregation(Aggregation): def __init__(self, p: float = 1.0, learn: bool = False): @@ -84,8 +100,10 @@ def __init__(self, p: float = 1.0, learn: bool = False): super().__init__() self._init_p = p self.p = Parameter(torch.Tensor(1)) if learn else p + self.learn = learn self.reset_parameters() + def reset_parameters(self): if isinstance(self.p, Tensor): self.p.data.fill_(self._init_p) @@ -97,3 +115,6 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *, if isinstance(self.p, (int, float)) and self.p == 1: return out return out.clamp_(min=0, max=100).pow(1. / self.p) + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}(learn={self.learn})') diff --git a/torch_geometric/nn/aggr/multi.py b/torch_geometric/nn/aggr/multi.py new file mode 100644 index 000000000000..97b2c713ba12 --- /dev/null +++ b/torch_geometric/nn/aggr/multi.py @@ -0,0 +1,34 @@ +from typing import List, Optional, Union + +import torch +from torch import Tensor + +from torch_geometric.nn.aggr import Aggregation +from torch_geometric.nn.resolver import aggregation_resolver + + +class MultiAggregation(Aggregation): + def __init__(self, aggrs: List[Union[Aggregation, str]]): + super().__init__() + + if not isinstance(aggrs, (list, tuple)): + raise ValueError(f"'aggrs' of '{self.__class__.__name__}' should " + f"be a list or tuple (got '{type(aggrs)}')") + + if len(aggrs) == 0: + raise ValueError(f"'aggrs' of '{self.__class__.__name__}' should " + f"not be empty") + + self.aggrs = [aggregation_resolver(aggr) for aggr in aggrs] + + def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + outs = [] + for aggr in self.aggrs: + outs.append(aggr(x, index, ptr=ptr, dim_size=dim_size, dim=dim)) + return torch.cat(outs, dim=-1) if len(outs) > 1 else outs[0] + + def __repr__(self) -> str: + args = [f' {aggr}' for aggr in self.aggrs] + return '{}([\n{}\n])'.format(self.__class__.__name__, ',\n'.join(args)) diff --git a/torch_geometric/nn/resolver.py b/torch_geometric/nn/resolver.py index 13ea9119eaec..8d5e16ebccfc 100644 --- a/torch_geometric/nn/resolver.py +++ b/torch_geometric/nn/resolver.py @@ -1,7 +1,6 @@ import inspect -from typing import Any, List, Union +from typing import Any, List, Optional, Union -import torch from torch import Tensor @@ -9,21 +8,24 @@ def normalize_string(s: str) -> str: return s.lower().replace('-', '').replace('_', '').replace(' ', '') -def resolver(classes: List[Any], query: Union[Any, str], *args, **kwargs): +def resolver(classes: List[Any], query: Union[Any, str], + base_cls: Optional[Any], *args, **kwargs): + if query is None or not isinstance(query, str): return query - query = normalize_string(query) + query_repr = normalize_string(query) + base_cls_repr = normalize_string(base_cls.__name__) if base_cls else '' for cls in 
classes: - if query == normalize_string(cls.__name__): + cls_repr = normalize_string(cls.__name__) + if query_repr in [cls_repr, cls_repr.replace(base_cls_repr, '')]: if inspect.isclass(cls): return cls(*args, **kwargs) else: return cls - return ValueError( - f"Could not resolve '{query}' among the choices " - f"{set(normalize_string(cls.__name__) for cls in classes)}") + return ValueError(f"Could not resolve '{query}' among the choices " + f"{set(cls.__name__ for cls in classes)}") # Activation Resolver ######################################################### @@ -34,11 +36,28 @@ def swish(x: Tensor) -> Tensor: def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): + import torch + base_cls = torch.nn.Module + acts = [ act for act in vars(torch.nn.modules.activation).values() - if isinstance(act, type) and issubclass(act, torch.nn.Module) + if isinstance(act, type) and issubclass(act, base_cls) ] acts += [ swish, ] - return resolver(acts, query, *args, **kwargs) + return resolver(acts, query, base_cls, *args, **kwargs) + + +# Aggregation Resolver ######################################################## + + +def aggregation_resolver(query: Union[Any, str], *args, **kwargs): + import torch_geometric.nn.aggr as aggrs + base_cls = aggrs.Aggregation + + aggrs = [ + aggr for aggr in vars(aggrs).values() + if isinstance(aggr, type) and issubclass(aggr, base_cls) + ] + return resolver(aggrs, query, base_cls, *args, **kwargs) From f8ab880ab2b475e10597b9353ffab6e1270da766 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 8 Jun 2022 12:38:58 +0200 Subject: [PATCH 0093/2432] Restrict `HeteroData` edge type indexing (#4782) * update * Changelog --- CHANGELOG.md | 1 + test/data/test_hetero_data.py | 18 ++++++++++++++++++ torch_geometric/data/hetero_data.py | 2 +- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06ce456eb1e4..3f169d068958 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
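A short sketch of the two additions (tensor shapes are arbitrary): plain strings are resolved to aggregation modules through the new `aggregation_resolver`, and `MultiAggregation` concatenates the per-aggregator outputs along the feature dimension:

import torch
from torch_geometric.nn import MultiAggregation

x = torch.randn(6, 16)
index = torch.tensor([0, 0, 1, 1, 1, 2])

aggr = MultiAggregation(['mean', 'sum', 'max'])  # strings resolved internally
out = aggr(x, index)
print(out.size())  # torch.Size([3, 48]) -- three 16-dim outputs, concatenated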
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Do not allow accessing edge types in `HeteroData` with two node types when there exists multiple relations between these types ([#4782](https://github.com/pyg-team/pytorch_geometric/pull/4782)) - Allow `edge_type == rev_edge_type` argument in `RandomLinkSplit` ([#4757](https://github.com/pyg-team/pytorch_geometric/pull/4757)) - Fixed a numerical instability in the `GeneralConv` and `neighbor_sample` tests ([#4754](https://github.com/pyg-team/pytorch_geometric/pull/4754)) - Fixed a bug in `HANConv` in which destination node features rather than source node features were propagated ([#4753](https://github.com/pyg-team/pytorch_geometric/pull/4753)) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index bc3a28078d8e..ba5f7a33f389 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -1,8 +1,10 @@ import copy +import pytest import torch from torch_geometric.data import HeteroData +from torch_geometric.data.storage import EdgeStorage x_paper = torch.randn(10, 16) x_author = torch.randn(5, 32) @@ -382,3 +384,19 @@ def test_to_homogeneous_and_vice_versa(): assert len(out) == 1 assert out['paper'].num_nodes == 100 assert out['author'].num_nodes == 200 + + +def test_hetero_data_to_canonical(): + data = HeteroData() + assert isinstance(data['user', 'product'], EdgeStorage) + assert len(data.edge_types) == 1 + assert isinstance(data['user', 'to', 'product'], EdgeStorage) + assert len(data.edge_types) == 1 + + data = HeteroData() + assert isinstance(data['user', 'buys', 'product'], EdgeStorage) + assert isinstance(data['user', 'clicks', 'product'], EdgeStorage) + assert len(data.edge_types) == 2 + + with pytest.raises(TypeError, match="missing 1 required"): + data['user', 'product'] diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 18aa970eb64e..d4e77c1a80e3 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -345,7 +345,7 @@ def _to_canonical(self, *args: Tuple[QueryType]) -> NodeOrEdgeType: if len(edge_types) == 1: args = edge_types[0] return args - else: + elif len(edge_types) == 0: args = (args[0], self.DEFAULT_REL, args[1]) return args From 4c60369c18b7f5b6e42ba26b59b88d39fc593838 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 13 Jun 2022 14:22:44 +0200 Subject: [PATCH 0094/2432] Fix `DimeNetPlusPlus` test (#4800) * update * changelog --- CHANGELOG.md | 2 +- test/nn/models/test_dimenet.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f169d068958..e67bf67c0666 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
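The effect of the indexing restriction, as a small sketch (node and relation names are made up): accessing an edge store by a `(src, dst)` pair only works while the pair is unambiguous; once a second relation exists, the full `(src, rel, dst)` triple has to be used:

import torch
from torch_geometric.data import HeteroData

data = HeteroData()
data['user', 'buys', 'product'].edge_index = torch.empty(2, 0, dtype=torch.long)
data['user', 'clicks', 'product'].edge_index = torch.empty(2, 0, dtype=torch.long)

try:
    data['user', 'product']  # ambiguous: two relations connect these node types
except TypeError:
    print("use the full ('user', 'buys', 'product') triple instead")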
- Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) - Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749)) -- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700)) +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) diff --git a/test/nn/models/test_dimenet.py b/test/nn/models/test_dimenet.py index 91a72ee1cae9..1d9fc5716a2d 100644 --- a/test/nn/models/test_dimenet.py +++ b/test/nn/models/test_dimenet.py @@ -38,4 +38,4 @@ def test_dimenet_plus_plus(): loss = F.l1_loss(out, data.y) loss.backward() optimizer.step() - assert loss < 1 + assert loss < 2 From c957f7f21c69691e7b78044d5b90b10aab20aea9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 23:24:23 +0200 Subject: [PATCH 0095/2432] [pre-commit.ci] pre-commit autoupdate (#4802) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.2.0 → v4.3.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.2.0...v4.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0175c2f4bfda..88ff835ca3e1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.2.0 + rev: v4.3.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace From b57489fd759273a4a7ba4a9613fa87831daa937d Mon Sep 17 00:00:00 2001 From: Will Leeson Date: Wed, 15 Jun 2022 11:01:43 -0400 Subject: [PATCH 0096/2432] Add type hints and TorchScript support to `JumpingKnowledge` (#4805) * Adding typing and TorchScript support * Adding Typing to 
JumpingKnowledge and Torch Script Support * Fixing comment * Update torch_geometric/nn/models/jumping_knowledge.py Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/nn/models/test_jumping_knowledge.py | 25 ++++++++++++++++--- .../nn/models/jumping_knowledge.py | 23 ++++++++++------- 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e67bf67c0666..622e86fbf2b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) - Added a `max_sample` argument to `AddMetaPaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) - Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756)) - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) diff --git a/test/nn/models/test_jumping_knowledge.py b/test/nn/models/test_jumping_knowledge.py index c6ecde5291a0..f1f213a759e8 100644 --- a/test/nn/models/test_jumping_knowledge.py +++ b/test/nn/models/test_jumping_knowledge.py @@ -1,6 +1,7 @@ import torch from torch_geometric.nn import JumpingKnowledge +from torch_geometric.testing import is_full_test def test_jumping_knowledge(): @@ -9,12 +10,30 @@ def test_jumping_knowledge(): model = JumpingKnowledge('cat') assert model.__repr__() == 'JumpingKnowledge(cat)' - assert model(xs).size() == (num_nodes, channels * num_layers) + + out = model(xs) + assert out.size() == (num_nodes, channels * num_layers) + + if is_full_test(): + jit = torch.jit.script(model) + assert torch.allclose(jit(xs), out) model = JumpingKnowledge('max') assert model.__repr__() == 'JumpingKnowledge(max)' - assert model(xs).size() == (num_nodes, channels) + + out = model(xs) + assert out.size() == (num_nodes, channels) + + if is_full_test(): + jit = torch.jit.script(model) + assert torch.allclose(jit(xs), out) model = JumpingKnowledge('lstm', channels, num_layers) assert model.__repr__() == 'JumpingKnowledge(lstm)' - assert model(xs).size() == (num_nodes, channels) + + out = model(xs) + assert out.size() == (num_nodes, channels) + + if is_full_test(): + jit = torch.jit.script(model) + assert torch.allclose(jit(xs), out) diff --git a/torch_geometric/nn/models/jumping_knowledge.py b/torch_geometric/nn/models/jumping_knowledge.py index 83b17053f52d..1fddd9c2f1dd 100644 --- a/torch_geometric/nn/models/jumping_knowledge.py +++ b/torch_geometric/nn/models/jumping_knowledge.py @@ -1,4 +1,7 @@ +from typing import List, Optional + import torch +from torch import Tensor from torch.nn import LSTM, Linear @@ -36,7 +39,8 @@ class JumpingKnowledge(torch.nn.Module): num_layers (int, optional): The number of layers to aggregate. Needs to be only set for LSTM-style aggregation. 
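A minimal sketch of the now-scriptable module (layer count and feature sizes are arbitrary):

import torch
from torch_geometric.nn import JumpingKnowledge

xs = [torch.randn(20, 64) for _ in range(3)]  # per-layer node embeddings

jk = JumpingKnowledge('lstm', channels=64, num_layers=3)
out = jk(xs)  # [20, 64]

jit = torch.jit.script(jk)  # works now that the module carries type hints
assert torch.allclose(jit(xs), out)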
(default: :obj:`None`) """ - def __init__(self, mode, channels=None, num_layers=None): + def __init__(self, mode: str, channels: Optional[int] = None, + num_layers: Optional[int] = None): super().__init__() self.mode = mode.lower() assert self.mode in ['cat', 'max', 'lstm'] @@ -47,29 +51,30 @@ def __init__(self, mode, channels=None, num_layers=None): self.lstm = LSTM(channels, (num_layers * channels) // 2, bidirectional=True, batch_first=True) self.att = Linear(2 * ((num_layers * channels) // 2), 1) + else: + self.lstm = None + self.att = None self.reset_parameters() def reset_parameters(self): - if hasattr(self, 'lstm'): + if self.lstm is not None: self.lstm.reset_parameters() - if hasattr(self, 'att'): + if self.att is not None: self.att.reset_parameters() - def forward(self, xs): + def forward(self, xs: List[Tensor]) -> Tensor: r"""Aggregates representations across different layers. Args: - xs (list or tuple): List containing layer-wise representations. + xs (List[Tensor]): List containing layer-wise representations. """ - - assert isinstance(xs, list) or isinstance(xs, tuple) - if self.mode == 'cat': return torch.cat(xs, dim=-1) elif self.mode == 'max': return torch.stack(xs, dim=-1).max(dim=-1)[0] - elif self.mode == 'lstm': + else: # self.mode == 'lstm' + assert self.lstm is not None and self.att is not None x = torch.stack(xs, dim=1) # [num_nodes, num_layers, num_channels] alpha, _ = self.lstm(x) alpha = self.att(alpha).squeeze(-1) # [num_nodes, num_layers] From 86ccc61461e8949788c37f7076e62be0eac51fb9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 20 Jun 2022 07:42:41 +0200 Subject: [PATCH 0097/2432] Fix `softmax` documentation (#4824) * fix softmax doc * changelog --- CHANGELOG.md | 2 +- docs/source/modules/utils.rst | 1 - torch_geometric/utils/softmax.py | 12 +++++++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 622e86fbf2b5..dbae0e8cf8b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,7 +48,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) - Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) -- Removed unnecessary colons and fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616)) +- Fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616), [#4824](https://github.com/pyg-team/pytorch_geometric/pull/4824)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) - Fixed filtering of attributes for loaders in case `__cat_dim__ != 0` ([#4629](https://github.com/pyg-team/pytorch_geometric/pull/4629)) diff --git a/docs/source/modules/utils.rst b/docs/source/modules/utils.rst index d1b71da6d046..8c9594c27d4c 100644 --- a/docs/source/modules/utils.rst +++ b/docs/source/modules/utils.rst @@ -11,4 +11,3 @@ torch_geometric.utils .. 
automodule:: torch_geometric.utils :members: - :undoc-members: diff --git a/torch_geometric/utils/softmax.py b/torch_geometric/utils/softmax.py index eb08bd5f5d60..5de304012d76 100644 --- a/torch_geometric/utils/softmax.py +++ b/torch_geometric/utils/softmax.py @@ -1,16 +1,18 @@ from typing import Optional -import torch from torch import Tensor from torch_scatter import gather_csr, scatter, segment_csr from .num_nodes import maybe_num_nodes -@torch.jit.script -def softmax(src: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, num_nodes: Optional[int] = None, - dim: int = 0) -> Tensor: +def softmax( + src: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + num_nodes: Optional[int] = None, + dim: int = 0, +) -> Tensor: r"""Computes a sparsely evaluated softmax. Given a value tensor :attr:`src`, this function first groups the values along the first dimension based on the indices specified in :attr:`index`, From e3a52f9ac7b636289376a02f846376635c2a40d0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 20 Jun 2022 14:11:56 +0200 Subject: [PATCH 0098/2432] Added Python version requirement (#4825) * python req * changelog --- CHANGELOG.md | 1 + README.md | 2 ++ docs/source/notes/installation.rst | 2 ++ 3 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbae0e8cf8b9..080dae685f72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) - Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) - Added a `max_sample` argument to `AddMetaPaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) - Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756)) diff --git a/README.md b/README.md index ec81e8886913..47f39ccafe32 100644 --- a/README.md +++ b/README.md @@ -346,6 +346,8 @@ These approaches have been implemented in PyG, and can benefit from the above GN ## Installation +PyG is available for Python 3.7 to Python 3.10. + ### Anaconda **Update:** You can now install PyG via [Anaconda](https://anaconda.org/pyg/pyg) for all major OS/PyTorch/CUDA combinations 🤗 diff --git a/docs/source/notes/installation.rst b/docs/source/notes/installation.rst index b06de33200e0..f8867d46941d 100644 --- a/docs/source/notes/installation.rst +++ b/docs/source/notes/installation.rst @@ -1,6 +1,8 @@ Installation ============ +PyG is available for Python 3.7 to Python 3.10. + .. note:: We do not recommend installation as a root user on your system Python. Please setup a `Anaconda or Miniconda `_ environment or create a `Docker image `_. From d72df6686ea53ca02ca8091df79e49a10a202eec Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 20 Jun 2022 14:20:17 +0200 Subject: [PATCH 0099/2432] Support for dense aggregations in `global_*_pool` (#4827) * global dense pool * update --- CHANGELOG.md | 1 + test/nn/glob/test_glob.py | 5 +++++ torch_geometric/nn/glob/glob.py | 12 ++++++------ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 080dae685f72..c9580f3dd638 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
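As a reminder of what the documented function computes, a tiny sketch (values are arbitrary): one softmax is evaluated per group of entries that share the same `index`:

import torch
from torch_geometric.utils import softmax

src = torch.randn(6)
index = torch.tensor([0, 0, 1, 1, 1, 2])

out = softmax(src, index)
print(out[:2].sum(), out[2:5].sum(), out[5:].sum())  # each group sums to 1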
## [2.0.5] - 2022-MM-DD ### Added +- Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) - Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) - Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) - Added a `max_sample` argument to `AddMetaPaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) diff --git a/test/nn/glob/test_glob.py b/test/nn/glob/test_glob.py index e382fd353cc0..4ff08c363c96 100644 --- a/test/nn/glob/test_glob.py +++ b/test/nn/glob/test_glob.py @@ -65,3 +65,8 @@ def test_permuted_global_pool(): assert out.size() == (2, 4) assert torch.allclose(out[0], px1.max(dim=0)[0]) assert torch.allclose(out[1], px2.max(dim=0)[0]) + + +def test_dense_global_pool(): + x = torch.randn(3, 16, 32) + assert torch.allclose(global_add_pool(x, None), x.sum(dim=1)) diff --git a/torch_geometric/nn/glob/glob.py b/torch_geometric/nn/glob/glob.py index c4eb0b541297..38598f595dd2 100644 --- a/torch_geometric/nn/glob/glob.py +++ b/torch_geometric/nn/glob/glob.py @@ -24,9 +24,9 @@ def global_add_pool(x: Tensor, batch: Optional[Tensor], Automatically calculated if not given. (default: :obj:`None`) """ if batch is None: - return x.sum(dim=0, keepdim=True) + return x.sum(dim=-2, keepdim=x.dim() == 2) size = int(batch.max().item() + 1) if size is None else size - return scatter(x, batch, dim=0, dim_size=size, reduce='add') + return scatter(x, batch, dim=-2, dim_size=size, reduce='add') def global_mean_pool(x: Tensor, batch: Optional[Tensor], @@ -48,9 +48,9 @@ def global_mean_pool(x: Tensor, batch: Optional[Tensor], Automatically calculated if not given. (default: :obj:`None`) """ if batch is None: - return x.mean(dim=0, keepdim=True) + return x.mean(dim=-2, keepdim=x.dim() == 2) size = int(batch.max().item() + 1) if size is None else size - return scatter(x, batch, dim=0, dim_size=size, reduce='mean') + return scatter(x, batch, dim=-2, dim_size=size, reduce='mean') def global_max_pool(x: Tensor, batch: Optional[Tensor], @@ -72,9 +72,9 @@ def global_max_pool(x: Tensor, batch: Optional[Tensor], Automatically calculated if not given. 
(default: :obj:`None`) """ if batch is None: - return x.max(dim=0, keepdim=True)[0] + return x.max(dim=-2, keepdim=x.dim() == 2)[0] size = int(batch.max().item() + 1) if size is None else size - return scatter(x, batch, dim=0, dim_size=size, reduce='max') + return scatter(x, batch, dim=-2, dim_size=size, reduce='max') class GlobalPooling(torch.nn.Module): From c13d62c92696ca724232bb3278ff26819e80e418 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 20 Jun 2022 14:42:15 +0200 Subject: [PATCH 0100/2432] Rename `RandomTranslate` to `RandomJitter` (#4828) * update * changelog * typo * typo --- CHANGELOG.md | 1 + docs/source/notes/introduction.rst | 2 +- examples/dgcnn_segmentation.py | 2 +- examples/point_transformer_segmentation.py | 2 +- examples/pointnet2_segmentation.py | 2 +- ...est_random_translate.py => test_random_jitter.py} | 12 ++++++------ torch_geometric/transforms/__init__.py | 9 +++++++-- .../{random_translate.py => random_jitter.py} | 6 +++--- 8 files changed, 21 insertions(+), 15 deletions(-) rename test/transforms/{test_random_translate.py => test_random_jitter.py} (68%) rename torch_geometric/transforms/{random_translate.py => random_jitter.py} (89%) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9580f3dd638..a349416fc941 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Renamed `RandomTranslate` to `RandomJitter` - the usage of `RandomTranslate` is now deprecated ([#4828](https://github.com/pyg-team/pytorch_geometric/pull/4828)) - Do not allow accessing edge types in `HeteroData` with two node types when there exists multiple relations between these types ([#4782](https://github.com/pyg-team/pytorch_geometric/pull/4782)) - Allow `edge_type == rev_edge_type` argument in `RandomLinkSplit` ([#4757](https://github.com/pyg-team/pytorch_geometric/pull/4757)) - Fixed a numerical instability in the `GeneralConv` and `neighbor_sample` tests ([#4754](https://github.com/pyg-team/pytorch_geometric/pull/4754)) diff --git a/docs/source/notes/introduction.rst b/docs/source/notes/introduction.rst index b128dfe3a340..d86c08d12424 100644 --- a/docs/source/notes/introduction.rst +++ b/docs/source/notes/introduction.rst @@ -330,7 +330,7 @@ In addition, we can use the :obj:`transform` argument to randomly augment a :cla dataset = ShapeNet(root='/tmp/ShapeNet', categories=['Airplane'], pre_transform=T.KNNGraph(k=6), - transform=T.RandomTranslate(0.01)) + transform=T.RandomJitter(0.01)) dataset[0] >>> Data(edge_index=[2, 15108], pos=[2518, 3], y=[2518]) diff --git a/examples/dgcnn_segmentation.py b/examples/dgcnn_segmentation.py index 9b0581f98f13..c040fa08189d 100644 --- a/examples/dgcnn_segmentation.py +++ b/examples/dgcnn_segmentation.py @@ -13,7 +13,7 @@ category = 'Airplane' # Pass in `None` to train on all categories. 
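# A minimal usage sketch of the renamed transform, assuming the PyG ~2.0.5
# API in this patch series (`RandomTranslate` stays available as a deprecated
# alias of `RandomJitter`); the jitter range of 0.1 is illustrative only:
import torch

from torch_geometric.data import Data
from torch_geometric.transforms import RandomJitter

data = Data(pos=torch.zeros(4, 2))
data = RandomJitter(0.1)(data)  # perturbs every coordinate within [-0.1, 0.1]
assert data.pos.min().item() >= -0.1 and data.pos.max().item() <= 0.1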
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') transform = T.Compose([ - T.RandomTranslate(0.01), + T.RandomJitter(0.01), T.RandomRotate(15, axis=0), T.RandomRotate(15, axis=1), T.RandomRotate(15, axis=2) diff --git a/examples/point_transformer_segmentation.py b/examples/point_transformer_segmentation.py index 3ecfa724ed29..86f920283bea 100644 --- a/examples/point_transformer_segmentation.py +++ b/examples/point_transformer_segmentation.py @@ -22,7 +22,7 @@ category = 'Airplane' # Pass in `None` to train on all categories. path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') transform = T.Compose([ - T.RandomTranslate(0.01), + T.RandomJitter(0.01), T.RandomRotate(15, axis=0), T.RandomRotate(15, axis=1), T.RandomRotate(15, axis=2), diff --git a/examples/pointnet2_segmentation.py b/examples/pointnet2_segmentation.py index 15a50723ea47..f316450671cb 100644 --- a/examples/pointnet2_segmentation.py +++ b/examples/pointnet2_segmentation.py @@ -14,7 +14,7 @@ category = 'Airplane' # Pass in `None` to train on all categories. path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') transform = T.Compose([ - T.RandomTranslate(0.01), + T.RandomJitter(0.01), T.RandomRotate(15, axis=0), T.RandomRotate(15, axis=1), T.RandomRotate(15, axis=2) diff --git a/test/transforms/test_random_translate.py b/test/transforms/test_random_jitter.py similarity index 68% rename from test/transforms/test_random_translate.py rename to test/transforms/test_random_jitter.py index 2b335880356c..88e81cb3d08a 100644 --- a/test/transforms/test_random_translate.py +++ b/test/transforms/test_random_jitter.py @@ -1,27 +1,27 @@ import torch from torch_geometric.data import Data -from torch_geometric.transforms import RandomTranslate +from torch_geometric.transforms import RandomJitter -def test_random_translate(): - assert RandomTranslate(0.1).__repr__() == 'RandomTranslate(0.1)' +def test_random_jitter(): + assert RandomJitter(0.1).__repr__() == 'RandomJitter(0.1)' pos = torch.Tensor([[0, 0], [0, 0], [0, 0], [0, 0]]) data = Data(pos=pos) - data = RandomTranslate(0)(data) + data = RandomJitter(0)(data) assert len(data) == 1 assert data.pos.tolist() == pos.tolist() data = Data(pos=pos) - data = RandomTranslate(0.1)(data) + data = RandomJitter(0.1)(data) assert len(data) == 1 assert data.pos.min().item() >= -0.1 assert data.pos.max().item() <= 0.1 data = Data(pos=pos) - data = RandomTranslate([0.1, 1])(data) + data = RandomJitter([0.1, 1])(data) assert len(data) == 1 assert data.pos[:, 0].min().item() >= -0.1 assert data.pos[:, 0].max().item() <= 0.1 diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index e9173b6227ae..b9e2c257f930 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -16,7 +16,7 @@ from .center import Center from .normalize_rotation import NormalizeRotation from .normalize_scale import NormalizeScale -from .random_translate import RandomTranslate +from .random_jitter import RandomJitter from .random_flip import RandomFlip from .linear_transformation import LinearTransformation from .random_scale import RandomScale @@ -70,7 +70,7 @@ 'Center', 'NormalizeRotation', 'NormalizeScale', - 'RandomTranslate', + 'RandomJitter', 'RandomFlip', 'LinearTransformation', 'RandomScale', @@ -109,3 +109,8 @@ ] classes = __all__ + +from torch_geometric.deprecation import deprecated # noqa + +RandomTranslate = deprecated("use 'transforms.RandomJitter' instead", + 
'transforms.RandomTranslate')(RandomJitter) diff --git a/torch_geometric/transforms/random_translate.py b/torch_geometric/transforms/random_jitter.py similarity index 89% rename from torch_geometric/transforms/random_translate.py rename to torch_geometric/transforms/random_jitter.py index af123ad1c95f..cbf5d69bd05d 100644 --- a/torch_geometric/transforms/random_translate.py +++ b/torch_geometric/transforms/random_jitter.py @@ -7,10 +7,10 @@ from torch_geometric.transforms import BaseTransform -@functional_transform('random_translate') -class RandomTranslate(BaseTransform): +@functional_transform('random_jitter') +class RandomJitter(BaseTransform): r"""Translates node positions by randomly sampled translation values - within a given interval (functional name: :obj:`random_translate`). + within a given interval (functional name: :obj:`random_jitter`). In contrast to other random transformations, translation is applied separately at each position From 4b30b6db5be000bdb87e2c9b53fefcdb33c7b1b3 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Mon, 20 Jun 2022 10:20:33 -0700 Subject: [PATCH 0101/2432] Let `Data` and `HeteroData` implement `FeatureStore` (#4807) --- CHANGELOG.md | 1 + test/data/test_data.py | 25 ++++++++++ test/data/test_hetero_data.py | 27 +++++++++++ torch_geometric/data/batch.py | 10 +++- torch_geometric/data/data.py | 67 ++++++++++++++++++++++++++- torch_geometric/data/feature_store.py | 2 +- torch_geometric/data/hetero_data.py | 55 +++++++++++++++++++++- 7 files changed, 182 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a349416fc941..67e5b2bfd7fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807)) - Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) - Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) - Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) diff --git a/test/data/test_data.py b/test/data/test_data.py index b794364be308..be06d43bed0b 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -239,3 +239,28 @@ def my_attr1(self, value): data.my_attr1 = 2 assert 'my_attr1' not in data._store assert data.my_attr1 == 2 + + +# Feature Store ############################################################### + + +def test_basic_feature_store(): + data = Data() + x = torch.randn(20, 20) + + # Put tensor: + assert data.put_tensor(copy.deepcopy(x), attr_name='x', index=None) + assert torch.equal(data.x, x) + + # Put (modify) tensor slice: + x[15:] = 0 + data.put_tensor(0, attr_name='x', index=slice(15, None, None)) + + # Get tensor: + out = data.get_tensor(attr_name='x', index=None) + assert torch.equal(x, out) + + # Remove tensor: + assert 'x' in data.__dict__['_store'] + data.remove_tensor(attr_name='x', index=None) + assert 'x' not in data.__dict__['_store'] diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index ba5f7a33f389..b26832bcb068 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -400,3 +400,30 @@ def test_hetero_data_to_canonical(): with pytest.raises(TypeError, match="missing 1 required"): data['user', 'product'] + + +# Feature Store 
############################################################### + + +def test_basic_feature_store(): + data = HeteroData() + x = torch.randn(20, 20) + + # Put tensor: + assert data.put_tensor(copy.deepcopy(x), group_name='paper', attr_name='x', + index=None) + assert torch.equal(data['paper'].x, x) + + # Put (modify) tensor slice: + x[15:] = 0 + data.put_tensor(0, group_name='paper', attr_name='x', + index=slice(15, None, None)) + + # Get tensor: + out = data.get_tensor(group_name='paper', attr_name='x', index=None) + assert torch.equal(x, out) + + # Remove tensor: + assert 'x' in data['paper'].__dict__['_mapping'] + data.remove_tensor(group_name='paper', attr_name='x', index=None) + assert 'x' not in data['paper'].__dict__['_mapping'] diff --git a/torch_geometric/data/batch.py b/torch_geometric/data/batch.py index ecaae4d663b3..43e553ab1097 100644 --- a/torch_geometric/data/batch.py +++ b/torch_geometric/data/batch.py @@ -23,8 +23,16 @@ def __call__(cls, *args, **kwargs): new_cls = base_cls else: name = f'{base_cls.__name__}{cls.__name__}' + + # NOTE `MetaResolver` is necessary to resolve metaclass conflict + # problems between `DynamicInheritance` and the metaclass of + # `base_cls`. In particular, it creates a new common metaclass + # from the defined metaclasses. + class MetaResolver(type(cls), type(base_cls)): + pass + if name not in globals(): - globals()[name] = type(name, (cls, base_cls), {}) + globals()[name] = MetaResolver(name, (cls, base_cls), {}) new_cls = globals()[name] params = list(inspect.signature(base_cls.__init__).parameters.items()) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 580c9bdd3b6e..3a222246b44e 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -1,5 +1,6 @@ import copy from collections.abc import Mapping, Sequence +from dataclasses import dataclass from typing import ( Any, Callable, @@ -17,6 +18,12 @@ from torch import Tensor from torch_sparse import SparseTensor +from torch_geometric.data.feature_store import ( + FeatureStore, + FeatureTensorType, + TensorAttr, + _field_status, +) from torch_geometric.data.storage import ( BaseStorage, EdgeStorage, @@ -300,7 +307,16 @@ def contains_self_loops(self) -> bool: ############################################################################### -class Data(BaseData): +@dataclass +class DataTensorAttr(TensorAttr): + r"""Attribute class for `Data`, which does not require a `group_name`.""" + def __init__(self, attr_name=_field_status.UNSET, + index=_field_status.UNSET): + # Treat group_name as optional, and move it to the end + super().__init__(None, attr_name, index) + + +class Data(BaseData, FeatureStore): r"""A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. 
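# A minimal sketch of the `FeatureStore` methods that `Data` gains in this
# patch, mirroring `test_basic_feature_store` above (the 20x20 tensor is
# illustrative only):
import torch

from torch_geometric.data import Data

data = Data()
x = torch.randn(20, 20)
data.put_tensor(x.clone(), attr_name='x', index=None)  # stored as `data.x`
out = data.get_tensor(attr_name='x', index=None)       # reads back the full tensor
assert torch.equal(out, x)
data.remove_tensor(attr_name='x', index=None)          # deletes `data.x`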
In general, :class:`~torch_geometric.data.Data` tries to mimic the @@ -348,7 +364,10 @@ class Data(BaseData): def __init__(self, x: OptTensor = None, edge_index: OptTensor = None, edge_attr: OptTensor = None, y: OptTensor = None, pos: OptTensor = None, **kwargs): - super().__init__() + # `Data` doesn't support group_name, so we need to adjust `TensorAttr` + # accordingly here to avoid requiring `group_name` to be set: + super().__init__(attr_cls=DataTensorAttr) + self.__dict__['_store'] = GlobalStorage(_parent=self) if x is not None: @@ -384,6 +403,9 @@ def __setattr__(self, key: str, value: Any): def __delattr__(self, key: str): delattr(self._store, key) + # TODO consider supporting the feature store interface for + # __getitem__, __setitem__, and __delitem__ so, for example, we + # can accept key: Union[str, TensorAttr] in __getitem__. def __getitem__(self, key: str) -> Any: return self._store[key] @@ -692,6 +714,47 @@ def num_faces(self) -> Optional[int]: return self.face.size(self.__cat_dim__('face', self.face)) return None + # FeatureStore interface ########################################### + + def items(self): + r"""Returns an `ItemsView` over the stored attributes in the `Data` + object.""" + return self._store.items() + + def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: + r"""Stores a feature tensor in node storage.""" + out = getattr(self, attr.attr_name, None) + if out is not None and attr.index is not None: + # Attr name exists, handle index: + out[attr.index] = tensor + else: + # No attr name (or None index), just store tensor: + setattr(self, attr.attr_name, tensor) + return True + + def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: + r"""Obtains a feature tensor from node storage.""" + # Retrieve tensor and index accordingly: + tensor = getattr(self, attr.attr_name, None) + if tensor is not None: + # TODO this behavior is a bit odd, since TensorAttr requires that + # we set `index`. So, we assume here that indexing by `None` is + # equivalent to not indexing at all, which is not in line with + # Python semantics. 
+ return tensor[attr.index] if attr.index is not None else tensor + return None + + def _remove_tensor(self, attr: TensorAttr) -> bool: + r"""Deletes a feature tensor from node storage.""" + # Remove tensor entirely: + if hasattr(self, attr.attr_name): + delattr(self, attr.attr_name) + return True + return False + + def __len__(self) -> int: + return BaseData.__len__(self) + ############################################################################### diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index bc7d10322497..b9c2aa623cc6 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -245,7 +245,7 @@ def __init__(self, attr_cls: Any = TensorAttr): attributes by subclassing :class:`TensorAttr` and passing the subclass as :obj:`attr_cls`.""" super().__init__() - self._attr_cls = attr_cls + self.__dict__['_attr_cls'] = attr_cls # Core (CRUD) ############################################################# diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index d4e77c1a80e3..051833a36371 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -10,6 +10,11 @@ from torch_sparse import SparseTensor from torch_geometric.data.data import BaseData, Data, size_repr +from torch_geometric.data.feature_store import ( + FeatureStore, + FeatureTensorType, + TensorAttr, +) from torch_geometric.data.storage import BaseStorage, EdgeStorage, NodeStorage from torch_geometric.typing import EdgeType, NodeType, QueryType from torch_geometric.utils import bipartite_subgraph, is_undirected @@ -18,7 +23,7 @@ NodeOrEdgeStorage = Union[NodeStorage, EdgeStorage] -class HeteroData(BaseData): +class HeteroData(BaseData, FeatureStore): r"""A data object describing a heterogeneous graph, holding multiple node and/or edge types in disjunct storage objects. Storage objects can hold either node-level, link-level or graph-level @@ -92,6 +97,8 @@ class HeteroData(BaseData): DEFAULT_REL = 'to' def __init__(self, _mapping: Optional[Dict[str, Any]] = None, **kwargs): + super().__init__() + self.__dict__['_global_store'] = BaseStorage(_parent=self) self.__dict__['_node_store_dict'] = {} self.__dict__['_edge_store_dict'] = {} @@ -616,6 +623,52 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: return data + # :obj:`FeatureStore` interface ########################################### + + def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: + r"""Stores a feature tensor in node storage.""" + if not attr.is_set('index'): + attr.index = None + + out = self._node_store_dict.get(attr.group_name, None) + if out: + # Group name exists, handle index or create new attribute name: + val = getattr(out, attr.attr_name) + if val is not None: + val[attr.index] = tensor + else: + setattr(self[attr.group_name], attr.attr_name, tensor) + else: + # No node storage found, just store tensor in new one: + setattr(self[attr.group_name], attr.attr_name, tensor) + return True + + def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: + r"""Obtains a feature tensor from node storage.""" + # Retrieve tensor and index accordingly: + tensor = getattr(self[attr.group_name], attr.attr_name, None) + if tensor is not None: + # TODO this behavior is a bit odd, since TensorAttr requires that + # we set `index`. So, we assume here that indexing by `None` is + # equivalent to not indexing at all, which is not in line with + # Python semantics. 
+ return tensor[attr.index] if attr.index is not None else tensor + return None + + def _remove_tensor(self, attr: TensorAttr) -> bool: + r"""Deletes a feature tensor from node storage.""" + # Remove tensor entirely: + if hasattr(self[attr.group_name], attr.attr_name): + delattr(self[attr.group_name], attr.attr_name) + return True + return False + + def __len__(self) -> int: + return BaseData.__len__(self) + + def __iter__(self): + raise NotImplementedError + # Helper functions ############################################################ From b274fbdeeeb4abaedd8b2a62a4df50e99bb21cac Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Tue, 21 Jun 2022 16:12:43 -0700 Subject: [PATCH 0102/2432] `GraphStore` definition + `Data` and `HeteroData` integration (#4816) --- CHANGELOG.md | 1 + test/data/test_data.py | 32 +++++++++ test/data/test_feature_store.py | 2 +- test/data/test_graph_store.py | 55 +++++++++++++++ test/data/test_hetero_data.py | 35 ++++++++++ torch_geometric/data/data.py | 98 ++++++++++++++++++++++++++- torch_geometric/data/feature_store.py | 20 +++--- torch_geometric/data/graph_store.py | 97 ++++++++++++++++++++++++++ torch_geometric/data/hetero_data.py | 44 +++++++++--- torch_geometric/typing.py | 9 ++- 10 files changed, 369 insertions(+), 24 deletions(-) create mode 100644 test/data/test_graph_store.py create mode 100644 torch_geometric/data/graph_store.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 67e5b2bfd7fe..a7e8a737f173 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) - Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807)) - Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) - Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) diff --git a/test/data/test_data.py b/test/data/test_data.py index be06d43bed0b..71c8bf80203c 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -3,6 +3,7 @@ import pytest import torch import torch.multiprocessing as mp +import torch_sparse import torch_geometric from torch_geometric.data import Data @@ -264,3 +265,34 @@ def test_basic_feature_store(): assert 'x' in data.__dict__['_store'] data.remove_tensor(attr_name='x', index=None) assert 'x' not in data.__dict__['_store'] + + +# Graph Store ################################################################# + + +def test_basic_graph_store(): + data = Data() + + edge_index = torch.LongTensor([[0, 1], [1, 2]]) + adj = torch_sparse.SparseTensor(row=edge_index[0], col=edge_index[1]) + + def assert_equal_tensor_tuple(expected, actual): + assert len(expected) == len(actual) + for i in range(len(expected)): + assert torch.equal(expected[i], actual[i]) + + # We put all three tensor types: COO, CSR, and CSC, and we get them back + # to confirm that `GraphStore` works as intended. 
+ coo = adj.coo()[:-1] + csr = adj.csr()[:-1] + csc = adj.csc()[:-1] + + # Put: + data.put_edge_index(coo, layout='coo') + data.put_edge_index(csr, layout='csr') + data.put_edge_index(csc, layout='csc') + + # Get: + assert_equal_tensor_tuple(coo, data.get_edge_index('coo')) + assert_equal_tensor_tuple(csr, data.get_edge_index('csr')) + assert_equal_tensor_tuple(csc, data.get_edge_index('csc')) diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py index de3249bf78bf..76d5e516a4ce 100644 --- a/test/data/test_feature_store.py +++ b/test/data/test_feature_store.py @@ -66,7 +66,7 @@ def __init__(self, attr_name=_field_status.UNSET, class MyFeatureStoreNoGroupName(MyFeatureStore): def __init__(self): super().__init__() - self._attr_cls = MyTensorAttrNoGroupName + self._tensor_attr_cls = MyTensorAttrNoGroupName @staticmethod def key(attr: TensorAttr) -> str: diff --git a/test/data/test_graph_store.py b/test/data/test_graph_store.py new file mode 100644 index 000000000000..bc0089a02ce1 --- /dev/null +++ b/test/data/test_graph_store.py @@ -0,0 +1,55 @@ +from typing import Optional + +import torch +from torch_sparse import SparseTensor + +from torch_geometric.data.graph_store import ( + EdgeAttr, + EdgeLayout, + EdgeTensorType, + GraphStore, +) + + +class MyGraphStore(GraphStore): + def __init__(self): + super().__init__() + self.store = {} + + @staticmethod + def key(attr: EdgeAttr) -> str: + return f"{attr.edge_type or ''}_{attr.layout}" + + def _put_edge_index(self, edge_index: EdgeTensorType, + edge_attr: EdgeAttr) -> bool: + self.store[MyGraphStore.key(edge_attr)] = edge_index + + def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: + return self.store.get(MyGraphStore.key(edge_attr), None) + + +def test_graph_store(): + graph_store = MyGraphStore() + edge_index = torch.LongTensor([[0, 1], [1, 2]]) + adj = SparseTensor(row=edge_index[0], col=edge_index[1]) + + def assert_equal_tensor_tuple(expected, actual): + assert len(expected) == len(actual) + for i in range(len(expected)): + assert torch.equal(expected[i], actual[i]) + + # We put all three tensor types: COO, CSR, and CSC, and we get them back + # to confirm that `GraphStore` works as intended. 
+ coo = adj.coo()[:-1] + csr = adj.csr()[:-1] + csc = adj.csc()[:-1] + + # Put: + graph_store['edge', EdgeLayout.COO] = coo + graph_store['edge', 'csr'] = csr + graph_store['edge', 'csc'] = csc + + # Get: + assert_equal_tensor_tuple(coo, graph_store['edge', 'coo']) + assert_equal_tensor_tuple(csr, graph_store['edge', 'csr']) + assert_equal_tensor_tuple(csc, graph_store['edge', 'csc']) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index b26832bcb068..84c74041eaa8 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -2,6 +2,7 @@ import pytest import torch +import torch_sparse from torch_geometric.data import HeteroData from torch_geometric.data.storage import EdgeStorage @@ -427,3 +428,37 @@ def test_basic_feature_store(): assert 'x' in data['paper'].__dict__['_mapping'] data.remove_tensor(group_name='paper', attr_name='x', index=None) assert 'x' not in data['paper'].__dict__['_mapping'] + + +# Graph Store ################################################################# + + +def test_basic_graph_store(): + data = HeteroData() + + edge_index = torch.LongTensor([[0, 1], [1, 2]]) + adj = torch_sparse.SparseTensor(row=edge_index[0], col=edge_index[1]) + + def assert_equal_tensor_tuple(expected, actual): + assert len(expected) == len(actual) + for i in range(len(expected)): + assert torch.equal(expected[i], actual[i]) + + # We put all three tensor types: COO, CSR, and CSC, and we get them back + # to confirm that `GraphStore` works as intended. + coo = adj.coo()[:-1] + csr = adj.csr()[:-1] + csc = adj.csc()[:-1] + + # Put: + data.put_edge_index(coo, layout='coo', edge_type='1') + data.put_edge_index(csr, layout='csr', edge_type='2') + data.put_edge_index(csc, layout='csc', edge_type='3') + + # Get: + assert_equal_tensor_tuple(coo, + data.get_edge_index(layout='coo', edge_type='1')) + assert_equal_tensor_tuple(csr, + data.get_edge_index(layout='csr', edge_type='2')) + assert_equal_tensor_tuple(csc, + data.get_edge_index(layout='csc', edge_type='3')) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 3a222246b44e..ddda8a5af8d8 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -24,6 +24,7 @@ TensorAttr, _field_status, ) +from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout, GraphStore from torch_geometric.data.storage import ( BaseStorage, EdgeStorage, @@ -31,7 +32,14 @@ NodeStorage, ) from torch_geometric.deprecation import deprecated -from torch_geometric.typing import EdgeType, NodeType, OptTensor +from torch_geometric.typing import ( + Adj, + EdgeTensorType, + EdgeType, + FeatureTensorType, + NodeType, + OptTensor, +) from torch_geometric.utils import subgraph @@ -316,7 +324,17 @@ def __init__(self, attr_name=_field_status.UNSET, super().__init__(None, attr_name, index) -class Data(BaseData, FeatureStore): +@dataclass +class DataEdgeAttr(EdgeAttr): + r"""Edge attribute class for `Data`, which does not require a + `edge_type`.""" + def __init__(self, layout: EdgeLayout, is_sorted: bool = False, + edge_type: EdgeType = None): + # Treat group_name as optional, and move it to the end + super().__init__(edge_type, layout, is_sorted) + + +class Data(BaseData, FeatureStore, GraphStore): r"""A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. 
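# A minimal sketch of the `GraphStore` methods that `Data` gains in this
# patch, mirroring `test_basic_graph_store` above (the 3-node edge index is
# illustrative only):
import torch
from torch_sparse import SparseTensor

from torch_geometric.data import Data

data = Data()
edge_index = torch.tensor([[0, 1], [1, 2]])
adj = SparseTensor(row=edge_index[0], col=edge_index[1])

coo = adj.coo()[:-1]  # (row, col)
csr = adj.csr()[:-1]  # (rowptr, col)

data.put_edge_index(coo, layout='coo')  # stored as `data.edge_index`
data.put_edge_index(csr, layout='csr')  # stored as `data.adj`

row, col = data.get_edge_index('coo')
assert torch.equal(row, coo[0]) and torch.equal(col, coo[1])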
In general, :class:`~torch_geometric.data.Data` tries to mimic the @@ -366,7 +384,11 @@ def __init__(self, x: OptTensor = None, edge_index: OptTensor = None, pos: OptTensor = None, **kwargs): # `Data` doesn't support group_name, so we need to adjust `TensorAttr` # accordingly here to avoid requiring `group_name` to be set: - super().__init__(attr_cls=DataTensorAttr) + super().__init__(tensor_attr_cls=DataTensorAttr) + + # `Data` doesn't support edge_type, so we need to adjust `EdgeAttr` + # accordingly here to avoid requiring `edge_type` to be set: + GraphStore.__init__(self, edge_attr_cls=DataEdgeAttr) self.__dict__['_store'] = GlobalStorage(_parent=self) @@ -755,9 +777,79 @@ def _remove_tensor(self, attr: TensorAttr) -> bool: def __len__(self) -> int: return BaseData.__len__(self) + # GraphStore interface #################################################### + + def _put_edge_index(self, edge_index: EdgeTensorType, + edge_attr: EdgeAttr) -> bool: + # Convert the edge index to a recognizable format: + attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] + attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) + setattr(self, attr_name, attr_val) + return True + + def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: + # Get the requested format and the Adj tensor associated with it: + attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] + attr_val = getattr(self._store, attr_name, None) + if attr_val is not None: + # Convert from Adj type to Tuple[Tensor, Tensor] + attr_val = adj_type_to_edge_tensor_type(edge_attr.layout, attr_val) + return attr_val + ############################################################################### +EDGE_LAYOUT_TO_ATTR_NAME = { + EdgeLayout.COO: 'edge_index', + EdgeLayout.CSR: 'adj', + EdgeLayout.CSC: 'adj_t', +} + + +def edge_tensor_type_to_adj_type( + attr: EdgeAttr, + tensor_tuple: EdgeTensorType, +) -> Adj: + r"""Converts an EdgeTensorType tensor tuple to a PyG Adj tensor.""" + if attr.layout == EdgeLayout.COO: + # COO: (row, col) + if (tensor_tuple[0].storage().data_ptr() == + tensor_tuple[1].storage().data_ptr()): + # Do not copy if the tensor tuple is constructed from the same + # storage (instead, return a view): + out = torch.empty(0, dtype=tensor_tuple[0].dtype) + out.set_(tensor_tuple[0].storage(), storage_offset=0, + size=tensor_tuple[0].size() + tensor_tuple[1].size()) + return out.view(2, -1) + return torch.stack(tensor_tuple) + elif attr.layout == EdgeLayout.CSR: + # CSR: (rowptr, col) + return SparseTensor(rowptr=tensor_tuple[0], col=tensor_tuple[1], + is_sorted=True) + elif attr.layout == EdgeLayout.CSC: + # CSC: (row, colptr) this is a transposed adjacency matrix, so rowptr + # is the compressed column and col is the uncompressed row. 
+ return SparseTensor(rowptr=tensor_tuple[1], col=tensor_tuple[0], + is_sorted=True) + raise ValueError(f"Bad edge layout (got '{attr.layout}')") + + +def adj_type_to_edge_tensor_type(layout: EdgeLayout, + edge_index: Adj) -> EdgeTensorType: + r"""Converts a PyG Adj tensor to an EdgeTensorType equivalent.""" + if isinstance(edge_index, Tensor): + return (edge_index[0], edge_index[1]) + if layout == EdgeLayout.COO: + row, col, _ = edge_index.coo() + return (row, col) + elif layout == EdgeLayout.CSR: + rowptr, col, _ = edge_index.csr() + return (rowptr, col) + else: + # CSC is just adj_t.csr(): + colptr, row, _ = edge_index.csr() + return (row, colptr) + def size_repr(key: Any, value: Any, indent: int = 0) -> str: pad = ' ' * indent diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index b9c2aa623cc6..38065f591d3c 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -239,13 +239,13 @@ def __repr__(self) -> str: class FeatureStore(MutableMapping): - def __init__(self, attr_cls: Any = TensorAttr): + def __init__(self, tensor_attr_cls: Any = TensorAttr): r"""Initializes the feature store. Implementor classes can customize the ordering and required nature of their :class:`TensorAttr` tensor attributes by subclassing :class:`TensorAttr` and passing the subclass as :obj:`attr_cls`.""" super().__init__() - self.__dict__['_attr_cls'] = attr_cls + self.__dict__['_tensor_attr_cls'] = tensor_attr_cls # Core (CRUD) ############################################################# @@ -270,7 +270,7 @@ def put_tensor(self, tensor: FeatureTensorType, *args, **kwargs) -> bool: Returns: bool: Whether insertion was successful. """ - attr = self._attr_cls.cast(*args, **kwargs) + attr = self._tensor_attr_cls.cast(*args, **kwargs) if not attr.is_fully_specified(): raise ValueError(f"The input TensorAttr '{attr}' is not fully " f"specified. Please fully specify the input by " @@ -310,7 +310,7 @@ def to_type(tensor: FeatureTensorType) -> FeatureTensorType: return tensor.numpy() return tensor - attr = self._attr_cls.cast(*args, **kwargs) + attr = self._tensor_attr_cls.cast(*args, **kwargs) if isinstance(attr.index, slice): if attr.index.start == attr.index.stop == attr.index.step is None: attr.index = None @@ -341,7 +341,7 @@ def remove_tensor(self, *args, **kwargs) -> bool: Returns: bool: Whether deletion was succesful. """ - attr = self._attr_cls.cast(*args, **kwargs) + attr = self._tensor_attr_cls.cast(*args, **kwargs) if not attr.is_fully_specified(): raise ValueError(f"The input TensorAttr '{attr}' is not fully " f"specified. Please fully specify the input by " @@ -366,7 +366,7 @@ def update_tensor(self, tensor: FeatureTensorType, *args, Returns: bool: Whether the update was succesful. 
""" - attr = self._attr_cls.cast(*args, **kwargs) + attr = self._tensor_attr_cls.cast(*args, **kwargs) self.remove_tensor(attr) return self.put_tensor(tensor, attr) @@ -375,7 +375,7 @@ def update_tensor(self, tensor: FeatureTensorType, *args, def view(self, *args, **kwargs) -> AttrView: r"""Returns an :class:`AttrView` of the feature store, with the defined attributes set.""" - attr = self._attr_cls.cast(*args, **kwargs) + attr = self._tensor_attr_cls.cast(*args, **kwargs) return AttrView(self, attr) # Python built-ins ######################################################## @@ -384,7 +384,7 @@ def __setitem__(self, key: TensorAttr, value: FeatureTensorType): r"""Supports store[tensor_attr] = tensor.""" # CastMixin will handle the case of key being a tuple or TensorAttr # object: - key = self._attr_cls.cast(key) + key = self._tensor_attr_cls.cast(key) # We need to fully specify the key for __setitem__ as it does not make # sense to work with a view here: key.fully_specify() @@ -403,7 +403,7 @@ def __getitem__(self, key: TensorAttr) -> Any: """ # CastMixin will handle the case of key being a tuple or TensorAttr # object: - attr = self._attr_cls.cast(key) + attr = self._tensor_attr_cls.cast(key) if attr.is_fully_specified(): return self.get_tensor(attr) # If the view is not fully specified, return a :class:`AttrView`: @@ -413,7 +413,7 @@ def __delitem__(self, key: TensorAttr): r"""Supports del store[tensor_attr].""" # CastMixin will handle the case of key being a tuple or TensorAttr # object: - key = self._attr_cls.cast(key) + key = self._tensor_attr_cls.cast(key) key.fully_specify() self.remove_tensor(key) diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py new file mode 100644 index 000000000000..b97c4064ee54 --- /dev/null +++ b/torch_geometric/data/graph_store.py @@ -0,0 +1,97 @@ +from abc import abstractmethod +from dataclasses import dataclass +from enum import Enum +from typing import Any, Optional + +from torch_geometric.typing import EdgeTensorType +from torch_geometric.utils.mixin import CastMixin + + +class EdgeLayout(Enum): + COO = 'coo' + CSC = 'csc' + CSR = 'csr' + + +@dataclass +class EdgeAttr(CastMixin): + r"""Defines the attributes of an :obj:`GraphStore` edge.""" + + # The type of the edge + edge_type: Optional[Any] + + # The layout of the edge representation + layout: EdgeLayout + + # Whether the edge index is sorted, by destination node. Useful for + # avoiding sorting costs when performing neighbor sampling, and only + # meaningful for COO (CSC and CSR are sorted by definition) + is_sorted: bool = False + + +class GraphStore: + def __init__(self, edge_attr_cls: Any = EdgeAttr): + r"""Initializes the graph store. Implementor classes can customize the + ordering and required nature of their :class:`EdgeAttr` edge attributes + by subclassing :class:`EdgeAttr` and passing the subclass as + :obj:`edge_attr_cls`.""" + super().__init__() + self.__dict__['_edge_attr_cls'] = edge_attr_cls + + # Core #################################################################### + + @abstractmethod + def _put_edge_index(self, edge_index: EdgeTensorType, + edge_attr: EdgeAttr) -> bool: + pass + + def put_edge_index(self, edge_index: EdgeTensorType, *args, + **kwargs) -> bool: + r"""Synchronously adds an edge_index tensor to the graph store. + + Args: + tensor(EdgeTensorType): an edge_index in a format specified in + attr. + **attr(EdgeAttr): the edge attributes. 
+ """ + edge_attr = self._edge_attr_cls.cast(*args, **kwargs) + edge_attr.layout = EdgeLayout(edge_attr.layout) + + # Override is_sorted for CSC and CSR: + edge_attr.is_sorted = edge_attr.is_sorted or (edge_attr.layout in [ + EdgeLayout.CSC, EdgeLayout.CSR + ]) + return self._put_edge_index(edge_index, edge_attr) + + @abstractmethod + def _get_edge_index(self, edge_attr: EdgeAttr) -> EdgeTensorType: + pass + + def get_edge_index(self, *args, **kwargs) -> Optional[EdgeTensorType]: + r"""Synchronously gets an edge_index tensor from the materialized + graph. + + Args: + **attr(EdgeAttr): the edge attributes. + + Returns: + EdgeTensorType: an edge_index tensor corresonding to the provided + attributes, or None if there is no such tensor. + """ + edge_attr = self._edge_attr_cls.cast(*args, **kwargs) + edge_attr.layout = EdgeLayout(edge_attr.layout) + return self._get_edge_index(edge_attr) + + # TODO implement coo(), csc(), csr() methods on GraphStore, which perform + # conversions of edge indices between formats. These conversions can also + # automatically be performed in `get_edge_index` + + # Python built-ins ######################################################## + + def __setitem__(self, key: EdgeAttr, value: EdgeTensorType): + key = self._edge_attr_cls.cast(key) + self.put_edge_index(value, key) + + def __getitem__(self, key: EdgeAttr) -> Optional[EdgeTensorType]: + key = self._edge_attr_cls.cast(key) + return self.get_edge_index(key) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 051833a36371..69fec089f8f6 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -9,21 +9,31 @@ from torch import Tensor from torch_sparse import SparseTensor -from torch_geometric.data.data import BaseData, Data, size_repr -from torch_geometric.data.feature_store import ( - FeatureStore, - FeatureTensorType, - TensorAttr, +from torch_geometric.data.data import ( + EDGE_LAYOUT_TO_ATTR_NAME, + BaseData, + Data, + adj_type_to_edge_tensor_type, + edge_tensor_type_to_adj_type, + size_repr, ) +from torch_geometric.data.feature_store import FeatureStore, TensorAttr +from torch_geometric.data.graph_store import EdgeAttr, GraphStore from torch_geometric.data.storage import BaseStorage, EdgeStorage, NodeStorage -from torch_geometric.typing import EdgeType, NodeType, QueryType +from torch_geometric.typing import ( + EdgeTensorType, + EdgeType, + FeatureTensorType, + NodeType, + QueryType, +) from torch_geometric.utils import bipartite_subgraph, is_undirected NodeOrEdgeType = Union[NodeType, EdgeType] NodeOrEdgeStorage = Union[NodeStorage, EdgeStorage] -class HeteroData(BaseData, FeatureStore): +class HeteroData(BaseData, FeatureStore, GraphStore): r"""A data object describing a heterogeneous graph, holding multiple node and/or edge types in disjunct storage objects. 
Storage objects can hold either node-level, link-level or graph-level @@ -623,7 +633,7 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: return data - # :obj:`FeatureStore` interface ########################################### + # FeatureStore interface ################################################## def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: r"""Stores a feature tensor in node storage.""" @@ -669,6 +679,24 @@ def __len__(self) -> int: def __iter__(self): raise NotImplementedError + # GraphStore interface #################################################### + + def _put_edge_index(self, edge_index: EdgeTensorType, + edge_attr: EdgeAttr) -> bool: + # Convert the edge index to a recognizable format: + attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] + attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) + setattr(self[edge_attr.edge_type], attr_name, attr_val) + + def _get_edge_index(self, edge_attr: EdgeAttr) -> EdgeTensorType: + # Get the requested format and the Adj tensor associated with it: + attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] + attr_val = getattr(self[edge_attr.edge_type], attr_name, None) + if attr_val is not None: + # Convert from Adj type to Tuple[Tensor, Tensor] + attr_val = adj_type_to_edge_tensor_type(edge_attr.layout, attr_val) + return attr_val + # Helper functions ############################################################ diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 60bce93dfd59..8e3cc0c13be7 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -1,7 +1,6 @@ from typing import Dict, List, Optional, Tuple, Union import numpy as np -import torch from torch import Tensor from torch_sparse import SparseTensor @@ -23,7 +22,13 @@ Metadata = Tuple[List[NodeType], List[EdgeType]] # A representation of a feature tensor -FeatureTensorType = Union[torch.Tensor, np.ndarray] +FeatureTensorType = Union[Tensor, np.ndarray] + +# A representation of an edge index, following the possible formats: +# * COO: (row, col) +# * CSC: (row, colptr) +# * CSR: (rowptr, col) +EdgeTensorType = Tuple[Tensor, Tensor] # Types for message passing ################################################### From 3d766272da3d0be3d5a85527fb2d833714ecf66b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 22 Jun 2022 06:08:14 +0200 Subject: [PATCH 0103/2432] `DataLoaderIterator` doc-string (#4838) * add doc-string * changelog * typo Co-authored-by: Manan Shah --- CHANGELOG.md | 1 + torch_geometric/loader/base.py | 15 +++++++++++++++ torch_geometric/loader/hgt_loader.py | 1 + torch_geometric/loader/link_neighbor_loader.py | 1 + torch_geometric/loader/neighbor_loader.py | 1 + 5 files changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7e8a737f173..f27e60f99131 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) - Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) - Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807)) - Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) diff --git a/torch_geometric/loader/base.py b/torch_geometric/loader/base.py index cf493824cfa3..57270c8112fb 100644 --- a/torch_geometric/loader/base.py +++ b/torch_geometric/loader/base.py @@ -4,6 +4,21 @@ class DataLoaderIterator(object): + r"""A data loader iterator extended by a simple post transformation + function :meth:`transform_fn`. While the iterator may request items from + different sub-processes, :meth:`transform_fn` will always be executed in + the main process. + + This iterator is used in PyG's sampler classes, and is responsible for + feature fetching and filtering data objects after sampling has taken place + in a sub-process. This has the following advantages: + + * We do not need to share feature matrices across processes which may + prevent any errors due to too many open file handles. + * We can execute any expensive post-processing commands on the main thread + with full parallelization power (which usually executes faster). + * It lets us naturally support data already being present on the GPU. + """ def __init__(self, iterator: _BaseDataLoaderIter, transform_fn: Callable): self.iterator = iterator self.transform_fn = transform_fn diff --git a/torch_geometric/loader/hgt_loader.py b/torch_geometric/loader/hgt_loader.py index 785f3fc733f4..a892a62eff93 100644 --- a/torch_geometric/loader/hgt_loader.py +++ b/torch_geometric/loader/hgt_loader.py @@ -135,6 +135,7 @@ def sample(self, indices: List[int]) -> HeteroData: return node_dict, row_dict, col_dict, edge_dict, len(indices) def transform_fn(self, out: Any) -> HeteroData: + # NOTE This function will always be executed on the main thread! node_dict, row_dict, col_dict, edge_dict, batch_size = out data = filter_hetero_data(self.data, node_dict, row_dict, col_dict, diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index e8162ea66fd9..da9b35fd330a 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -278,6 +278,7 @@ def __init__( collate_fn=self.neighbor_sampler, **kwargs) def transform_fn(self, out: Any) -> Union[Data, HeteroData]: + # NOTE This function will always be executed on the main thread! if isinstance(self.data, Data): node, row, col, edge, edge_label_index, edge_label = out data = filter_data(self.data, node, row, col, edge, diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 744b6e4d06e5..0521ed54e0ae 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -303,6 +303,7 @@ def __init__( **kwargs) def transform_fn(self, out: Any) -> Union[Data, HeteroData]: + # NOTE This function will always be executed on the main thread! 
if isinstance(self.data, Data): node, row, col, edge, batch_size = out data = filter_data(self.data, node, row, col, edge, From f909d24d80a9fdc4d14701f9364de818e0dd46c7 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 22 Jun 2022 15:45:11 +0200 Subject: [PATCH 0104/2432] `HANConv`: `NaN` handling (#4841) * update * changelog --- CHANGELOG.md | 2 +- torch_geometric/nn/conv/han_conv.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f27e60f99131..6b8fb0a47553 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) - Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) - Added a `max_sample` argument to `AddMetaPaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) -- Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756)) +- Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756), [#4841](https://github.com/pyg-team/pytorch_geometric/pull/4841)) - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) diff --git a/torch_geometric/nn/conv/han_conv.py b/torch_geometric/nn/conv/han_conv.py index 54fc0cbbd7a2..a7e8624d2a23 100644 --- a/torch_geometric/nn/conv/han_conv.py +++ b/torch_geometric/nn/conv/han_conv.py @@ -18,6 +18,8 @@ def group(xs: List[Tensor], q: nn.Parameter, else: num_edge_types = len(xs) out = torch.stack(xs) + if out.numel() == 0: + return out.view(0, out.size(-1)) attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1) attn = F.softmax(attn_score, dim=0) out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0) From 78fe5a5f84d3fbf8e2d8082b1cdce6e6d4bc5f95 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 22 Jun 2022 15:52:39 +0200 Subject: [PATCH 0105/2432] `TUDataset`: Fix interplay between `pre_transform` and `pre_filter` (#4842) * update * update --- CHANGELOG.md | 1 + torch_geometric/datasets/tu_dataset.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b8fb0a47553..9ddeeba0d2ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed a bug in `TUDataset` where `pre_filter` was not applied whenever `pre_transform` was present - Renamed `RandomTranslate` to `RandomJitter` - the usage of `RandomTranslate` is now deprecated ([#4828](https://github.com/pyg-team/pytorch_geometric/pull/4828)) - Do not allow accessing edge types in `HeteroData` with two node types when there exists multiple relations between these types ([#4782](https://github.com/pyg-team/pytorch_geometric/pull/4782)) - Allow `edge_type == rev_edge_type` argument in `RandomLinkSplit` ([#4757](https://github.com/pyg-team/pytorch_geometric/pull/4757)) diff --git a/torch_geometric/datasets/tu_dataset.py b/torch_geometric/datasets/tu_dataset.py index 88e2af51af63..5dacd27cc8a8 100644 --- a/torch_geometric/datasets/tu_dataset.py +++ b/torch_geometric/datasets/tu_dataset.py @@ -185,15 +185,17 @@ def download(self): def process(self): self.data, self.slices, sizes = read_tu_data(self.raw_dir, self.name) - if self.pre_filter is not None: + if self.pre_filter is not None or self.pre_transform is not None: data_list = [self.get(idx) for idx in range(len(self))] - data_list = [data for data in data_list if self.pre_filter(data)] - self.data, self.slices = self.collate(data_list) - if self.pre_transform is not None: - data_list = [self.get(idx) for idx in range(len(self))] - data_list = [self.pre_transform(data) for data in data_list] + if self.pre_filter is not None: + data_list = [d for d in data_list if self.pre_filter(d)] + + if self.pre_transform is not None: + data_list = [self.pre_transform(d) for d in data_list] + self.data, self.slices = self.collate(data_list) + self._data_list = None # Reset cache. torch.save((self.data, self.slices, sizes), self.processed_paths[0]) From 6c6e2cc1c350d97eeeab20f698fb1c506aed441e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 22 Jun 2022 16:11:46 +0200 Subject: [PATCH 0106/2432] GraphGym PyTorch Lightning Fixes (#4843) * graphgym fixes * update --- CHANGELOG.md | 2 +- graphgym/main.py | 3 +-- torch_geometric/graphgym/logger.py | 2 +- torch_geometric/graphgym/model_builder.py | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ddeeba0d2ea..ff558764e23d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,7 +28,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `HeteroData.subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) - Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) - Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) -- Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531), [#4689](https://github.com/pyg-team/pytorch_geometric/pull/4689)) +- Added PyTorch Lightning support in GraphGym ([#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531), [#4689](https://github.com/pyg-team/pytorch_geometric/pull/4689), [#4843](https://github.com/pyg-team/pytorch_geometric/pull/4843)) - Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) - Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620), [#4702](https://github.com/pyg-team/pytorch_geometric/pull/4702)) - Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) diff --git a/graphgym/main.py b/graphgym/main.py index 2bb62a7ab5a9..1a2abcf61296 100644 --- a/graphgym/main.py +++ b/graphgym/main.py @@ -13,10 +13,9 @@ set_out_dir, set_run_dir, ) -from torch_geometric.graphgym.loader import GraphGymDataModule from torch_geometric.graphgym.logger import set_printing from torch_geometric.graphgym.model_builder import create_model -from torch_geometric.graphgym.train import train +from torch_geometric.graphgym.train import GraphGymDataModule, train from torch_geometric.graphgym.utils.agg_runs import agg_runs from torch_geometric.graphgym.utils.comp_budget import params_count from torch_geometric.graphgym.utils.device import auto_select_device diff --git a/torch_geometric/graphgym/logger.py b/torch_geometric/graphgym/logger.py index 61dd67a8c49a..f83794546422 100644 --- a/torch_geometric/graphgym/logger.py +++ b/torch_geometric/graphgym/logger.py @@ -290,7 +290,7 @@ def _get_stats( true=outputs['true'].detach().cpu(), pred=outputs['pred_score'].detach().cpu(), loss=float(outputs['loss']), - lr=trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0], + lr=trainer.lr_schedulers[0]['scheduler'].get_last_lr()[0], time_used=time.time() - epoch_start_time, params=cfg.params, ) diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index a596648b38da..7cb7ac3d5d72 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -33,7 +33,7 @@ def _shared_step(self, batch, split: str) -> Dict: pred, true = self(batch) loss, pred_score = compute_loss(pred, true) step_end_time = time.time() - return dict(loss=loss, true=true, pred_score=pred_score, + return dict(loss=loss, true=true, pred_score=pred_score.detach(), step_end_time=step_end_time) def training_step(self, batch, *args, **kwargs): From 97c50a03db9f5e9fbb0ab42d38681cac0d2a020a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 23 Jun 2022 12:57:13 +0200 Subject: [PATCH 0107/2432] Add `size=None` explanation to jittable `MessagePassing` modules (#4850) * update * changelog * typo --- CHANGELOG.md | 1 + docs/source/notes/jit.rst | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff558764e23d..36f855cc0ee4 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) - Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) - Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807)) diff --git a/docs/source/notes/jit.rst b/docs/source/notes/jit.rst index 76999da5dcc1..54891c910570 100644 --- a/docs/source/notes/jit.rst +++ b/docs/source/notes/jit.rst @@ -99,7 +99,8 @@ However, if you want your own GNN module to be jittable, you need to account for def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Optional[Tensor]) -> Tensor: - return self.propagate(edge_index, x=x, edge_weight=edge_weight) + return self.propagate(edge_index, x=x, edge_weight=edge_weight, + size=None) 2. Declaring the type of propagation arguments as a comment anywhere inside your module: @@ -115,4 +116,10 @@ However, if you want your own GNN module to be jittable, you need to account for edge_weight: Optional[Tensor]) -> Tensor: # propagate_type: (x: Tensor, edge_weight: Optional[Tensor]) - return self.propagate(edge_index, x=x, edge_weight=edge_weight) + return self.propagate(edge_index, x=x, edge_weight=edge_weight, + size=None) + +.. warning:: + + Importantly, due to TorchScript limitations, one also has to pass in the :obj:`size` attribute to :meth:`~torch_geometric.nn.conv.message_passing.MessagePassing.propagate`. + In most cases, this can be simply set to :obj:`None` in which case it will be automatically inferred. From 85cddb33004bbf2a4fe5554e2b8126cf6dde85c1 Mon Sep 17 00:00:00 2001 From: Hu Chuxuan <81068196+Hu-Chuxuan@users.noreply.github.com> Date: Thu, 23 Jun 2022 06:14:28 -0500 Subject: [PATCH 0108/2432] Add a `normalize` parameter to `dense_diff_pool` (#4847) * modified link_loss to make it viable to not normalizing * modified link_loss to make it viable to not normalizing * changelog modified * Update CHANGELOG.md Co-authored-by: Jinu Sunil * update Co-authored-by: Chuxuan Hu Co-authored-by: Matthias Fey Co-authored-by: Jinu Sunil --- CHANGELOG.md | 1 + torch_geometric/nn/dense/diff_pool.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36f855cc0ee4..56fd02033c72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
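Putting the jittable `MessagePassing` steps documented above together, a minimal sketch of a module that declares its propagation types and passes `size=None` to `propagate()` (illustrative only; `WeightedConv` is a made-up example, not part of the library):

from typing import Optional

import torch
from torch import Tensor

from torch_geometric.nn import MessagePassing


class WeightedConv(MessagePassing):
    def forward(self, x: Tensor, edge_index: Tensor,
                edge_weight: Optional[Tensor] = None) -> Tensor:
        # propagate_type: (x: Tensor, edge_weight: Optional[Tensor])
        return self.propagate(edge_index, x=x, edge_weight=edge_weight,
                              size=None)

    def message(self, x_j: Tensor, edge_weight: Optional[Tensor]) -> Tensor:
        return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j


conv = torch.jit.script(WeightedConv().jittable())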
## [2.0.5] - 2022-MM-DD ### Added +- Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) - Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) diff --git a/torch_geometric/nn/dense/diff_pool.py b/torch_geometric/nn/dense/diff_pool.py index 54e746431325..6e6abb92e0cf 100644 --- a/torch_geometric/nn/dense/diff_pool.py +++ b/torch_geometric/nn/dense/diff_pool.py @@ -3,7 +3,7 @@ EPS = 1e-15 -def dense_diff_pool(x, adj, s, mask=None): +def dense_diff_pool(x, adj, s, mask=None, normalize=True): r"""The differentiable pooling operator from the `"Hierarchical Graph Representation Learning with Differentiable Pooling" `_ paper @@ -44,6 +44,9 @@ def dense_diff_pool(x, adj, s, mask=None): mask (BoolTensor, optional): Mask matrix :math:`\mathbf{M} \in {\{ 0, 1 \}}^{B \times N}` indicating the valid nodes for each graph. (default: :obj:`None`) + normalize (bool, optional): If set to :obj:`False`, the link + prediction loss is not divided by :obj:`adj.numel()`. + (default: :obj:`True`) :rtype: (:class:`Tensor`, :class:`Tensor`, :class:`Tensor`, :class:`Tensor`) @@ -66,7 +69,8 @@ def dense_diff_pool(x, adj, s, mask=None): link_loss = adj - torch.matmul(s, s.transpose(1, 2)) link_loss = torch.norm(link_loss, p=2) - link_loss = link_loss / adj.numel() + if normalize is True: + link_loss = link_loss / adj.numel() ent_loss = (-s * torch.log(s + EPS)).sum(dim=-1).mean() From 7ea71e3a1fa6e461433c7e054c536ebf6d42a58b Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Thu, 23 Jun 2022 11:39:59 -0700 Subject: [PATCH 0109/2432] Let `NeighborLoader` accept `Tuple[FeatureStore, GraphStore]` (#4817) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_data.py | 15 +- test/data/test_feature_store.py | 30 ++-- test/data/test_graph_store.py | 20 ++- test/data/test_hetero_data.py | 36 ++-- test/loader/test_neighbor_loader.py | 144 +++++++++++++++- torch_geometric/data/data.py | 57 ++++--- torch_geometric/data/feature_store.py | 46 +++++- torch_geometric/data/graph_store.py | 30 +++- torch_geometric/data/hetero_data.py | 30 +++- torch_geometric/loader/neighbor_loader.py | 191 +++++++++++++++++++--- torch_geometric/loader/utils.py | 43 ++++- 12 files changed, 553 insertions(+), 90 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56fd02033c72..e6ae6e7e07d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
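A short usage sketch of the new `normalize` flag added above (illustrative shapes only):

import torch
from torch_geometric.nn import dense_diff_pool

x = torch.randn(2, 10, 16)   # batch of 2 graphs, 10 nodes, 16 features
adj = torch.rand(2, 10, 10)  # dense adjacency matrices
s = torch.randn(2, 10, 4)    # soft assignments to 4 clusters

out, out_adj, link_loss, ent_loss = dense_diff_pool(x, adj, s)

# With normalize=False, the link prediction loss is no longer divided by
# adj.numel():
_, _, raw_link_loss, _ = dense_diff_pool(x, adj, s, normalize=False)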
## [2.0.5] - 2022-MM-DD ### Added +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/data/test_data.py b/test/data/test_data.py index 71c8bf80203c..5d14be740627 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -261,6 +261,14 @@ def test_basic_feature_store(): out = data.get_tensor(attr_name='x', index=None) assert torch.equal(x, out) + # Get tensor size: + assert data.get_tensor_size(attr_name='x') == (20, 20) + + # Get tensor attrs: + tensor_attrs = data.get_all_tensor_attrs() + assert len(tensor_attrs) == 1 + assert tensor_attrs[0].attr_name == 'x' + # Remove tensor: assert 'x' in data.__dict__['_store'] data.remove_tensor(attr_name='x', index=None) @@ -271,6 +279,7 @@ def test_basic_feature_store(): def test_basic_graph_store(): + r"""Test the core graph store API.""" data = Data() edge_index = torch.LongTensor([[0, 1], [1, 2]]) @@ -285,7 +294,7 @@ def assert_equal_tensor_tuple(expected, actual): # to confirm that `GraphStore` works as intended. coo = adj.coo()[:-1] csr = adj.csr()[:-1] - csc = adj.csc()[:-1] + csc = adj.csc()[-2::-1] # (row, colptr) # Put: data.put_edge_index(coo, layout='coo') @@ -296,3 +305,7 @@ def assert_equal_tensor_tuple(expected, actual): assert_equal_tensor_tuple(coo, data.get_edge_index('coo')) assert_equal_tensor_tuple(csr, data.get_edge_index('csr')) assert_equal_tensor_tuple(csc, data.get_edge_index('csc')) + + # Get attrs: + edge_attrs = data.get_all_edge_attrs() + assert len(edge_attrs) == 3 diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py index 76d5e516a4ce..db5fdf21af78 100644 --- a/test/data/test_feature_store.py +++ b/test/data/test_feature_store.py @@ -1,8 +1,9 @@ from dataclasses import dataclass -from typing import Optional +from typing import Dict, List, Optional, Tuple import pytest import torch +from torch import Tensor from torch_geometric.data.feature_store import ( AttrView, @@ -16,7 +17,7 @@ class MyFeatureStore(FeatureStore): def __init__(self): super().__init__() - self.store = {} + self.store: Dict[Tuple[str, str], Tensor] = {} @staticmethod def key(attr: TensorAttr) -> str: @@ -36,7 +37,6 @@ def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: index, tensor = self.store.get(MyFeatureStore.key(attr), (None, None)) - if tensor is None: return None @@ -51,6 +51,12 @@ def _remove_tensor(self, attr: TensorAttr) -> bool: del self.store[MyFeatureStore.key(attr)] return True + def _get_tensor_size(self, attr: TensorAttr) -> Tuple: + return self._get_tensor(attr).size() + + def get_all_tensor_attrs(self) -> List[str]: + return [TensorAttr(*key) for key in self.store.keys()] + def __len__(self): raise NotImplementedError @@ -68,16 +74,8 @@ def __init__(self): super().__init__() self._tensor_attr_cls = MyTensorAttrNoGroupName - @staticmethod - def key(attr: TensorAttr) -> str: - return attr.attr_name - - def __len__(self): - raise NotImplementedError - def test_feature_store(): - r"""Tests basic API and 
indexing functionality of a feature store.""" store = MyFeatureStore() tensor = torch.Tensor([[0, 0, 0], [1, 1, 1], [2, 2, 2]]) @@ -93,9 +91,9 @@ def test_feature_store(): store.get_tensor(group_name, attr_name, index=torch.tensor([0, 2])), tensor[torch.tensor([0, 2])], ) - assert store.get_tensor(None, None, index) is None store.remove_tensor(group_name, attr_name, None) - assert store.get_tensor(attr) is None + with pytest.raises(KeyError): + _ = store.get_tensor(attr) # Views: view = store.view(group_name=group_name) @@ -131,9 +129,11 @@ def test_feature_store(): # Deletion: del store[group_name, attr_name, index] - assert store[group_name, attr_name, index] is None + with pytest.raises(KeyError): + _ = store[group_name, attr_name, index] del store[group_name] - assert store[group_name]() is None + with pytest.raises(KeyError): + _ = store[group_name]() def test_feature_store_override(): diff --git a/test/data/test_graph_store.py b/test/data/test_graph_store.py index bc0089a02ce1..0460c3b0adcf 100644 --- a/test/data/test_graph_store.py +++ b/test/data/test_graph_store.py @@ -1,6 +1,8 @@ -from typing import Optional +from typing import Dict, Optional, Tuple +import pytest import torch +from torch import Tensor from torch_sparse import SparseTensor from torch_geometric.data.graph_store import ( @@ -14,11 +16,11 @@ class MyGraphStore(GraphStore): def __init__(self): super().__init__() - self.store = {} + self.store: Dict[EdgeAttr, Tuple[Tensor, Tensor]] = {} @staticmethod def key(attr: EdgeAttr) -> str: - return f"{attr.edge_type or ''}_{attr.layout}" + return (attr.edge_type, attr.layout.value) def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: @@ -27,6 +29,9 @@ def _put_edge_index(self, edge_index: EdgeTensorType, def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: return self.store.get(MyGraphStore.key(edge_attr), None) + def get_all_edge_attrs(self): + return [EdgeAttr(*key) for key in self.store.keys()] + def test_graph_store(): graph_store = MyGraphStore() @@ -42,7 +47,7 @@ def assert_equal_tensor_tuple(expected, actual): # to confirm that `GraphStore` works as intended. 
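    # A note on the three tuples below (assuming `adj` is a `SparseTensor`):
    # `adj.coo()` returns `(row, col, value)`, so `[:-1]` keeps `(row, col)`;
    # `adj.csr()` returns `(rowptr, col, value)`, so `[:-1]` keeps
    # `(rowptr, col)`; and `adj.csc()` returns `(colptr, row, value)`, so
    # `[-2::-1]` re-orders it into the expected `(row, colptr)` tuple.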
coo = adj.coo()[:-1] csr = adj.csr()[:-1] - csc = adj.csc()[:-1] + csc = adj.csc()[-2::-1] # (row, colptr) # Put: graph_store['edge', EdgeLayout.COO] = coo @@ -53,3 +58,10 @@ def assert_equal_tensor_tuple(expected, actual): assert_equal_tensor_tuple(coo, graph_store['edge', 'coo']) assert_equal_tensor_tuple(csr, graph_store['edge', 'csr']) assert_equal_tensor_tuple(csc, graph_store['edge', 'csc']) + + # Get attrs: + edge_attrs = graph_store.get_all_edge_attrs() + assert len(edge_attrs) == 3 + + with pytest.raises(KeyError): + _ = graph_store['edge_2', 'coo'] diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 84c74041eaa8..6b8d88735537 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -424,6 +424,15 @@ def test_basic_feature_store(): out = data.get_tensor(group_name='paper', attr_name='x', index=None) assert torch.equal(x, out) + # Get tensor size: + assert data.get_tensor_size(group_name='paper', attr_name='x') == (20, 20) + + # Get tensor attrs: + tensor_attrs = data.get_all_tensor_attrs() + assert len(tensor_attrs) == 1 + assert tensor_attrs[0].group_name == 'paper' + assert tensor_attrs[0].attr_name == 'x' + # Remove tensor: assert 'x' in data['paper'].__dict__['_mapping'] data.remove_tensor(group_name='paper', attr_name='x', index=None) @@ -437,7 +446,8 @@ def test_basic_graph_store(): data = HeteroData() edge_index = torch.LongTensor([[0, 1], [1, 2]]) - adj = torch_sparse.SparseTensor(row=edge_index[0], col=edge_index[1]) + adj = torch_sparse.SparseTensor(row=edge_index[0], col=edge_index[1], + sparse_sizes=(3, 3)) def assert_equal_tensor_tuple(expected, actual): assert len(expected) == len(actual) @@ -448,17 +458,21 @@ def assert_equal_tensor_tuple(expected, actual): # to confirm that `GraphStore` works as intended. 
coo = adj.coo()[:-1] csr = adj.csr()[:-1] - csc = adj.csc()[:-1] + csc = adj.csc()[-2::-1] # (row, colptr) # Put: - data.put_edge_index(coo, layout='coo', edge_type='1') - data.put_edge_index(csr, layout='csr', edge_type='2') - data.put_edge_index(csc, layout='csc', edge_type='3') + data.put_edge_index(coo, layout='coo', edge_type=('a', 'to', 'b')) + data.put_edge_index(csr, layout='csr', edge_type=('a', 'to', 'c')) + data.put_edge_index(csc, layout='csc', edge_type=('b', 'to', 'c')) # Get: - assert_equal_tensor_tuple(coo, - data.get_edge_index(layout='coo', edge_type='1')) - assert_equal_tensor_tuple(csr, - data.get_edge_index(layout='csr', edge_type='2')) - assert_equal_tensor_tuple(csc, - data.get_edge_index(layout='csc', edge_type='3')) + assert_equal_tensor_tuple( + coo, data.get_edge_index(layout='coo', edge_type=('a', 'to', 'b'))) + assert_equal_tensor_tuple( + csr, data.get_edge_index(layout='csr', edge_type=('a', 'to', 'c'))) + assert_equal_tensor_tuple( + csc, data.get_edge_index(layout='csc', edge_type=('b', 'to', 'c'))) + + # Get attrs: + edge_attrs = data.get_all_edge_attrs() + assert len(edge_attrs) == 3 diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 56bbfa94fe67..f711407d0ac6 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -1,3 +1,6 @@ +import itertools +import sys + import numpy as np import pytest import torch @@ -5,9 +8,15 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader +from torch_geometric.loader.neighbor_loader import get_input_nodes from torch_geometric.nn import GraphConv, to_hetero from torch_geometric.testing import withRegisteredOp -from torch_geometric.utils import k_hop_subgraph +from torch_geometric.utils import k_hop_subgraph, sort_edge_index + +sys.path.append("..") +# pylint: disable=wrong-import-order,wrong-import-position,no-name-in-module +from data.test_feature_store import MyFeatureStore # noqa: E402 +from data.test_graph_store import MyGraphStore # noqa: E402 def get_edge_index(num_src_nodes, num_dst_nodes, num_edges): @@ -275,3 +284,136 @@ def test_temporal_heterogeneous_neighbor_loader_on_cora(get_dataset): for batch in loader: mask = batch['paper'].time[0] >= batch['paper'].time[1:] assert torch.all(mask) + + +@pytest.mark.parametrize('directed', [True, False]) +def test_custom_neighbor_loader(directed): + r"""This test evaluates the correctness of a `NeighborLoader` constructed + from a feature store and graph store by comparing it to a `NeighborLoader` + constructed from a `HeteroData` object.""" + torch.manual_seed(12345) + + # Possible feature and graph stores: + feature_stores = [MyFeatureStore(), HeteroData()] + graph_stores = [MyGraphStore(), HeteroData()] + hetero_data = HeteroData() + + # Set up edge indices: + def _get_edge_index(num_src, num_dst, num_edges): + edge_index = get_edge_index(num_src, num_dst, num_edges) + edge_index = sort_edge_index(edge_index) + adj = SparseTensor.from_edge_index(edge_index, is_sorted=True) + rowptr, col, _ = adj.csr() + return edge_index, rowptr, col + + # Assertion utility: + def _assert_tensor_dict_equal(expected, actual): + assert expected.keys() == actual.keys() + for key in expected: + assert torch.equal(expected[key], actual[key]) + + # NOTE in this test, here we solely use explicit APIs, since + # `HeteroData` and `Data` both override dunder methods: + for feature_store, graph_store in itertools.product( + feature_stores, graph_stores): + # Set up node 
features: + x = torch.arange(100) + hetero_data['paper'].x = x + feature_store.put_tensor(x, group_name='paper', attr_name='x', + index=None) + x = torch.arange(100, 300) + hetero_data['author'].x = x + feature_store.put_tensor(x, group_name='author', attr_name='x', + index=None) + + # Set up edge indices: + edge_index, rowptr, col = _get_edge_index(100, 100, 500) + hetero_data['paper', 'to', 'paper'].edge_index = edge_index + graph_store.put_edge_index( + edge_index=(rowptr, col), + edge_type=('paper', 'to', 'paper'), + layout='csr', + ) + + edge_index, rowptr, col = _get_edge_index(100, 200, 1000) + hetero_data['paper', 'to', 'author'].edge_index = edge_index + graph_store.put_edge_index( + edge_index=(rowptr, col), + edge_type=('paper', 'to', 'author'), + layout='csr', + ) + + edge_index, rowptr, col = _get_edge_index(200, 100, 1000) + hetero_data['author', 'to', 'paper'].edge_index = edge_index + graph_store.put_edge_index( + edge_index=(rowptr, col), + edge_type=('author', 'to', 'paper'), + layout='csr', + ) + + # Construct neighbor loaders: + batch_size = 20 + input_type = 'paper' + hetero_data_loader = NeighborLoader( + data=hetero_data, + num_neighbors=[-1] * 2, + input_nodes=input_type, + batch_size=batch_size, + directed=directed, + ) + + input_type = feature_store._tensor_attr_cls(group_name='paper', + attr_name='x') + custom_loader = NeighborLoader( + data=(feature_store, graph_store), + input_nodes=input_type, + num_neighbors=[-1] * 2, + batch_size=batch_size, + is_sorted=True, + directed=directed, + ) + + # Basic assertions: + assert str(custom_loader) == 'NeighborLoader()' + assert len(custom_loader) == (100 + batch_size - 1) // batch_size + + # Equivalent input nodes: + hetero_input_nodes = get_input_nodes(hetero_data, + hetero_data_loader.input_nodes) + custom_input_nodes = get_input_nodes((feature_store, graph_store), + custom_loader.input_nodes) + + assert hetero_input_nodes == custom_input_nodes + + # Equivalent inner representations: + assert (hetero_data_loader.neighbor_sampler.node_types == + custom_loader.neighbor_sampler.node_types) + assert (hetero_data_loader.neighbor_sampler.edge_types == + custom_loader.neighbor_sampler.edge_types) + + # Equivalent neighbor sampler outputs: + expected = hetero_data_loader.neighbor_sampler([0, 1, 2, 3]) + actual = custom_loader.neighbor_sampler([0, 1, 2, 3]) + + for i in range(len(expected) - 1): + _assert_tensor_dict_equal(expected[i], actual[i]) + + # Equivalent outputs when iterating the `DataLoader`: + custom_batches = [] + for batch in custom_loader: + assert isinstance(batch, HeteroData) + custom_batches.append(batch) + + hetero_data_batches = [] + for batch in hetero_data_loader: + hetero_data_batches.append(batch) + + for expected, actual in zip(hetero_data_batches, custom_batches): + # Check node features: + for node_type in actual.node_types: + assert torch.equal(expected[node_type].x, actual[node_type].x) + + # Check edge indices: + for edge_type in actual.edge_types: + assert torch.equal(expected[edge_type].edge_index, + actual[edge_type].edge_index) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index ddda8a5af8d8..0737ee180f71 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -736,11 +736,13 @@ def num_faces(self) -> Optional[int]: return self.face.size(self.__cat_dim__('face', self.face)) return None - # FeatureStore interface ########################################### + # FeatureStore interface ################################################## def 
items(self): r"""Returns an `ItemsView` over the stored attributes in the `Data` object.""" + # NOTE this is necessary to override the default `MutableMapping` + # items() method. return self._store.items() def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: @@ -774,6 +776,14 @@ def _remove_tensor(self, attr: TensorAttr) -> bool: return True return False + def _get_tensor_size(self, attr: TensorAttr) -> Tuple: + r"""Returns the size of the tensor corresponding to `attr`.""" + return self._get_tensor(attr).size() + + def get_all_tensor_attrs(self) -> List[TensorAttr]: + r"""Obtains all feature attributes stored in `Data`.""" + return [TensorAttr(attr_name=name) for name in self._store.keys()] + def __len__(self) -> int: return BaseData.__len__(self) @@ -781,14 +791,17 @@ def __len__(self) -> int: def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: - # Convert the edge index to a recognizable format: + r"""Stores `edge_index` in `Data`, in the specified layout.""" + # Convert the edge index to a recognizable layout: attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) setattr(self, attr_name, attr_val) return True def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: - # Get the requested format and the Adj tensor associated with it: + r"""Obtains the edge index corresponding to `edge_attr` in `Data`, + in the specified layout.""" + # Get the requested layout and the edge tensor type associated with it: attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] attr_val = getattr(self._store, attr_name, None) if attr_val is not None: @@ -796,6 +809,15 @@ def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: attr_val = adj_type_to_edge_tensor_type(edge_attr.layout, attr_val) return attr_val + def get_all_edge_attrs(self) -> List[EdgeAttr]: + r"""Returns `EdgeAttr` objects corresponding to the edge indices stored + in `Data` and their layouts""" + out = [] + for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): + if attr_name in self: + out.append(EdgeAttr(edge_type=None, layout=layout)) + return out + ############################################################################### @@ -811,26 +833,25 @@ def edge_tensor_type_to_adj_type( tensor_tuple: EdgeTensorType, ) -> Adj: r"""Converts an EdgeTensorType tensor tuple to a PyG Adj tensor.""" + src, dst = tensor_tuple + if attr.layout == EdgeLayout.COO: # COO: (row, col) - if (tensor_tuple[0].storage().data_ptr() == - tensor_tuple[1].storage().data_ptr()): + if (src[0].storage().data_ptr() == dst[1].storage().data_ptr()): # Do not copy if the tensor tuple is constructed from the same # storage (instead, return a view): - out = torch.empty(0, dtype=tensor_tuple[0].dtype) - out.set_(tensor_tuple[0].storage(), storage_offset=0, - size=tensor_tuple[0].size() + tensor_tuple[1].size()) + out = torch.empty(0, dtype=src.dtype) + out.set_(src.storage(), storage_offset=0, + size=src.size() + dst.size()) return out.view(2, -1) return torch.stack(tensor_tuple) elif attr.layout == EdgeLayout.CSR: # CSR: (rowptr, col) - return SparseTensor(rowptr=tensor_tuple[0], col=tensor_tuple[1], - is_sorted=True) + return SparseTensor(rowptr=src, col=dst, is_sorted=True) elif attr.layout == EdgeLayout.CSC: # CSC: (row, colptr) this is a transposed adjacency matrix, so rowptr # is the compressed column and col is the uncompressed row. 
- return SparseTensor(rowptr=tensor_tuple[1], col=tensor_tuple[0], - is_sorted=True) + return SparseTensor(rowptr=dst, col=src, is_sorted=True) raise ValueError(f"Bad edge layout (got '{attr.layout}')") @@ -838,17 +859,13 @@ def adj_type_to_edge_tensor_type(layout: EdgeLayout, edge_index: Adj) -> EdgeTensorType: r"""Converts a PyG Adj tensor to an EdgeTensorType equivalent.""" if isinstance(edge_index, Tensor): - return (edge_index[0], edge_index[1]) + return (edge_index[0], edge_index[1]) # (row, col) if layout == EdgeLayout.COO: - row, col, _ = edge_index.coo() - return (row, col) + return edge_index.coo()[:-1] # (row, col elif layout == EdgeLayout.CSR: - rowptr, col, _ = edge_index.csr() - return (rowptr, col) + return edge_index.csr()[:-1] # (rowptr, col) else: - # CSC is just adj_t.csr(): - colptr, row, _ = edge_index.csr() - return (row, colptr) + return edge_index.csr()[-2::-1] # (row, colptr) def size_repr(key: Any, value: Any, indent: int = 0) -> str: diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index 38065f591d3c..c1a30596f791 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -25,7 +25,7 @@ from collections.abc import MutableMapping from dataclasses import dataclass from enum import Enum -from typing import Any, Optional, Union +from typing import Any, List, Optional, Tuple, Union import numpy as np import torch @@ -269,6 +269,9 @@ def put_tensor(self, tensor: FeatureTensorType, *args, **kwargs) -> bool: Returns: bool: Whether insertion was successful. + + Raises: + ValueError: if the input `TensorAttr` is not fully specified. """ attr = self._tensor_attr_cls.cast(*args, **kwargs) if not attr.is_fully_specified(): @@ -282,7 +285,7 @@ def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: r"""To be implemented by :class:`FeatureStore` subclasses.""" pass - def get_tensor(self, *args, **kwargs) -> Optional[FeatureTensorType]: + def get_tensor(self, *args, **kwargs) -> FeatureTensorType: r"""Synchronously obtains a :class:`FeatureTensorType` object from the feature store. Feature store implementors guarantee that the call :obj:`get_tensor(put_tensor(tensor, attr), attr) = tensor` holds. @@ -296,12 +299,14 @@ def get_tensor(self, *args, **kwargs) -> Optional[FeatureTensorType]: from a :class:`TensorAttr` object. Returns: - FeatureTensorType, optional: a Tensor of the same type as the - index, or :obj:`None` if no tensor was found. + FeatureTensorType: a Tensor of the same type as the index, or + :obj:`None` if no tensor was found. + + Raises: + KeyError: if the tensor corresponding to attr was not found. + ValueError: if the input `TensorAttr` is not fully specified. """ def to_type(tensor: FeatureTensorType) -> FeatureTensorType: - if tensor is None: - return None if (isinstance(attr.index, torch.Tensor) and isinstance(tensor, np.ndarray)): return torch.from_numpy(tensor) @@ -320,7 +325,10 @@ def to_type(tensor: FeatureTensorType) -> FeatureTensorType: f"specified. Please fully specify the input by " f"specifying all 'UNSET' fields.") - return to_type(self._get_tensor(attr)) + tensor = self._get_tensor(attr) + if tensor is None: + raise KeyError(f"A tensor corresponding to '{attr}' was not found") + return to_type(tensor) @abstractmethod def _remove_tensor(self, attr: TensorAttr) -> bool: @@ -340,6 +348,9 @@ def remove_tensor(self, *args, **kwargs) -> bool: Returns: bool: Whether deletion was succesful. 
+ + Raises: + ValueError: if the input `TensorAttr` is not fully specified. """ attr = self._tensor_attr_cls.cast(*args, **kwargs) if not attr.is_fully_specified(): @@ -370,7 +381,26 @@ def update_tensor(self, tensor: FeatureTensorType, *args, self.remove_tensor(attr) return self.put_tensor(tensor, attr) - # :obj:`AttrView` methods ################################################# + # Additional methods ###################################################### + + @abstractmethod + def _get_tensor_size(self, attr: TensorAttr) -> Tuple: + pass + + def get_tensor_size(self, *args, **kwargs) -> Tuple: + r"""Obtains the size of a tensor given its attributes, or :obj:`None` + if the tensor does not exist.""" + attr = self._tensor_attr_cls.cast(*args, **kwargs) + if not attr.is_set('index'): + attr.index = None + return self._get_tensor_size(attr) + + @abstractmethod + def get_all_tensor_attrs(self) -> List[TensorAttr]: + r"""Obtains all tensor attributes stored in this feature store.""" + pass + + # `AttrView` methods ###################################################### def view(self, *args, **kwargs) -> AttrView: r"""Returns an :class:`AttrView` of the feature store, with the defined diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py index b97c4064ee54..2d3cde1a7c2c 100644 --- a/torch_geometric/data/graph_store.py +++ b/torch_geometric/data/graph_store.py @@ -1,7 +1,7 @@ from abc import abstractmethod from dataclasses import dataclass from enum import Enum -from typing import Any, Optional +from typing import Any, List, Optional from torch_geometric.typing import EdgeTensorType from torch_geometric.utils.mixin import CastMixin @@ -28,6 +28,15 @@ class EdgeAttr(CastMixin): # meaningful for COO (CSC and CSR are sorted by definition) is_sorted: bool = False + # TODO support num_nodes here, default None, so users can specify this + # instead of relying on default inferral + + def __init__(self, edge_type: Optional[Any], layout: EdgeLayout, + is_sorted: bool = False): + self.edge_type = edge_type + self.layout = EdgeLayout(layout) + self.is_sorted = is_sorted + class GraphStore: def __init__(self, edge_attr_cls: Any = EdgeAttr): @@ -64,10 +73,10 @@ def put_edge_index(self, edge_index: EdgeTensorType, *args, return self._put_edge_index(edge_index, edge_attr) @abstractmethod - def _get_edge_index(self, edge_attr: EdgeAttr) -> EdgeTensorType: + def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: pass - def get_edge_index(self, *args, **kwargs) -> Optional[EdgeTensorType]: + def get_edge_index(self, *args, **kwargs) -> EdgeTensorType: r"""Synchronously gets an edge_index tensor from the materialized graph. @@ -77,15 +86,28 @@ def get_edge_index(self, *args, **kwargs) -> Optional[EdgeTensorType]: Returns: EdgeTensorType: an edge_index tensor corresonding to the provided attributes, or None if there is no such tensor. + + Raises: + KeyError: if the edge index corresponding to attr was not found. """ edge_attr = self._edge_attr_cls.cast(*args, **kwargs) edge_attr.layout = EdgeLayout(edge_attr.layout) - return self._get_edge_index(edge_attr) + edge_index = self._get_edge_index(edge_attr) + if edge_index is None: + raise KeyError(f"An edge corresponding to '{edge_attr}' was not " + f"found") + return edge_index # TODO implement coo(), csc(), csr() methods on GraphStore, which perform # conversions of edge indices between formats. 
These conversions can also # automatically be performed in `get_edge_index` + # Additional methods ###################################################### + + @abstractmethod + def get_all_edge_attrs(self) -> List[EdgeAttr]: + pass + # Python built-ins ######################################################## def __setitem__(self, key: EdgeAttr, value: EdgeTensorType): diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 69fec089f8f6..c343ae4330fb 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -673,6 +673,17 @@ def _remove_tensor(self, attr: TensorAttr) -> bool: return True return False + def _get_tensor_size(self, attr: TensorAttr) -> Tuple: + r"""Returns the size of the tensor corresponding to `attr`.""" + return self._get_tensor(attr).size() + + def get_all_tensor_attrs(self) -> List[TensorAttr]: + out = [] + for group_name, group in self.node_items(): + for attr_name in group: + out.append(TensorAttr(group_name, attr_name)) + return out + def __len__(self) -> int: return BaseData.__len__(self) @@ -683,13 +694,16 @@ def __iter__(self): def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: - # Convert the edge index to a recognizable format: + r"""Stores an edge index in edge storage, in the specified layout.""" + # Convert the edge index to a recognizable layout: attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) setattr(self[edge_attr.edge_type], attr_name, attr_val) + return True - def _get_edge_index(self, edge_attr: EdgeAttr) -> EdgeTensorType: - # Get the requested format and the Adj tensor associated with it: + def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: + r"""Gets an edge index from edge storage, in the specified layout.""" + # Get the requested layout and the Adj tensor associated with it: attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] attr_val = getattr(self[edge_attr.edge_type], attr_name, None) if attr_val is not None: @@ -697,6 +711,16 @@ def _get_edge_index(self, edge_attr: EdgeAttr) -> EdgeTensorType: attr_val = adj_type_to_edge_tensor_type(edge_attr.layout, attr_val) return attr_val + def get_all_edge_attrs(self) -> List[EdgeAttr]: + r"""Returns a list of `EdgeAttr` objects corresponding to the edge + indices stored in `HeteroData` and their layouts.""" + out = [] + for edge_type, edge_store in self.edge_items(): + for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): + if attr_name in edge_store: + out.append(EdgeAttr(edge_type=edge_type, layout=layout)) + return out + # Helper functions ############################################################ diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 0521ed54e0ae..521b2805086a 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -1,14 +1,22 @@ +from collections import defaultdict from collections.abc import Sequence -from typing import Any, Callable, Iterator, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union import torch from torch import Tensor from torch_geometric.data import Data, HeteroData +from torch_geometric.data.data import ( + EDGE_LAYOUT_TO_ATTR_NAME, + edge_tensor_type_to_adj_type, +) +from torch_geometric.data.feature_store import FeatureStore, TensorAttr +from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout, 
GraphStore from torch_geometric.loader.base import DataLoaderIterator from torch_geometric.loader.utils import ( edge_type_to_str, filter_data, + filter_feature_store, filter_hetero_data, to_csc, to_hetero_csc, @@ -19,7 +27,7 @@ class NeighborSampler: def __init__( self, - data: Union[Data, HeteroData], + data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], num_neighbors: NumNeighbors, replace: bool = False, directed: bool = True, @@ -28,12 +36,18 @@ def __init__( is_sorted: bool = False, share_memory: bool = False, ): - self.data_cls = data.__class__ + self.data_cls = data.__class__ if isinstance( + data, (Data, HeteroData)) else 'custom' self.num_neighbors = num_neighbors self.replace = replace self.directed = directed self.node_time = None + # TODO Unify the following conditionals behind the `FeatureStore` + # and `GraphStore` API + + # If we are working with a `Data` object, convert the edge_index to + # CSC and store it: if isinstance(data, Data): if time_attr is not None: # TODO `time_attr` support for homogeneous graphs @@ -47,6 +61,8 @@ def __init__( self.colptr, self.row, self.perm = out assert isinstance(num_neighbors, (list, tuple)) + # If we are working with a `HeteroData` object, convert each edge + # type's edge_index to CSC and store it: elif isinstance(data, HeteroData): if time_attr is not None: self.node_time_dict = data.collect(time_attr) @@ -74,6 +90,99 @@ def __init__( assert input_type is not None self.input_type = input_type + # If we are working with a `Tuple[FeatureStore, GraphStore]` object, + # obtain edges from GraphStore and convert them to CSC if necessary, + # storing the resulting representations: + elif isinstance(data, tuple): + # TODO support `FeatureStore` with no edge types (e.g. `Data`) + feature_store, graph_store = data + + # TODO support `collect` on `FeatureStore` + self.node_time_dict = None + if time_attr is not None: + raise ValueError( + f"'time_attr' attribute not yet supported for " + f"'{data[0].__class__.__name__}' object") + + # Obtain all node and edge metadata: + node_attrs = feature_store.get_all_tensor_attrs() + edge_attrs = graph_store.get_all_edge_attrs() + + self.node_types = [ + node_attr.group_name for node_attr in node_attrs + ] + self.edge_types = [edge_attr.edge_type for edge_attr in edge_attrs] + + # Set other required parameters: + if isinstance(num_neighbors, (list, tuple)): + num_neighbors = {key: num_neighbors for key in self.edge_types} + assert isinstance(num_neighbors, dict) + self.num_neighbors = { + edge_type_to_str(key): value + for key, value in num_neighbors.items() + } + self.num_hops = max([len(v) for v in self.num_neighbors.values()]) + + assert input_type is not None + self.input_type = input_type + + # Obtain CSC representation of graph for in-memory sampling: + # TODO this code will be replaced with a `GraphStore.sample` call + # when sampling routines are factored out to work with pyg-lib and + # GraphStore + edge_type_to_layouts: Dict[Any, + List[EdgeLayout]] = defaultdict(list) + for attr in edge_attrs: + edge_type_to_layouts[attr.edge_type].append(attr.layout) + + self.colptr_dict, self.row_dict, self.perm_dict = {}, {}, {} + for edge_type, edge_layouts in edge_type_to_layouts.items(): + key = edge_type_to_str(edge_type) + + # Select the most favorable layout, if multiple exist: + edge_layout = edge_layouts[0] + ordering = { + EdgeLayout.COO: 0, + EdgeLayout.CSR: 1, + EdgeLayout.CSC: 2 + } + for layout in edge_layouts[1:]: + if ordering[layout] > ordering[edge_layout]: + edge_layout = layout + 
+ # TODO the below logic currently only works for CSC and CSR + # edge layouts, so throw an exception of our best format is + # COO: + if edge_layout == EdgeLayout.COO: + raise ValueError( + f"NeighborSampler currently only supports CSC and " + f"CSR edge index types in the GraphStore, but " + f"edge {edge_type} has format " + f"{edge_layout.value.upper()}. Please convert " + f"{edge_type} to either CSC or CSR formats " + f"in order to use it with NeighborSampler.") + + # Obtain edge index from backing GraphStore: + edge_index_tuple = graph_store.get_edge_index( + edge_type=edge_type, layout=edge_layout) + + # Convert to format for to_csc: + class _DataArgument(object): + pass + + data_argument = _DataArgument() + attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_layout] + edge_index = edge_tensor_type_to_adj_type( + EdgeAttr(layout=edge_layout, edge_type=edge_type), + edge_index_tuple) + + setattr(data_argument, attr_name, edge_index) + + self.colptr_dict[key], self.row_dict[key], self.perm_dict[ + key] = to_csc(data_argument, device='cpu', + share_memory=share_memory, + is_sorted=is_sorted) + else: raise TypeError(f'NeighborLoader found invalid type: {type(data)}') @@ -81,7 +190,7 @@ def __call__(self, index: Union[List[int], Tensor]): if not isinstance(index, torch.LongTensor): index = torch.LongTensor(index) - if issubclass(self.data_cls, Data): + if self.data_cls != 'custom' and issubclass(self.data_cls, Data): fn = torch.ops.torch_sparse.neighbor_sample node, row, col, edge = fn( self.colptr, @@ -93,7 +202,8 @@ def __call__(self, index: Union[List[int], Tensor]): ) return node, row, col, edge, index.numel() - elif issubclass(self.data_cls, HeteroData): + elif self.data_cls == 'custom' or issubclass(self.data_cls, + HeteroData): if self.node_time_dict is None: fn = torch.ops.torch_sparse.hetero_neighbor_sample node_dict, row_dict, col_dict, edge_dict = fn( @@ -258,7 +368,7 @@ class NeighborLoader(torch.utils.data.DataLoader): """ def __init__( self, - data: Union[Data, HeteroData], + data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], num_neighbors: NumNeighbors, input_nodes: InputNodes = None, replace: bool = False, @@ -317,6 +427,14 @@ def transform_fn(self, out: Any) -> Union[Data, HeteroData]: self.neighbor_sampler.perm_dict) data[self.neighbor_sampler.input_type].batch_size = batch_size + else: # Tuple[FeatureStore, GraphStore] + # TODO support for feature stores with no edge types + node_dict, row_dict, col_dict, edge_dict, batch_size = out + feature_store, _ = self.data + data = filter_feature_store(feature_store, node_dict, row_dict, + col_dict, edge_dict) + data[self.neighbor_sampler.input_type].batch_size = batch_size + return data if self.transform is None else self.transform(data) def _get_iterator(self) -> Iterator: @@ -329,28 +447,59 @@ def __repr__(self) -> str: ############################################################################### -def get_input_nodes(data: Union[Data, HeteroData], - input_nodes: InputNodes) -> Tuple[Optional[str], Sequence]: +def get_input_nodes( + data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], + input_nodes: Union[InputNodes, TensorAttr], +) -> Tuple[Optional[str], Sequence]: + def from_bool_tensor(tensor): + return tensor.nonzero( + as_tuple=False).view(-1) if tensor.dtype == torch.bool else tensor + if isinstance(data, Data): if input_nodes is None: return None, range(data.num_nodes) if input_nodes.dtype == torch.bool: - input_nodes = input_nodes.nonzero(as_tuple=False).view(-1) + input_nodes = 
from_bool_tensor(input_nodes) return None, input_nodes - assert input_nodes is not None + elif isinstance(data, HeteroData): + assert input_nodes is not None - if isinstance(input_nodes, str): - return input_nodes, range(data[input_nodes].num_nodes) + if isinstance(input_nodes, str): + return input_nodes, range(data[input_nodes].num_nodes) - assert isinstance(input_nodes, (list, tuple)) - assert len(input_nodes) == 2 - assert isinstance(input_nodes[0], str) + assert isinstance(input_nodes, (list, tuple)) + assert len(input_nodes) == 2 + assert isinstance(input_nodes[0], str) - if input_nodes[1] is None: - return input_nodes[0], range(data[input_nodes[0]].num_nodes) + if input_nodes[1] is None: + return input_nodes[0], range(data[input_nodes[0]].num_nodes) - node_type, input_nodes = input_nodes - if input_nodes.dtype == torch.bool: - input_nodes = input_nodes.nonzero(as_tuple=False).view(-1) - return node_type, input_nodes + node_type, input_nodes = input_nodes + if input_nodes.dtype == torch.bool: + input_nodes = from_bool_tensor(input_nodes) + return node_type, input_nodes + + else: # Tuple[FeatureStore, GraphStore] + # NOTE FeatureStore and GraphStore are treated as separate + # entities, so we cannot leverage the custom structure in Data and + # HeteroData to infer the number of nodes. As a result, here we expect + # that the input nodes are either explicitly provided or can be + # directly inferred from the feature store. + feature_store, _ = data + + # Explicit tensor: + if isinstance(input_nodes, Tensor): + return None, from_bool_tensor(input_nodes) + + if isinstance(input_nodes, tuple) and isinstance( + input_nodes[0], str) and isinstance(input_nodes[1], Tensor): + return input_nodes[0], from_bool_tensor(input_nodes[1]) + + # Implicit from TensorAttr (infer number of nodes from feature tensor): + assert isinstance(input_nodes, TensorAttr) + assert input_nodes.is_set('attr_name') + return getattr(input_nodes, 'group_name', None), range( + feature_store.get_tensor_size(input_nodes)[0]) + + # TODO support implicit from EdgeAttr diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 248859e2c0f9..beeb8f36dde5 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -7,6 +7,7 @@ from torch_sparse import SparseTensor from torch_geometric.data import Data, HeteroData +from torch_geometric.data.feature_store import FeatureStore from torch_geometric.data.storage import EdgeStorage, NodeStorage from torch_geometric.typing import EdgeType, OptTensor @@ -30,6 +31,10 @@ def edge_type_to_str(edge_type: Union[EdgeType, str]) -> str: return edge_type if isinstance(edge_type, str) else '__'.join(edge_type) +def str_to_edge_type(key: Union[EdgeType, str]) -> EdgeType: + return key if isinstance(key, tuple) else tuple(key.split('__')) + + def to_csc( data: Union[Data, EdgeStorage], device: Optional[torch.device] = None, @@ -43,7 +48,10 @@ def to_csc( # `perm` can be of type `None`. 
perm: Optional[Tensor] = None - if hasattr(data, 'adj_t'): + if hasattr(data, 'adj'): + colptr, row, _ = data.adj.csc() + + elif hasattr(data, 'adj_t'): colptr, row, _ = data.adj_t.csr() elif hasattr(data, 'edge_index'): @@ -54,7 +62,7 @@ def to_csc( colptr = torch.ops.torch_sparse.ind2ptr(col[perm], data.size(1)) else: raise AttributeError("Data object does not contain attributes " - "'adj_t' or 'edge_index'") + "'adj', 'adj_t' or 'edge_index'") colptr = colptr.to(device) row = row.to(device) @@ -176,3 +184,34 @@ def filter_hetero_data( edge_dict[edge_type_str], perm_dict[edge_type_str]) return out + + +def filter_feature_store( + feature_store: FeatureStore, + node_dict: Dict[str, Tensor], + row_dict: Dict[str, Tensor], + col_dict: Dict[str, Tensor], + edge_dict: Dict[str, Tensor], +) -> HeteroData: + r"""Constructs a `HeteroData` object from a feature store that only holds + nodes in `node` end edges in `edge` for each node and edge type, + respectively.""" + + # Construct a new `HeteroData` object: + data = HeteroData() + + # Filter edge storage: + for key in edge_dict: + edge_index = torch.stack([row_dict[key], col_dict[key]], dim=0) + data[str_to_edge_type(key)].edge_index = edge_index + + # Filter node storage: + for attr in feature_store.get_all_tensor_attrs(): + if attr.group_name in node_dict: + # If we have sampled nodes from this group, index into the + # feature store for these nodes' features: + attr.index = node_dict[attr.group_name] + tensor = feature_store.get_tensor(attr) + data[attr.group_name][attr.attr_name] = tensor + + return data From ded9a7b10ad8ebc19c97e567c7bb1ae6605253db Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Thu, 23 Jun 2022 14:28:58 -0700 Subject: [PATCH 0110/2432] Fix: `NeighborLoader` test with `Tuple[FeatureStore, GraphStore]` (#4851) --- CHANGELOG.md | 2 +- test/loader/test_neighbor_loader.py | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6ae6e7e07d8..fcc44dfdf5c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
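With the pieces above in place, a `NeighborLoader` can be driven directly by a feature store and graph store pair. A minimal sketch (illustrative only), assuming `feature_store` and `graph_store` have already been populated with 'paper'/'author' features and sorted CSR edge indices as in `test_custom_neighbor_loader` above:

from torch_geometric.data.feature_store import TensorAttr
from torch_geometric.loader import NeighborLoader

loader = NeighborLoader(
    data=(feature_store, graph_store),
    num_neighbors=[-1] * 2,
    # The number of input nodes is inferred from the 'paper'/'x' tensor size:
    input_nodes=TensorAttr(group_name='paper', attr_name='x'),
    batch_size=20,
    is_sorted=True,
)

for batch in loader:
    # Each mini-batch is returned as a `HeteroData` object:
    print(batch['paper'].batch_size)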
## [2.0.5] - 2022-MM-DD ### Added -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index f711407d0ac6..fda168772493 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -294,9 +294,8 @@ def test_custom_neighbor_loader(directed): torch.manual_seed(12345) # Possible feature and graph stores: - feature_stores = [MyFeatureStore(), HeteroData()] - graph_stores = [MyGraphStore(), HeteroData()] - hetero_data = HeteroData() + feature_stores = [MyFeatureStore, HeteroData] + graph_stores = [MyGraphStore, HeteroData] # Set up edge indices: def _get_edge_index(num_src, num_dst, num_edges): @@ -316,6 +315,12 @@ def _assert_tensor_dict_equal(expected, actual): # `HeteroData` and `Data` both override dunder methods: for feature_store, graph_store in itertools.product( feature_stores, graph_stores): + + # Initialize feature store, graph store, and reference: + feature_store = feature_store() + graph_store = graph_store() + hetero_data = HeteroData() + # Set up node features: x = torch.arange(100) hetero_data['paper'].x = x From 32db993b80d91e93885f1b9cca1e24a43f365813 Mon Sep 17 00:00:00 2001 From: Jiaxuan Date: Thu, 23 Jun 2022 20:14:01 -0700 Subject: [PATCH 0111/2432] Correct docstring for `SAGEConv` (#4852) * Correct docstring for SAGEConv * Correct docstring for SAGEConv --- CHANGELOG.md | 1 + torch_geometric/nn/conv/sage_conv.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcc44dfdf5c1..d27ffb749860 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Correct docstring for SAGEConv ([#4852](https://github.com/pyg-team/pytorch_geometric/pull/4852)) - Fixed a bug in `TUDataset` where `pre_filter` was not applied whenever `pre_transform` was present - Renamed `RandomTranslate` to `RandomJitter` - the usage of `RandomTranslate` is now deprecated ([#4828](https://github.com/pyg-team/pytorch_geometric/pull/4828)) - Do not allow accessing edge types in `HeteroData` with two node types when there exists multiple relations between these types ([#4782](https://github.com/pyg-team/pytorch_geometric/pull/4782)) diff --git a/torch_geometric/nn/conv/sage_conv.py b/torch_geometric/nn/conv/sage_conv.py index 27c3d38bbd98..ceb97d784d92 100644 --- a/torch_geometric/nn/conv/sage_conv.py +++ b/torch_geometric/nn/conv/sage_conv.py @@ -38,7 +38,7 @@ class SAGEConv(MessagePassing): out_channels (int): Size of each output sample. 
aggr (string, optional): The aggregation scheme to use (:obj:`"mean"`, :obj:`"max"`, :obj:`"lstm"`). - (default: :obj:`"add"`) + (default: :obj:`"mean"`) normalize (bool, optional): If set to :obj:`True`, output features will be :math:`\ell_2`-normalized, *i.e.*, :math:`\frac{\mathbf{x}^{\prime}_i} From c40b099fb4932b58a34beceeafe6152202bc4cf4 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 24 Jun 2022 09:20:06 +0200 Subject: [PATCH 0112/2432] Custom `NeighborLoader` test (#4854) * loader_test * changelog * linting --- CHANGELOG.md | 2 +- test/data/test_feature_store.py | 52 +---- test/data/test_graph_store.py | 31 +-- test/loader/test_neighbor_loader.py | 209 ++++++------------ torch_geometric/loader/neighbor_loader.py | 51 +++-- torch_geometric/testing/__init__.py | 10 + .../{testing.py => testing/decorators.py} | 0 torch_geometric/testing/feature_store.py | 53 +++++ torch_geometric/testing/graph_store.py | 29 +++ 9 files changed, 192 insertions(+), 245 deletions(-) create mode 100644 torch_geometric/testing/__init__.py rename torch_geometric/{testing.py => testing/decorators.py} (100%) create mode 100644 torch_geometric/testing/feature_store.py create mode 100644 torch_geometric/testing/graph_store.py diff --git a/CHANGELOG.md b/CHANGELOG.md index d27ffb749860..51e41cfe27d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py index db5fdf21af78..45c410800c61 100644 --- a/test/data/test_feature_store.py +++ b/test/data/test_feature_store.py @@ -1,64 +1,14 @@ from dataclasses import dataclass -from typing import Dict, List, Optional, Tuple import pytest import torch -from torch import Tensor from torch_geometric.data.feature_store import ( AttrView, - FeatureStore, TensorAttr, _field_status, ) -from torch_geometric.typing import FeatureTensorType - - -class MyFeatureStore(FeatureStore): - def __init__(self): - super().__init__() - self.store: Dict[Tuple[str, str], Tensor] = {} - - @staticmethod - def key(attr: TensorAttr) -> str: - return (attr.group_name, attr.attr_name) - - def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: - index = attr.index - - # None indices define the obvious index: - if index is None: - index = torch.arange(0, tensor.shape[0]) - - # Store the index: - self.store[MyFeatureStore.key(attr)] = (index, tensor) - - return True - - def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: - index, tensor = self.store.get(MyFeatureStore.key(attr), (None, None)) - if 
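For reference, the corrected `SAGEConv` default documented above in one line (illustrative only):

from torch_geometric.nn import SAGEConv

conv = SAGEConv(in_channels=16, out_channels=32)  # aggregates neighbors with 'mean' by default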
tensor is None: - return None - - # None indices return the whole tensor: - if attr.index is None: - return tensor - - idx = torch.cat([(index == v).nonzero() for v in attr.index]).view(-1) - return tensor[idx] - - def _remove_tensor(self, attr: TensorAttr) -> bool: - del self.store[MyFeatureStore.key(attr)] - return True - - def _get_tensor_size(self, attr: TensorAttr) -> Tuple: - return self._get_tensor(attr).size() - - def get_all_tensor_attrs(self) -> List[str]: - return [TensorAttr(*key) for key in self.store.keys()] - - def __len__(self): - raise NotImplementedError +from torch_geometric.testing.feature_store import MyFeatureStore @dataclass diff --git a/test/data/test_graph_store.py b/test/data/test_graph_store.py index 0460c3b0adcf..5ce1e3f85434 100644 --- a/test/data/test_graph_store.py +++ b/test/data/test_graph_store.py @@ -1,36 +1,9 @@ -from typing import Dict, Optional, Tuple - import pytest import torch -from torch import Tensor from torch_sparse import SparseTensor -from torch_geometric.data.graph_store import ( - EdgeAttr, - EdgeLayout, - EdgeTensorType, - GraphStore, -) - - -class MyGraphStore(GraphStore): - def __init__(self): - super().__init__() - self.store: Dict[EdgeAttr, Tuple[Tensor, Tensor]] = {} - - @staticmethod - def key(attr: EdgeAttr) -> str: - return (attr.edge_type, attr.layout.value) - - def _put_edge_index(self, edge_index: EdgeTensorType, - edge_attr: EdgeAttr) -> bool: - self.store[MyGraphStore.key(edge_attr)] = edge_index - - def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: - return self.store.get(MyGraphStore.key(edge_attr), None) - - def get_all_edge_attrs(self): - return [EdgeAttr(*key) for key in self.store.keys()] +from torch_geometric.data.graph_store import EdgeLayout +from torch_geometric.testing.graph_store import MyGraphStore def test_graph_store(): diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index fda168772493..c8c3eda16f59 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -1,6 +1,3 @@ -import itertools -import sys - import numpy as np import pytest import torch @@ -8,15 +5,11 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader -from torch_geometric.loader.neighbor_loader import get_input_nodes from torch_geometric.nn import GraphConv, to_hetero from torch_geometric.testing import withRegisteredOp -from torch_geometric.utils import k_hop_subgraph, sort_edge_index - -sys.path.append("..") -# pylint: disable=wrong-import-order,wrong-import-position,no-name-in-module -from data.test_feature_store import MyFeatureStore # noqa: E402 -from data.test_graph_store import MyGraphStore # noqa: E402 +from torch_geometric.testing.feature_store import MyFeatureStore +from torch_geometric.testing.graph_store import MyGraphStore +from torch_geometric.utils import k_hop_subgraph def get_edge_index(num_src_nodes, num_dst_nodes, num_edges): @@ -286,139 +279,69 @@ def test_temporal_heterogeneous_neighbor_loader_on_cora(get_dataset): assert torch.all(mask) -@pytest.mark.parametrize('directed', [True, False]) -def test_custom_neighbor_loader(directed): - r"""This test evaluates the correctness of a `NeighborLoader` constructed - from a feature store and graph store by comparing it to a `NeighborLoader` - constructed from a `HeteroData` object.""" - torch.manual_seed(12345) +@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData]) +@pytest.mark.parametrize('GraphStore', [MyGraphStore, 
HeteroData]) +def test_custom_neighbor_loader(FeatureStore, GraphStore): + # Initialize feature store, graph store, and reference: + feature_store = FeatureStore() + graph_store = GraphStore() + data = HeteroData() + + # Set up node features: + x = torch.arange(100) + data['paper'].x = x + feature_store.put_tensor(x, group_name='paper', attr_name='x', index=None) - # Possible feature and graph stores: - feature_stores = [MyFeatureStore, HeteroData] - graph_stores = [MyGraphStore, HeteroData] + x = torch.arange(100, 300) + data['author'].x = x + feature_store.put_tensor(x, group_name='author', attr_name='x', index=None) # Set up edge indices: - def _get_edge_index(num_src, num_dst, num_edges): - edge_index = get_edge_index(num_src, num_dst, num_edges) - edge_index = sort_edge_index(edge_index) - adj = SparseTensor.from_edge_index(edge_index, is_sorted=True) - rowptr, col, _ = adj.csr() - return edge_index, rowptr, col - - # Assertion utility: - def _assert_tensor_dict_equal(expected, actual): - assert expected.keys() == actual.keys() - for key in expected: - assert torch.equal(expected[key], actual[key]) - - # NOTE in this test, here we solely use explicit APIs, since - # `HeteroData` and `Data` both override dunder methods: - for feature_store, graph_store in itertools.product( - feature_stores, graph_stores): - - # Initialize feature store, graph store, and reference: - feature_store = feature_store() - graph_store = graph_store() - hetero_data = HeteroData() - - # Set up node features: - x = torch.arange(100) - hetero_data['paper'].x = x - feature_store.put_tensor(x, group_name='paper', attr_name='x', - index=None) - x = torch.arange(100, 300) - hetero_data['author'].x = x - feature_store.put_tensor(x, group_name='author', attr_name='x', - index=None) - - # Set up edge indices: - edge_index, rowptr, col = _get_edge_index(100, 100, 500) - hetero_data['paper', 'to', 'paper'].edge_index = edge_index - graph_store.put_edge_index( - edge_index=(rowptr, col), - edge_type=('paper', 'to', 'paper'), - layout='csr', - ) - - edge_index, rowptr, col = _get_edge_index(100, 200, 1000) - hetero_data['paper', 'to', 'author'].edge_index = edge_index - graph_store.put_edge_index( - edge_index=(rowptr, col), - edge_type=('paper', 'to', 'author'), - layout='csr', - ) - - edge_index, rowptr, col = _get_edge_index(200, 100, 1000) - hetero_data['author', 'to', 'paper'].edge_index = edge_index - graph_store.put_edge_index( - edge_index=(rowptr, col), - edge_type=('author', 'to', 'paper'), - layout='csr', - ) - - # Construct neighbor loaders: - batch_size = 20 - input_type = 'paper' - hetero_data_loader = NeighborLoader( - data=hetero_data, - num_neighbors=[-1] * 2, - input_nodes=input_type, - batch_size=batch_size, - directed=directed, - ) - - input_type = feature_store._tensor_attr_cls(group_name='paper', - attr_name='x') - custom_loader = NeighborLoader( - data=(feature_store, graph_store), - input_nodes=input_type, - num_neighbors=[-1] * 2, - batch_size=batch_size, - is_sorted=True, - directed=directed, - ) - - # Basic assertions: - assert str(custom_loader) == 'NeighborLoader()' - assert len(custom_loader) == (100 + batch_size - 1) // batch_size - - # Equivalent input nodes: - hetero_input_nodes = get_input_nodes(hetero_data, - hetero_data_loader.input_nodes) - custom_input_nodes = get_input_nodes((feature_store, graph_store), - custom_loader.input_nodes) - - assert hetero_input_nodes == custom_input_nodes - - # Equivalent inner representations: - assert (hetero_data_loader.neighbor_sampler.node_types == - 
custom_loader.neighbor_sampler.node_types) - assert (hetero_data_loader.neighbor_sampler.edge_types == - custom_loader.neighbor_sampler.edge_types) - - # Equivalent neighbor sampler outputs: - expected = hetero_data_loader.neighbor_sampler([0, 1, 2, 3]) - actual = custom_loader.neighbor_sampler([0, 1, 2, 3]) - - for i in range(len(expected) - 1): - _assert_tensor_dict_equal(expected[i], actual[i]) - - # Equivalent outputs when iterating the `DataLoader`: - custom_batches = [] - for batch in custom_loader: - assert isinstance(batch, HeteroData) - custom_batches.append(batch) - - hetero_data_batches = [] - for batch in hetero_data_loader: - hetero_data_batches.append(batch) - - for expected, actual in zip(hetero_data_batches, custom_batches): - # Check node features: - for node_type in actual.node_types: - assert torch.equal(expected[node_type].x, actual[node_type].x) - - # Check edge indices: - for edge_type in actual.edge_types: - assert torch.equal(expected[edge_type].edge_index, - actual[edge_type].edge_index) + edge_index = get_edge_index(100, 100, 500) + data['paper', 'to', 'paper'].edge_index = edge_index + graph_store.put_edge_index( + edge_index=SparseTensor.from_edge_index(edge_index).csr()[:2], + edge_type=('paper', 'to', 'paper'), + layout='csr', + ) + + edge_index = get_edge_index(100, 200, 1000) + data['paper', 'to', 'author'].edge_index = edge_index + graph_store.put_edge_index( + edge_index=SparseTensor.from_edge_index(edge_index).csr()[:2], + edge_type=('paper', 'to', 'author'), + layout='csr', + ) + + edge_index = get_edge_index(200, 100, 1000) + data['author', 'to', 'paper'].edge_index = edge_index + graph_store.put_edge_index( + edge_index=SparseTensor.from_edge_index(edge_index).csr()[:2], + edge_type=('author', 'to', 'paper'), + layout='csr', + ) + + # Construct neighbor loaders: + loader1 = NeighborLoader(data, batch_size=20, + input_nodes=('paper', range(100)), + num_neighbors=[-1] * 2) + + loader2 = NeighborLoader((feature_store, graph_store), batch_size=20, + input_nodes=('paper', range(100)), + num_neighbors=[-1] * 2) + + assert str(loader1) == str(loader2) + assert len(loader1) == len(loader2) + + for batch1, batch2 in zip(loader1, loader2): + assert len(batch1) == len(batch2) + assert batch1['paper'].batch_size == batch2['paper'].batch_size + assert torch.allclose(batch1['paper'].x, batch2['paper'].x) + assert torch.allclose(batch1['author'].x, batch2['author'].x) + + assert torch.allclose(batch1['paper', 'to', 'paper'].edge_index, + batch2['paper', 'to', 'paper'].edge_index) + assert torch.allclose(batch1['paper', 'to', 'author'].edge_index, + batch2['paper', 'to', 'author'].edge_index) + assert torch.allclose(batch1['author', 'to', 'paper'].edge_index, + batch2['author', 'to', 'paper'].edge_index) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 521b2805086a..a4008f1c89cd 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -451,16 +451,16 @@ def get_input_nodes( data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], input_nodes: Union[InputNodes, TensorAttr], ) -> Tuple[Optional[str], Sequence]: - def from_bool_tensor(tensor): - return tensor.nonzero( - as_tuple=False).view(-1) if tensor.dtype == torch.bool else tensor + def to_index(tensor): + if isinstance(tensor, Tensor) and tensor.dtype == torch.bool: + return torch.nonzero(as_tuple=False).view(-1) + else: + return tensor if isinstance(data, Data): if input_nodes is None: return None, 
range(data.num_nodes) - if input_nodes.dtype == torch.bool: - input_nodes = from_bool_tensor(input_nodes) - return None, input_nodes + return None, to_index(input_nodes) elif isinstance(data, HeteroData): assert input_nodes is not None @@ -472,13 +472,10 @@ def from_bool_tensor(tensor): assert len(input_nodes) == 2 assert isinstance(input_nodes[0], str) - if input_nodes[1] is None: - return input_nodes[0], range(data[input_nodes[0]].num_nodes) - node_type, input_nodes = input_nodes - if input_nodes.dtype == torch.bool: - input_nodes = from_bool_tensor(input_nodes) - return node_type, input_nodes + if input_nodes is None: + return input_nodes[0], range(data[input_nodes[0]].num_nodes) + return node_type, to_index(input_nodes) else: # Tuple[FeatureStore, GraphStore] # NOTE FeatureStore and GraphStore are treated as separate @@ -488,18 +485,30 @@ def from_bool_tensor(tensor): # directly inferred from the feature store. feature_store, _ = data - # Explicit tensor: + assert input_nodes is not None + if isinstance(input_nodes, Tensor): - return None, from_bool_tensor(input_nodes) + return None, to_index(input_nodes) + + if isinstance(input_nodes, str): + num_nodes = feature_store.get_tensor_size(input_nodes)[0] + return input_nodes, range(num_nodes) + + if isinstance(input_nodes, (list, tuple)): + assert len(input_nodes) == 2 + assert isinstance(input_nodes[0], str) - if isinstance(input_nodes, tuple) and isinstance( - input_nodes[0], str) and isinstance(input_nodes[1], Tensor): - return input_nodes[0], from_bool_tensor(input_nodes[1]) + node_type, input_nodes = input_nodes + if input_nodes is None: + num_nodes = feature_store.get_tensor_size(input_nodes)[0] + return input_nodes[0], range(num_nodes) + return node_type, to_index(input_nodes) - # Implicit from TensorAttr (infer number of nodes from feature tensor): assert isinstance(input_nodes, TensorAttr) assert input_nodes.is_set('attr_name') - return getattr(input_nodes, 'group_name', None), range( - feature_store.get_tensor_size(input_nodes)[0]) - # TODO support implicit from EdgeAttr + node_type = getattr(input_nodes, 'group_name', None) + if not input_nodes.is_set('index') or input_nodes.index is None: + num_nodes = feature_store.get_tensor_size(input_nodes)[0] + return node_type, range(num_nodes) + return node_type, input_nodes.index diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py new file mode 100644 index 000000000000..083c4a593d68 --- /dev/null +++ b/torch_geometric/testing/__init__.py @@ -0,0 +1,10 @@ +from .decorators import (is_full_test, onlyFullTest, withPackage, + withRegisteredOp, withCUDA) + +__all__ = [ + 'is_full_test', + 'onlyFullTest', + 'withPackage', + 'withRegisteredOp', + 'withCUDA', +] diff --git a/torch_geometric/testing.py b/torch_geometric/testing/decorators.py similarity index 100% rename from torch_geometric/testing.py rename to torch_geometric/testing/decorators.py diff --git a/torch_geometric/testing/feature_store.py b/torch_geometric/testing/feature_store.py new file mode 100644 index 000000000000..1ed680cfdba5 --- /dev/null +++ b/torch_geometric/testing/feature_store.py @@ -0,0 +1,53 @@ +from typing import Dict, List, Optional, Tuple + +import torch +from torch import Tensor + +from torch_geometric.data.feature_store import FeatureStore, TensorAttr +from torch_geometric.typing import FeatureTensorType + + +class MyFeatureStore(FeatureStore): + def __init__(self): + super().__init__() + self.store: Dict[Tuple[str, str], Tensor] = {} + + @staticmethod + def key(attr: 
TensorAttr) -> str: + return (attr.group_name, attr.attr_name) + + def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: + index = attr.index + + # None indices define the obvious index: + if index is None: + index = torch.arange(0, tensor.shape[0]) + + # Store the index: + self.store[MyFeatureStore.key(attr)] = (index, tensor) + + return True + + def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: + index, tensor = self.store.get(MyFeatureStore.key(attr), (None, None)) + if tensor is None: + return None + + if attr.index is None: # None indices return the whole tensor: + return tensor + + idx = torch.cat([(index == v).nonzero() for v in attr.index]).view(-1) + return tensor[idx] + + def _remove_tensor(self, attr: TensorAttr) -> bool: + del self.store[MyFeatureStore.key(attr)] + return True + + def _get_tensor_size(self, attr: TensorAttr) -> Tuple: + return self._get_tensor(attr).size() + + def get_all_tensor_attrs(self) -> List[str]: + return [TensorAttr(*key) for key in self.store.keys()] + + def __len__(self): + raise NotImplementedError diff --git a/torch_geometric/testing/graph_store.py b/torch_geometric/testing/graph_store.py new file mode 100644 index 000000000000..1ef79fa501a6 --- /dev/null +++ b/torch_geometric/testing/graph_store.py @@ -0,0 +1,29 @@ +from typing import Dict, Optional, Tuple + +from torch import Tensor + +from torch_geometric.data.graph_store import ( + EdgeAttr, + EdgeTensorType, + GraphStore, +) + + +class MyGraphStore(GraphStore): + def __init__(self): + super().__init__() + self.store: Dict[EdgeAttr, Tuple[Tensor, Tensor]] = {} + + @staticmethod + def key(attr: EdgeAttr) -> str: + return (attr.edge_type, attr.layout.value) + + def _put_edge_index(self, edge_index: EdgeTensorType, + edge_attr: EdgeAttr) -> bool: + self.store[MyGraphStore.key(edge_attr)] = edge_index + + def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: + return self.store.get(MyGraphStore.key(edge_attr), None) + + def get_all_edge_attrs(self): + return [EdgeAttr(*key) for key in self.store.keys()] From e46fbb18b004ffc2ae5aece41c039cccbd8e0159 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Fri, 24 Jun 2022 00:44:15 -0700 Subject: [PATCH 0113/2432] `FeatureStore.multi_get_tensor` implementation (#4853) * init * CHANGELOG * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * docstring update * update interface * None check * better errors * comments * typo Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- torch_geometric/data/feature_store.py | 77 +++++++++++++++++++----- torch_geometric/loader/utils.py | 17 ++++-- torch_geometric/testing/feature_store.py | 8 ++- 4 files changed, 82 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51e41cfe27d7..3f391c343056 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) - Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) -- Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807)) +- Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807), [#4853](https://github.com/pyg-team/pytorch_geometric/pull/4853)) - Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) - Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) - Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index c1a30596f791..3c616e6e3ca5 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -280,6 +280,17 @@ def put_tensor(self, tensor: FeatureTensorType, *args, **kwargs) -> bool: f"specifying all 'UNSET' fields") return self._put_tensor(tensor, attr) + @staticmethod + def _to_type(attr: TensorAttr, + tensor: FeatureTensorType) -> FeatureTensorType: + if (isinstance(attr.index, torch.Tensor) + and isinstance(tensor, np.ndarray)): + return torch.from_numpy(tensor) + if (isinstance(attr.index, np.ndarray) + and isinstance(tensor, torch.Tensor)): + return tensor.detach().cpu().numpy() + return tensor + @abstractmethod def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: r"""To be implemented by :class:`FeatureStore` subclasses.""" @@ -299,27 +310,14 @@ def get_tensor(self, *args, **kwargs) -> FeatureTensorType: from a :class:`TensorAttr` object. Returns: - FeatureTensorType: a Tensor of the same type as the index, or - :obj:`None` if no tensor was found. + FeatureTensorType: a Tensor of the same type as the index. Raises: KeyError: if the tensor corresponding to attr was not found. ValueError: if the input `TensorAttr` is not fully specified. """ - def to_type(tensor: FeatureTensorType) -> FeatureTensorType: - if (isinstance(attr.index, torch.Tensor) - and isinstance(tensor, np.ndarray)): - return torch.from_numpy(tensor) - if (isinstance(attr.index, np.ndarray) - and isinstance(tensor, torch.Tensor)): - return tensor.numpy() - return tensor attr = self._tensor_attr_cls.cast(*args, **kwargs) - if isinstance(attr.index, slice): - if attr.index.start == attr.index.stop == attr.index.step is None: - attr.index = None - if not attr.is_fully_specified(): raise ValueError(f"The input TensorAttr '{attr}' is not fully " f"specified. Please fully specify the input by " @@ -328,7 +326,56 @@ def to_type(tensor: FeatureTensorType) -> FeatureTensorType: tensor = self._get_tensor(attr) if tensor is None: raise KeyError(f"A tensor corresponding to '{attr}' was not found") - return to_type(tensor) + return self._to_type(attr, tensor) + + def _multi_get_tensor( + self, attrs: List[TensorAttr]) -> Optional[FeatureTensorType]: + r"""To be implemented by :class:`FeatureStore` subclasses. + + .. note:: + The default implementation simply iterates over all calls to + :meth:`get_tensor`. 
Implementor classes that can provide
+            additional, more performant functionality are recommended to
+            override this method.
+        """
+        return [self._get_tensor(attr) for attr in attrs]
+
+    def multi_get_tensor(self,
+                         attrs: List[TensorAttr]) -> List[FeatureTensorType]:
+        r"""Synchronously obtains a :class:`FeatureTensorType` object from the
+        feature store for each tensor associated with the attributes in
+        `attrs`.
+
+        Args:
+            attrs (List[TensorAttr]): a list of :class:`TensorAttr` attributes
+                that identify the tensors to get.
+
+        Returns:
+            List[FeatureTensorType]: a Tensor of the same type as the index for
+                each attribute.
+
+        Raises:
+            KeyError: if a tensor corresponding to an attr was not found.
+            ValueError: if any input `TensorAttr` is not fully specified.
+        """
+        attrs = [self._tensor_attr_cls.cast(attr) for attr in attrs]
+        bad_attrs = [attr for attr in attrs if not attr.is_fully_specified()]
+        if len(bad_attrs) > 0:
+            raise ValueError(
+                f"The input TensorAttr(s) '{bad_attrs}' are not fully "
+                f"specified. Please fully specify them by specifying all "
+                f"'UNSET' fields")
+
+        tensors = self._multi_get_tensor(attrs)
+        if None in tensors:
+            bad_attrs = [attrs[i] for i, v in enumerate(tensors) if v is None]
+            raise KeyError(f"Tensors corresponding to attributes "
+                           f"'{bad_attrs}' were not found")
+
+        return [
+            self._to_type(attr, tensor)
+            for attr, tensor in zip(attrs, tensors)
+        ]

     @abstractmethod
     def _remove_tensor(self, attr: TensorAttr) -> bool:
diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py
index beeb8f36dde5..d562f7c890af 100644
--- a/torch_geometric/loader/utils.py
+++ b/torch_geometric/loader/utils.py
@@ -206,12 +206,19 @@ def filter_feature_store(
         data[str_to_edge_type(key)].edge_index = edge_index

     # Filter node storage:
-    for attr in feature_store.get_all_tensor_attrs():
+    attrs = feature_store.get_all_tensor_attrs()
+    required_attrs = []
+    for attr in attrs:
         if attr.group_name in node_dict:
-            # If we have sampled nodes from this group, index into the
-            # feature store for these nodes' features:
             attr.index = node_dict[attr.group_name]
-            tensor = feature_store.get_tensor(attr)
-            data[attr.group_name][attr.attr_name] = tensor
+            required_attrs.append(attr)
+
+    # NOTE Here, we utilize `feature_store.multi_get` to give the feature store
+    # full control over optimizing how it returns features (since the call is
+    # synchronous, this amounts to giving the feature store control over all
+    # iteration).
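(A usage sketch, not part of the patch itself: the loop above only collects fully specified attributes, and the single `multi_get_tensor` call that follows resolves all of them at once. Assuming a store populated like the `MyFeatureStore` test helper from this series, the call looks as follows; the tensor values are purely illustrative.)

    import torch
    from torch_geometric.data.feature_store import TensorAttr
    from torch_geometric.testing.feature_store import MyFeatureStore

    store = MyFeatureStore()
    store.put_tensor(torch.arange(3), group_name='paper', attr_name='x', index=None)
    store.put_tensor(torch.arange(3, 6), group_name='author', attr_name='x', index=None)

    # Both feature tensors come back from a single call; `index=None` selects all rows:
    xs = store.multi_get_tensor([
        TensorAttr('paper', 'x', None),
        TensorAttr('author', 'x', None),
    ])
    assert len(xs) == 2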
+ tensors = feature_store.multi_get_tensor(required_attrs) + for i, attr in enumerate(required_attrs): + data[attr.group_name][attr.attr_name] = tensors[i] return data diff --git a/torch_geometric/testing/feature_store.py b/torch_geometric/testing/feature_store.py index 1ed680cfdba5..c3b85b51fdf3 100644 --- a/torch_geometric/testing/feature_store.py +++ b/torch_geometric/testing/feature_store.py @@ -33,7 +33,13 @@ def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: if tensor is None: return None - if attr.index is None: # None indices return the whole tensor: + # None indices return the whole tensor: + if attr.index is None: + return tensor + + # Empty slices return the whole tensor: + if (isinstance(attr.index, slice) + and attr.index == slice(None, None, None)): return tensor idx = torch.cat([(index == v).nonzero() for v in attr.index]).view(-1) From 654c10bac8547c5a20946aef12041d98cbf3ef34 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 24 Jun 2022 10:38:11 +0200 Subject: [PATCH 0114/2432] fix (#4855) --- torch_geometric/loader/neighbor_loader.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index a4008f1c89cd..b08c907dcd6d 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -453,9 +453,8 @@ def get_input_nodes( ) -> Tuple[Optional[str], Sequence]: def to_index(tensor): if isinstance(tensor, Tensor) and tensor.dtype == torch.bool: - return torch.nonzero(as_tuple=False).view(-1) - else: - return tensor + return tensor.nonzero(as_tuple=False).view(-1) + return tensor if isinstance(data, Data): if input_nodes is None: From 7a89399de1e644b2e999ead388b046b3c8a9810a Mon Sep 17 00:00:00 2001 From: mszarma Date: Fri, 24 Jun 2022 12:28:48 +0200 Subject: [PATCH 0115/2432] Add `NeighborLoader` benchmark suite (#4815) * [benchmark] Add NeighborLoader bench * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * changelog * added tqdm Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + benchmark/loader/neighbor_loader.py | 93 +++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 benchmark/loader/neighbor_loader.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f391c343056..d3832fa1c74c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) - Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) diff --git a/benchmark/loader/neighbor_loader.py b/benchmark/loader/neighbor_loader.py new file mode 100644 index 000000000000..cd8f7ed0186b --- /dev/null +++ b/benchmark/loader/neighbor_loader.py @@ -0,0 +1,93 @@ +import argparse +import os.path as osp +from timeit import default_timer + +import tqdm +from ogb.nodeproppred import PygNodePropPredDataset + +import torch_geometric.transforms as T +from torch_geometric.datasets import OGB_MAG +from torch_geometric.loader import NeighborLoader + + +def run(args: argparse.ArgumentParser) -> None: + for dataset_name in args.datasets: + print(f"Dataset: {dataset_name}") + root = osp.join(args.root, dataset_name) + + if dataset_name == 'mag': + transform = T.ToUndirected(merge=True) + dataset = OGB_MAG(root=root, transform=transform) + train_idx = ('paper', dataset[0]['paper'].train_mask) + eval_idx = ('paper', None) + neighbor_sizes = args.hetero_neighbor_sizes + else: + dataset = PygNodePropPredDataset(f'ogbn-{dataset_name}', root) + split_idx = dataset.get_idx_split() + train_idx = split_idx['train'] + eval_idx = None + neighbor_sizes = args.homo_neighbor_sizes + + data = dataset[0].to(args.device) + + for num_neighbors in neighbor_sizes: + print(f'Training sampling with {num_neighbors} neighbors') + for batch_size in args.batch_sizes: + train_loader = NeighborLoader( + data, + num_neighbors=num_neighbors, + input_nodes=train_idx, + batch_size=batch_size, + shuffle=True, + num_workers=args.num_workers, + ) + runtimes = [] + num_iterations = 0 + for run in range(args.runs): + start = default_timer() + for batch in tqdm.tqdm(train_loader): + num_iterations += 1 + stop = default_timer() + runtimes.append(round(stop - start, 3)) + average_time = round(sum(runtimes) / args.runs, 3) + print(f'batch size={batch_size}, iterations={num_iterations}, ' + f'runtimes={runtimes}, average runtime={average_time}') + + print('Evaluation sampling with all neighbors') + for batch_size in args.eval_batch_sizes: + subgraph_loader = NeighborLoader( + data, + num_neighbors=[-1], + input_nodes=eval_idx, + batch_size=batch_size, + shuffle=False, + num_workers=args.num_workers, + ) + runtimes = [] + num_iterations = 0 + for run in range(args.runs): + start = default_timer() + for batch in tqdm.tqdm(subgraph_loader): + num_iterations += 1 + stop = default_timer() + runtimes.append(round(stop - start, 3)) + average_time = round(sum(runtimes) / args.runs, 3) + print(f'batch size={batch_size}, iterations={num_iterations}, ' + f'runtimes={runtimes}, average runtime={average_time}') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('NeighborLoader Sampling Benchmarking') + + add = parser.add_argument + add('--device', default='cpu') + add('--datasets', nargs="+", default=['arxiv', 'products', 'mag']) + add('--root', default='../../data') + add('--batch-sizes', default=[8192, 
4096, 2048, 1024, 512]) + add('--eval-batch-sizes', default=[16384, 8192, 4096, 2048, 1024, 512]) + add('--homo-neighbor_sizes', default=[[10, 5], [15, 10, 5], [20, 15, 10]]) + add('--hetero-neighbor_sizes', default=[[5], [10], [10, 5]], type=int) + add('--num-workers', default=0) + add('--runs', default=3) + + run(parser.parse_args()) From 78bbfbd2fa9243f5791607c17c907b78a39c3401 Mon Sep 17 00:00:00 2001 From: Zeyuan Tan <41138939+ZenoTan@users.noreply.github.com> Date: Fri, 24 Jun 2022 14:13:39 +0100 Subject: [PATCH 0116/2432] Hotfix: Remove duplicate `node_types` in `NeighborLoader` with `FeatureStore` (#4856) * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- torch_geometric/data/feature_store.py | 3 ++- torch_geometric/loader/neighbor_loader.py | 5 ++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index 3c616e6e3ca5..d87cee0428b2 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -329,7 +329,8 @@ def get_tensor(self, *args, **kwargs) -> FeatureTensorType: return self._to_type(attr, tensor) def _multi_get_tensor( - self, attrs: List[TensorAttr]) -> Optional[FeatureTensorType]: + self, + attrs: List[TensorAttr]) -> List[Optional[FeatureTensorType]]: r"""To be implemented by :class:`FeatureStore` subclasses. .. note:: diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index b08c907dcd6d..dd139833ca35 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -108,9 +108,8 @@ def __init__( node_attrs = feature_store.get_all_tensor_attrs() edge_attrs = graph_store.get_all_edge_attrs() - self.node_types = [ - node_attr.group_name for node_attr in node_attrs - ] + self.node_types = list( + set(node_attr.group_name for node_attr in node_attrs)) self.edge_types = [edge_attr.edge_type for edge_attr in edge_attrs] # Set other required parameters: From b4f3cbedc0d701e378a1c1645c7b3a2672670a7a Mon Sep 17 00:00:00 2001 From: Zeyuan Tan <41138939+ZenoTan@users.noreply.github.com> Date: Fri, 24 Jun 2022 15:50:31 +0100 Subject: [PATCH 0117/2432] Hotfix: #4856 follow-up, also fix `edge_types` (#4857) * fix * fix test Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/loader/test_neighbor_loader.py | 22 +++++++++++++--------- torch_geometric/loader/neighbor_loader.py | 3 ++- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3832fa1c74c..dbdb57e895d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index c8c3eda16f59..b08e82c73810 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -336,12 +336,16 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): for batch1, batch2 in zip(loader1, loader2): assert len(batch1) == len(batch2) assert batch1['paper'].batch_size == batch2['paper'].batch_size - assert torch.allclose(batch1['paper'].x, batch2['paper'].x) - assert torch.allclose(batch1['author'].x, batch2['author'].x) - - assert torch.allclose(batch1['paper', 'to', 'paper'].edge_index, - batch2['paper', 'to', 'paper'].edge_index) - assert torch.allclose(batch1['paper', 'to', 'author'].edge_index, - batch2['paper', 'to', 'author'].edge_index) - assert torch.allclose(batch1['author', 'to', 'paper'].edge_index, - batch2['author', 'to', 'paper'].edge_index) + + # Mapped indices of neighbors may be differently sorted: + assert torch.allclose(batch1['paper'].x.sort()[0], + batch2['paper'].x.sort()[0]) + assert torch.allclose(batch1['author'].x.sort()[0], + batch2['author'].x.sort()[0]) + + assert (batch1['paper', 'to', 'paper'].edge_index.size() == batch1[ + 'paper', 'to', 'paper'].edge_index.size()) + assert (batch1['paper', 'to', 'author'].edge_index.size() == batch1[ + 'paper', 'to', 'author'].edge_index.size()) + assert (batch1['author', 'to', 'paper'].edge_index.size() == batch1[ + 'author', 'to', 'paper'].edge_index.size()) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index dd139833ca35..501c9a1be103 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -110,7 +110,8 @@ def __init__( self.node_types = list( set(node_attr.group_name for node_attr in node_attrs)) - self.edge_types = [edge_attr.edge_type for edge_attr in edge_attrs] + self.edge_types = list( + set(edge_attr.edge_type for edge_attr in edge_attrs)) # Set other required parameters: if isinstance(num_neighbors, (list, tuple)): From a5f833c93c3ba3887b6af4fbf56c9dbad8afd90b Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Sat, 25 Jun 2022 16:18:02 +0800 Subject: [PATCH 0118/2432] fix node type node (#4861) --- torch_geometric/loader/neighbor_loader.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 501c9a1be103..340061bace2f 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -473,7 +473,7 @@ def to_index(tensor): node_type, input_nodes = input_nodes if input_nodes is None: - return input_nodes[0], range(data[input_nodes[0]].num_nodes) + return node_type, range(data[node_type].num_nodes) return node_type, to_index(input_nodes) else: # Tuple[FeatureStore, GraphStore] From d700ddbf246b6d91dcd59c074584f9a30fde4ee0 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Sat, 25 Jun 2022 05:40:55 -0500 Subject: [PATCH 0119/2432] Integration of `nn.aggr` within `MessagPassing` (#4779) * Add sum alias class * Add message passing integration with nn.aggr * Cleanup MessagePassing * Raise errors in resolver * changelog * Add arg_kwargs support for * update * error * update * update * fix test * update * update * typo * typo * typo * reset * update * update Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/nn/conv/test_message_passing.py | 32 ++++- test/nn/test_resolver.py | 1 + torch_geometric/nn/aggr/__init__.py | 2 - torch_geometric/nn/aggr/base.py | 6 +- torch_geometric/nn/aggr/basic.py | 25 ++-- torch_geometric/nn/aggr/lstm.py | 2 +- torch_geometric/nn/aggr/multi.py | 25 +++- torch_geometric/nn/aggr/set2set.py | 2 +- torch_geometric/nn/conv/message_passing.jinja | 9 +- torch_geometric/nn/conv/message_passing.py | 114 ++++++++++-------- torch_geometric/nn/resolver.py | 46 ++++--- 12 files changed, 159 insertions(+), 107 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbdb57e895d1..4b6244f6afee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index 196470384070..b78460530282 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -9,7 +9,7 @@ from torch_sparse import SparseTensor from torch_sparse.matmul import spmm -from torch_geometric.nn import MessagePassing +from torch_geometric.nn import MessagePassing, aggr from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size @@ -487,3 +487,33 @@ def test_explain_message(): conv._edge_mask = torch.tensor([0, 0, 0, 0], dtype=torch.float) conv._apply_sigmoid = False assert conv(x, edge_index).abs().sum() == 0. 
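(The hunk below adds a test for the new aggregation handling; as a hedged usage sketch, not taken from the patch, the `aggr` argument of a `MessagePassing` layer now accepts a string, a list of strings, or an `Aggregation` module instance:)

    import torch
    from torch_geometric.nn import MessagePassing, aggr

    class PlainConv(MessagePassing):
        # Minimal layer: messages are the raw neighbor features (default `message`).
        def forward(self, x, edge_index):
            return self.propagate(edge_index, x=x)

    x = torch.randn(4, 8)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])

    out1 = PlainConv(aggr='add')(x, edge_index)            # 'add' resolves to SumAggregation
    out2 = PlainConv(aggr=aggr.SoftmaxAggregation())(x, edge_index)
    out3 = PlainConv(aggr=['mean', 'max'])(x, edge_index)  # MultiAggregation, outputs concatenated
    assert out3.size() == (4, 16)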
+
+
+class MyAggregatorConv(MessagePassing):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def forward(self, x: Tensor, edge_index: Adj) -> Tensor:
+        # propagate_type: (x: Tensor)
+        return self.propagate(edge_index, x=x, size=None)
+
+
+@pytest.mark.parametrize('aggr_module', [
+    aggr.MeanAggregation(),
+    aggr.SumAggregation(),
+    aggr.MaxAggregation(),
+    aggr.SoftmaxAggregation(),
+    aggr.PowerMeanAggregation(),
+    aggr.MultiAggregation(['mean', 'max'])
+])
+def test_message_passing_with_aggr_module(aggr_module):
+    x = torch.randn(4, 8)
+    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
+    row, col = edge_index
+    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))
+
+    conv = MyAggregatorConv(aggr=aggr_module)
+    assert isinstance(conv.aggr_module, aggr.Aggregation)
+    out = conv(x, edge_index)
+    assert out.size(0) == 4 and out.size(1) in {8, 16}
+    assert torch.allclose(conv(x, adj.t()), out)
diff --git a/test/nn/test_resolver.py b/test/nn/test_resolver.py
index 218381a013ec..8cb2e9caff4d 100644
--- a/test/nn/test_resolver.py
+++ b/test/nn/test_resolver.py
@@ -21,6 +21,7 @@ def test_activation_resolver():
 @pytest.mark.parametrize('aggr_tuple', [
     (torch_geometric.nn.aggr.MeanAggregation, 'mean'),
     (torch_geometric.nn.aggr.SumAggregation, 'sum'),
+    (torch_geometric.nn.aggr.SumAggregation, 'add'),
     (torch_geometric.nn.aggr.MaxAggregation, 'max'),
     (torch_geometric.nn.aggr.MinAggregation, 'min'),
     (torch_geometric.nn.aggr.MulAggregation, 'mul'),
diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py
index 11934d179090..2204e0b8d9db 100644
--- a/torch_geometric/nn/aggr/__init__.py
+++ b/torch_geometric/nn/aggr/__init__.py
@@ -3,7 +3,6 @@
 from .basic import (
     MeanAggregation,
     SumAggregation,
-    AddAggregation,
     MaxAggregation,
     MinAggregation,
     MulAggregation,
@@ -20,7 +19,6 @@
     'MultiAggregation',
     'MeanAggregation',
     'SumAggregation',
-    'AddAggregation',
     'MaxAggregation',
     'MinAggregation',
     'MulAggregation',
diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py
index 5857fba75d7f..26e09b41865d 100644
--- a/torch_geometric/nn/aggr/base.py
+++ b/torch_geometric/nn/aggr/base.py
@@ -11,7 +11,7 @@ class Aggregation(torch.nn.Module, ABC):
     r"""An abstract base class for implementing custom aggregations."""

     @abstractmethod
-    def forward(self, x: Tensor, index: Optional[Tensor] = None, *,
+    def forward(self, x: Tensor, index: Optional[Tensor] = None,
                 ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,
                 dim: int = -2) -> Tensor:
         r"""
@@ -35,7 +35,7 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *,
     def reset_parameters(self):
         pass

-    def __call__(self, x: Tensor, index: Optional[Tensor] = None, *,
+    def __call__(self, x: Tensor, index: Optional[Tensor] = None,
                  ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,
                  dim: int = -2) -> Tensor:
@@ -62,7 +62,7 @@ def __call__(self, x: Tensor, index: Optional[Tensor] = None, *,
                              f"'{dim_size}' but expected "
                              f">= '{int(index.max()) + 1}')")

-        return super().__call__(x, index, ptr=ptr, dim_size=dim_size, dim=dim)
+        return super().__call__(x, index, ptr, dim_size, dim)

     def __repr__(self) -> str:
         return f'{self.__class__.__name__}()'
diff --git a/torch_geometric/nn/aggr/basic.py b/torch_geometric/nn/aggr/basic.py
index 1e5adf80fa36..28993f785e1e 100644
--- a/torch_geometric/nn/aggr/basic.py
+++ b/torch_geometric/nn/aggr/basic.py
@@ -9,38 +9,35 @@
 class MeanAggregation(Aggregation):
-    def 
forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: return self.reduce(x, index, ptr, dim_size, dim, reduce='mean') class SumAggregation(Aggregation): - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: return self.reduce(x, index, ptr, dim_size, dim, reduce='sum') -AddAggregation = SumAggregation # Alias - - class MaxAggregation(Aggregation): - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: return self.reduce(x, index, ptr, dim_size, dim, reduce='max') class MinAggregation(Aggregation): - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: return self.reduce(x, index, ptr, dim_size, dim, reduce='min') class MulAggregation(Aggregation): - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: # TODO Currently, `mul` reduction can only operate on `index`: @@ -49,21 +46,19 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, *, class VarAggregation(Aggregation): - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: - mean = self.reduce(x, index, ptr, dim_size, dim, reduce='mean') mean_2 = self.reduce(x * x, index, ptr, dim_size, dim, reduce='mean') return mean_2 - mean * mean class StdAggregation(VarAggregation): - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: - - var = super().forward(x, index, ptr=ptr, dim_size=dim_size, dim=dim) + var = super().forward(x, index, ptr, dim_size, dim) return torch.sqrt(var.relu() + 1e-5) @@ -80,7 +75,7 @@ def reset_parameters(self): if isinstance(self.t, Tensor): self.t.data.fill_(self._init_t) - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: @@ -107,7 +102,7 @@ def reset_parameters(self): if isinstance(self.p, Tensor): self.p.data.fill_(self._init_p) - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: diff --git a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py index 966e4cda1cba..31e2830e11b1 100644 --- a/torch_geometric/nn/aggr/lstm.py +++ b/torch_geometric/nn/aggr/lstm.py @@ -31,7 +31,7 @@ def __init__(self, in_channels: int, out_channels: int, **kwargs): def reset_parameters(self): self.lstm.reset_parameters() - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, 
ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim) diff --git a/torch_geometric/nn/aggr/multi.py b/torch_geometric/nn/aggr/multi.py index 97b2c713ba12..50d14876a500 100644 --- a/torch_geometric/nn/aggr/multi.py +++ b/torch_geometric/nn/aggr/multi.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union import torch from torch import Tensor @@ -8,7 +8,8 @@ class MultiAggregation(Aggregation): - def __init__(self, aggrs: List[Union[Aggregation, str]]): + def __init__(self, aggrs: List[Union[Aggregation, str]], + aggrs_kwargs: Optional[List[Dict[str, Any]]] = None): super().__init__() if not isinstance(aggrs, (list, tuple)): @@ -19,14 +20,26 @@ def __init__(self, aggrs: List[Union[Aggregation, str]]): raise ValueError(f"'aggrs' of '{self.__class__.__name__}' should " f"not be empty") - self.aggrs = [aggregation_resolver(aggr) for aggr in aggrs] - - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + if aggrs_kwargs is None: + aggrs_kwargs = [{}] * len(aggrs) + elif len(aggrs) != len(aggrs_kwargs): + raise ValueError(f"'aggrs_kwargs' with invalid length passed to " + f"'{self.__class__.__name__}' " + f"(got '{len(aggrs_kwargs)}', " + f"expected '{len(aggrs)}'). Ensure that both " + f"'aggrs' and 'aggrs_kwargs' are consistent") + + self.aggrs = torch.nn.ModuleList([ + aggregation_resolver(aggr, **aggr_kwargs) + for aggr, aggr_kwargs in zip(aggrs, aggrs_kwargs) + ]) + + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: outs = [] for aggr in self.aggrs: - outs.append(aggr(x, index, ptr=ptr, dim_size=dim_size, dim=dim)) + outs.append(aggr(x, index, ptr, dim_size, dim)) return torch.cat(outs, dim=-1) if len(outs) > 1 else outs[0] def __repr__(self) -> str: diff --git a/torch_geometric/nn/aggr/set2set.py b/torch_geometric/nn/aggr/set2set.py index 3c9cd00974af..320a15c4794a 100644 --- a/torch_geometric/nn/aggr/set2set.py +++ b/torch_geometric/nn/aggr/set2set.py @@ -40,7 +40,7 @@ def __init__(self, in_channels: int, processing_steps: int, **kwargs): def reset_parameters(self): self.lstm.reset_parameters() - def forward(self, x: Tensor, index: Optional[Tensor] = None, *, + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: diff --git a/torch_geometric/nn/conv/message_passing.jinja b/torch_geometric/nn/conv/message_passing.jinja index c04fa3f396de..f08e11c1e58d 100644 --- a/torch_geometric/nn/conv/message_passing.jinja +++ b/torch_geometric/nn/conv/message_passing.jinja @@ -234,7 +234,7 @@ class {{cls_name}}({{parent_cls_name}}): the_size = self.__check_input__(edge_index, size) in_kwargs = Propagate_{{uid}}({% for k in prop_types.keys() %}{{k}}={{k}}{{ ", " if not loop.last }}{% endfor %}) - {% if fuse and single_aggr %} + {% if fuse %} if isinstance(edge_index, SparseTensor): out = self.message_and_aggregate(edge_index{% for k in msg_and_aggr_args %}, {{k}}=in_kwargs.{{k}}{% endfor %}) return self.update(out{% for k in update_args %}, {{k}}=in_kwargs.{{k}}{% endfor %}) @@ -242,14 +242,7 @@ class {{cls_name}}({{parent_cls_name}}): kwargs = self.__collect__(edge_index, the_size, in_kwargs) out = self.message({% for k in msg_args %}{{k}}=kwargs.{{k}}{{ ", " if not loop.last }}{% endfor %}) - {% if single_aggr %} out = self.aggregate(out{% for k in 
aggr_args %}, {{k}}=kwargs.{{k}}{% endfor %}) - {% else %} - outs: List[Tensor] = [] - for aggr in self.aggrs: - outs.append(self.aggregate(out{% for k in aggr_args %}, {{k}}=kwargs.{{k}}{% endfor %}, aggr=aggr)) - out = self.combine(outs) - {% endif %} return self.update(out{% for k in update_args %}, {{k}}=kwargs.{{k}}{% endfor %}) {% if edge_updater_types|length > 0 %} diff --git a/torch_geometric/nn/conv/message_passing.py b/torch_geometric/nn/conv/message_passing.py index 62ed10bcae59..ac58d5932439 100644 --- a/torch_geometric/nn/conv/message_passing.py +++ b/torch_geometric/nn/conv/message_passing.py @@ -5,15 +5,26 @@ from collections import OrderedDict from inspect import Parameter from itertools import chain -from typing import Callable, List, Optional, Set, Union, get_type_hints +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Set, + Union, + get_type_hints, +) from uuid import uuid1 import torch from torch import Tensor from torch.utils.hooks import RemovableHandle -from torch_scatter import gather_csr, scatter, segment_csr +from torch_scatter import gather_csr from torch_sparse import SparseTensor +from torch_geometric.nn.aggr import Aggregation, MultiAggregation +from torch_geometric.nn.resolver import aggregation_resolver as aggr_resolver from torch_geometric.typing import Adj, Size from .utils.helpers import expand_left @@ -26,7 +37,7 @@ split_types_repr, ) -AGGRS = {'add', 'sum', 'mean', 'min', 'max', 'mul'} +FUSE_AGGRS = {'add', 'sum', 'mean', 'min', 'max'} class MessagePassing(torch.nn.Module): @@ -45,11 +56,20 @@ class MessagePassing(torch.nn.Module): create_gnn.html>`__ for the accompanying tutorial. Args: - aggr (string or list, optional): The aggregation scheme to use - (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`, - :obj:`"mul"` or :obj:`None`). If given as a list, will make use of - multiple aggregations in which different outputs will get - concatenated in the last dimension. (default: :obj:`"add"`) + aggr (string or list or Aggregation, optional): The aggregation scheme + to use, *e.g.*, :obj:`"add"`, :obj:`"sum"` :obj:`"mean"`, + :obj:`"min"`, :obj:`"max"` or :obj:`"mul"`. + In addition, can be any + :class:`~torch_geometric.nn.aggr.Aggregation` module (or any string + that automatically resolves to it). + If given as a list, will make use of multiple aggregations in which + different outputs will get concatenated in the last dimension. + If set to :obj:`None`, the :class:`MessagePassing` instantiation is + expected to implement its own aggregation logic via + :meth:`aggregate`. (default: :obj:`"add"`) + aggr_kwargs (Dict[str, Any], optional): Arguments passed to the + respective aggregation function in case it gets automatically + resolved. (default: :obj:`None`) flow (string, optional): The flow direction of message passing (:obj:`"source_to_target"` or :obj:`"target_to_source"`). 
(default: :obj:`"source_to_target"`) @@ -85,23 +105,31 @@ class MessagePassing(torch.nn.Module): 'size_i', 'size_j', 'ptr', 'index', 'dim_size' } - def __init__(self, aggr: Optional[Union[str, List[str]]] = "add", - flow: str = "source_to_target", node_dim: int = -2, - decomposed_layers: int = 1): - + def __init__( + self, + aggr: Optional[Union[str, List[str], Aggregation]] = "add", + *, + aggr_kwargs: Optional[Dict[str, Any]] = None, + flow: str = "source_to_target", + node_dim: int = -2, + decomposed_layers: int = 1, + **kwargs, + ): super().__init__() - if aggr is None or isinstance(aggr, str): - assert aggr is None or aggr in AGGRS - self.aggr: Optional[str] = aggr - self.aggrs: List[str] = [] + if aggr is None: + self.aggr = None + self.aggr_module = None + elif isinstance(aggr, (str, Aggregation)): + self.aggr = str(aggr) + self.aggr_module = aggr_resolver(aggr, **(aggr_kwargs or {})) elif isinstance(aggr, (tuple, list)): - assert len(set(aggr) | AGGRS) == len(AGGRS) - self.aggr: Optional[str] = None - self.aggrs: List[str] = aggr + self.aggr = [str(x) for x in aggr] + self.aggr_module = MultiAggregation(aggr, aggr_kwargs) else: - raise ValueError(f"Only strings, list and tuples are valid " - f"aggregation schemes (got '{type(aggr)}')") + raise ValueError(f"Only strings, list, tuples and instances of" + f"`torch_geometric.nn.aggr.Aggregation` are " + f"valid aggregation schemes (got '{type(aggr)}')") self.flow = flow assert flow in ['source_to_target', 'target_to_source'] @@ -126,6 +154,8 @@ def __init__(self, aggr: Optional[Union[str, List[str]]] = "add", # Support for "fused" message passing. self.fuse = self.inspector.implements('message_and_aggregate') + if self.aggr is not None: + self.fuse &= isinstance(self.aggr, str) and self.aggr in FUSE_AGGRS # Support for explainability. self._explain = False @@ -288,7 +318,7 @@ def propagate(self, edge_index: Adj, size: Size = None, **kwargs): # Run "fused" message and aggregation (if applicable). if (isinstance(edge_index, SparseTensor) and self.fuse - and not self.explain and len(self.aggrs) == 0): + and not self.explain): coll_dict = self.__collect__(self.__fused_user_args__, edge_index, size, kwargs) @@ -307,8 +337,7 @@ def propagate(self, edge_index: Adj, size: Size = None, **kwargs): update_kwargs = self.inspector.distribute('update', coll_dict) out = self.update(out, **update_kwargs) - # Otherwise, run both functions in separation. - elif isinstance(edge_index, Tensor) or not self.fuse: + else: # Otherwise, run both functions in separation. 
if decomposed_layers > 1: user_args = self.__user_args__ decomp_args = {a[:-2] for a in user_args if a[-2:] == '_j'} @@ -348,14 +377,7 @@ def propagate(self, edge_index: Adj, size: Size = None, **kwargs): if res is not None: aggr_kwargs = res[0] if isinstance(res, tuple) else res - if len(self.aggrs) == 0: - out = self.aggregate(out, **aggr_kwargs) - else: - outs = [] - for aggr in self.aggrs: - tmp = self.aggregate(out, aggr=aggr, **aggr_kwargs) - outs.append(tmp) - out = self.combine(outs) + out = self.aggregate(out, **aggr_kwargs) for hook in self._aggregate_forward_hooks.values(): res = hook(self, (aggr_kwargs, ), out) @@ -466,26 +488,20 @@ def explain_message(self, inputs: Tensor, size_i: int) -> Tensor: return inputs * edge_mask.view(size) def aggregate(self, inputs: Tensor, index: Tensor, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - aggr: Optional[str] = None) -> Tensor: + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None) -> Tensor: r"""Aggregates messages from neighbors as :math:`\square_{j \in \mathcal{N}(i)}`. Takes in the output of message computation as first argument and any argument which was initially passed to :meth:`propagate`. - By default, this function will delegate its call to scatter functions - that support "add", "mean", "min", "max" and "mul" operations as - specified in :meth:`__init__` by the :obj:`aggr` argument. + By default, this function will delegate its call to the underlying + :class:`~torch_geometric.nn.aggr.Aggregation` module to reduce messages + as specified in :meth:`__init__` by the :obj:`aggr` argument. """ - aggr = self.aggr if aggr is None else aggr - assert aggr is not None - if ptr is not None: - ptr = expand_left(ptr, dim=self.node_dim, dims=inputs.dim()) - return segment_csr(inputs, ptr, reduce=aggr) - else: - return scatter(inputs, index, dim=self.node_dim, dim_size=dim_size, - reduce=aggr) + return self.aggr_module(inputs, index, ptr=ptr, dim_size=dim_size, + dim=self.node_dim) def message_and_aggregate(self, adj_t: SparseTensor) -> Tensor: r"""Fuses computations of :func:`message` and :func:`aggregate` into a @@ -497,13 +513,6 @@ def message_and_aggregate(self, adj_t: SparseTensor) -> Tensor: """ raise NotImplementedError - def combine(self, inputs: List[Tensor]) -> Tensor: - r"""Combines the outputs from multiple aggregations into a single - representation. 
Will only get called in case :obj:`aggr` holds a list - of aggregation schemes to use.""" - assert len(inputs) > 0 - return torch.cat(inputs, dim=-1) if len(inputs) > 1 else inputs[0] - def update(self, inputs: Tensor) -> Tensor: r"""Updates node embeddings in analogy to :math:`\gamma_{\mathbf{\Theta}}` for each node @@ -759,7 +768,6 @@ def jittable(self, typing: Optional[str] = None): prop_types=prop_types, prop_return_type=prop_return_type, fuse=self.fuse, - single_aggr=len(self.aggrs) == 0, collect_types=collect_types, user_args=self.__user_args__, edge_user_args=self.__edge_user_args__, diff --git a/torch_geometric/nn/resolver.py b/torch_geometric/nn/resolver.py index 8d5e16ebccfc..8d843da52e85 100644 --- a/torch_geometric/nn/resolver.py +++ b/torch_geometric/nn/resolver.py @@ -1,5 +1,5 @@ import inspect -from typing import Any, List, Optional, Union +from typing import Any, Dict, List, Optional, Union from torch import Tensor @@ -8,24 +8,36 @@ def normalize_string(s: str) -> str: return s.lower().replace('-', '').replace('_', '').replace(' ', '') -def resolver(classes: List[Any], query: Union[Any, str], - base_cls: Optional[Any], *args, **kwargs): +def resolver(classes: List[Any], class_dict: Dict[str, Any], + query: Union[Any, str], base_cls: Optional[Any], *args, **kwargs): - if query is None or not isinstance(query, str): + if not isinstance(query, str): return query query_repr = normalize_string(query) base_cls_repr = normalize_string(base_cls.__name__) if base_cls else '' + + for key_repr, cls in class_dict.items(): + if query_repr == key_repr: + if inspect.isclass(cls): + obj = cls(*args, **kwargs) + assert callable(obj) + return obj + assert callable(cls) + return cls + for cls in classes: cls_repr = normalize_string(cls.__name__) if query_repr in [cls_repr, cls_repr.replace(base_cls_repr, '')]: if inspect.isclass(cls): - return cls(*args, **kwargs) - else: - return cls + obj = cls(*args, **kwargs) + assert callable(obj) + return obj + assert callable(cls) + return cls - return ValueError(f"Could not resolve '{query}' among the choices " - f"{set(cls.__name__ for cls in classes)}") + choices = set(cls.__name__ for cls in classes) | set(class_dict.keys()) + raise ValueError(f"Could not resolve '{query}' among choices {choices}") # Activation Resolver ######################################################### @@ -38,7 +50,6 @@ def swish(x: Tensor) -> Tensor: def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): import torch base_cls = torch.nn.Module - acts = [ act for act in vars(torch.nn.modules.activation).values() if isinstance(act, type) and issubclass(act, base_cls) @@ -46,18 +57,21 @@ def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): acts += [ swish, ] - return resolver(acts, query, base_cls, *args, **kwargs) + act_dict = {} + return resolver(acts, act_dict, query, base_cls, *args, **kwargs) # Aggregation Resolver ######################################################## def aggregation_resolver(query: Union[Any, str], *args, **kwargs): - import torch_geometric.nn.aggr as aggrs - base_cls = aggrs.Aggregation - + import torch_geometric.nn.aggr as aggr + base_cls = aggr.Aggregation aggrs = [ - aggr for aggr in vars(aggrs).values() + aggr for aggr in vars(aggr).values() if isinstance(aggr, type) and issubclass(aggr, base_cls) ] - return resolver(aggrs, query, base_cls, *args, **kwargs) + aggr_dict = { + 'add': aggr.SumAggregation, + } + return resolver(aggrs, aggr_dict, query, base_cls, *args, **kwargs) From 
2003408f0f287d30faddd4903d6ce1ab9894473f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 25 Jun 2022 13:10:36 +0200 Subject: [PATCH 0120/2432] Refactor `SAGEConv` to use `LSTMAggregation` (#4863) * update * changelog --- CHANGELOG.md | 2 +- test/nn/conv/test_sage_conv.py | 2 +- torch_geometric/nn/conv/sage_conv.py | 35 +++++++--------------------- 3 files changed, 10 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b6244f6afee..21cbeec8ece2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index aa96b79cd72a..58afa7ae53bd 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -69,5 +69,5 @@ def test_lstm_sage_conv(): assert torch.allclose(conv(x, adj.t()), out) edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 0]]) - with pytest.raises(ValueError, match="is not sorted by columns"): + with pytest.raises(ValueError, match="'index' tensor is not sorted"): conv(x, edge_index) diff --git a/torch_geometric/nn/conv/sage_conv.py b/torch_geometric/nn/conv/sage_conv.py index ceb97d784d92..d7f861e0c8b1 100644 --- a/torch_geometric/nn/conv/sage_conv.py +++ b/torch_geometric/nn/conv/sage_conv.py @@ -1,16 +1,13 @@ 
-from typing import Optional, Tuple, Union +from typing import Tuple, Union -import torch import torch.nn.functional as F from torch import Tensor from torch.nn import LSTM -from torch_scatter import scatter from torch_sparse import SparseTensor, matmul from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.dense.linear import Linear from torch_geometric.typing import Adj, OptPairTensor, Size -from torch_geometric.utils import to_dense_batch class SAGEConv(MessagePassing): @@ -76,9 +73,6 @@ def __init__( bias: bool = True, **kwargs, ): - kwargs['aggr'] = aggr if aggr != 'lstm' else None - super().__init__(**kwargs) - self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize @@ -88,6 +82,12 @@ def __init__( if isinstance(in_channels, int): in_channels = (in_channels, in_channels) + if aggr == 'lstm': + kwargs['aggr_kwargs'] = dict(in_channels=in_channels[0], + out_channels=in_channels[0]) + + super().__init__(aggr, **kwargs) + if self.project: self.lin = Linear(in_channels[0], in_channels[0], bias=True) @@ -140,25 +140,6 @@ def message_and_aggregate(self, adj_t: SparseTensor, adj_t = adj_t.set_value(None, layout=None) return matmul(adj_t, x[0], reduce=self.aggr) - def aggregate(self, x: Tensor, index: Tensor, ptr: Optional[Tensor] = None, - dim_size: Optional[int] = None) -> Tensor: - if self.aggr is not None: - return scatter(x, index, dim=self.node_dim, dim_size=dim_size, - reduce=self.aggr) - - # LSTM aggregation: - if ptr is None and not torch.all(index[:-1] <= index[1:]): - raise ValueError(f"Can not utilize LSTM-style aggregation inside " - f"'{self.__class__.__name__}' in case the " - f"'edge_index' tensor is not sorted by columns. " - f"Run 'sort_edge_index(..., sort_by_row=False)' " - f"in a pre-processing step.") - - x, mask = to_dense_batch(x, batch=index, batch_size=dim_size) - out, _ = self.lstm(x) - return out[:, -1] - def __repr__(self) -> str: - aggr = self.aggr if self.aggr is not None else 'lstm' return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, aggr={aggr})') + f'{self.out_channels}, aggr={self.aggr})') From fb34a77e0ac6120bd178a0c721fbdc6f0a8d1b6d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 26 Jun 2022 13:11:53 +0200 Subject: [PATCH 0121/2432] Refactor `GravNetConv` to rely on new `Aggregation` (#4865) * gravnet refactor * changelog --- CHANGELOG.md | 2 +- torch_geometric/nn/conv/gravnet_conv.py | 26 +++++++++---------------- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 21cbeec8ece2..35eef6927bfc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) - Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863)) -- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) diff --git a/torch_geometric/nn/conv/gravnet_conv.py b/torch_geometric/nn/conv/gravnet_conv.py index 1dfb134726b2..826701fa0214 100644 --- a/torch_geometric/nn/conv/gravnet_conv.py +++ b/torch_geometric/nn/conv/gravnet_conv.py @@ -1,8 +1,8 @@ +import warnings from typing import Optional, Union import torch from torch import Tensor -from torch_scatter import scatter from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.dense.linear import Linear @@ -35,9 +35,6 @@ class GravNetConv(MessagePassing): between the vertices; referred to as :math:`F_{\textrm{LR}}` in the paper. k (int): The number of nearest neighbors. - num_workers (int): Number of workers to use for k-NN computation. - Has no effect in case :obj:`batch` is not :obj:`None`, or the input - lies on the GPU. (default: :obj:`1`) **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.MessagePassing`. 
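For reference, the constructor arguments documented in the `GravNetConv` docstring above can be exercised as follows — a minimal sketch, not part of the patch itself, with hypothetical sizes and assuming the optional `torch-cluster` package is installed for the internal k-NN search:

    import torch
    from torch_geometric.nn import GravNetConv

    # Hypothetical feature and latent-space sizes:
    conv = GravNetConv(in_channels=16, out_channels=32, space_dimensions=4,
                       propagate_dimensions=8, k=4)

    x = torch.randn(10, 16)   # 10 nodes with 16 input features each
    out = conv(x)             # aggregated output of shape [10, 32]
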
@@ -51,21 +48,24 @@ class GravNetConv(MessagePassing): *(optional)* - **output:** node features :math:`(|\mathcal{V}|, F_{out})` or :math:`(|\mathcal{V}_t|, F_{out})` if bipartite - - """ def __init__(self, in_channels: int, out_channels: int, space_dimensions: int, propagate_dimensions: int, k: int, - num_workers: int = 1, **kwargs): - super().__init__(flow='source_to_target', **kwargs) + num_workers: Optional[int] = None, **kwargs): + super().__init__(aggr=['mean', 'max'], flow='source_to_target', + **kwargs) if knn is None: raise ImportError('`GravNetConv` requires `torch-cluster`.') + if num_workers is not None: + warnings.warn( + "'num_workers' attribute in '{self.__class__.__name__}' is " + "deprecated and will be removed in a future release") + self.in_channels = in_channels self.out_channels = out_channels self.k = k - self.num_workers = num_workers self.lin_s = Linear(in_channels, space_dimensions) self.lin_h = Linear(in_channels, propagate_dimensions) @@ -123,14 +123,6 @@ def forward( def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor: return x_j * edge_weight.unsqueeze(1) - def aggregate(self, inputs: Tensor, index: Tensor, - dim_size: Optional[int] = None) -> Tensor: - out_mean = scatter(inputs, index, dim=self.node_dim, dim_size=dim_size, - reduce='mean') - out_max = scatter(inputs, index, dim=self.node_dim, dim_size=dim_size, - reduce='max') - return torch.cat([out_mean, out_max], dim=-1) - def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.in_channels}, ' f'{self.out_channels}, k={self.k})') From c097b9d87bcbb50a65d697f88fb36e7b671feb65 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 26 Jun 2022 13:27:18 +0200 Subject: [PATCH 0122/2432] Refactor `GENConv` to rely on new `Aggregation` (#4866) * update * typo * changelog --- CHANGELOG.md | 4 +- test/nn/conv/test_gen_conv.py | 2 +- torch_geometric/nn/conv/gen_conv.py | 71 ++++++---------------------- torch_geometric/nn/conv/sage_conv.py | 3 +- 4 files changed, 19 insertions(+), 61 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 35eef6927bfc..18f00c8b36fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,8 +20,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863)) -- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866)) +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) - Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) diff --git a/test/nn/conv/test_gen_conv.py b/test/nn/conv/test_gen_conv.py index f8e3375134df..a711c585632f 100644 --- a/test/nn/conv/test_gen_conv.py +++ b/test/nn/conv/test_gen_conv.py @@ -6,7 +6,7 @@ from torch_geometric.testing import is_full_test -@pytest.mark.parametrize('aggr', ['softmax', 'softmax_sg', 'power']) +@pytest.mark.parametrize('aggr', ['softmax', 'powermean']) def test_gen_conv(aggr): x1 = torch.randn(4, 16) x2 = torch.randn(2, 16) diff --git a/torch_geometric/nn/conv/gen_conv.py 
b/torch_geometric/nn/conv/gen_conv.py index 83e723665bdf..6631e3d820b7 100644 --- a/torch_geometric/nn/conv/gen_conv.py +++ b/torch_geometric/nn/conv/gen_conv.py @@ -1,18 +1,14 @@ from typing import List, Optional, Union -import torch -import torch.nn.functional as F from torch import Tensor from torch.nn import ( BatchNorm1d, Dropout, InstanceNorm1d, LayerNorm, - Parameter, ReLU, Sequential, ) -from torch_scatter import scatter, scatter_softmax from torch_sparse import SparseTensor from torch_geometric.nn.conv import MessagePassing @@ -72,8 +68,8 @@ class GENConv(MessagePassing): dimensionalities. out_channels (int): Size of each output sample. aggr (str, optional): The aggregation scheme to use (:obj:`"softmax"`, - :obj:`"softmax_sg"`, :obj:`"power"`, :obj:`"add"`, :obj:`"mean"`, - :obj:`max`). (default: :obj:`"softmax"`) + :obj:`"powermean"`, :obj:`"add"`, :obj:`"mean"`, :obj:`max`). + (default: :obj:`"softmax"`) t (float, optional): Initial inverse temperature for softmax aggregation. (default: :obj:`1.0`) learn_t (bool, optional): If set to :obj:`True`, will learn the value @@ -113,16 +109,22 @@ def __init__(self, in_channels: int, out_channels: int, learn_msg_scale: bool = False, norm: str = 'batch', num_layers: int = 2, eps: float = 1e-7, **kwargs): - kwargs.setdefault('aggr', None) - super().__init__(**kwargs) + # Backward compatibility: + aggr = 'softmax' if aggr == 'softmax_sg' else aggr + aggr = 'powermean' if aggr == 'power' else aggr + + aggr_kwargs = {} + if aggr == 'softmax': + aggr_kwargs = dict(t=t, learn=learn_t) + elif aggr == 'powermean': + aggr_kwargs = dict(p=p, learn=learn_p) + + super().__init__(aggr=aggr, aggr_kwargs=aggr_kwargs, **kwargs) self.in_channels = in_channels self.out_channels = out_channels - self.aggr = aggr self.eps = eps - assert aggr in ['softmax', 'softmax_sg', 'power', 'add', 'mean', 'max'] - channels = [in_channels] for i in range(num_layers - 1): channels.append(in_channels * 2) @@ -131,32 +133,15 @@ def __init__(self, in_channels: int, out_channels: int, self.msg_norm = MessageNorm(learn_msg_scale) if msg_norm else None - self.initial_t = t - self.initial_p = p - - if learn_t and aggr == 'softmax': - self.t = Parameter(torch.Tensor([t]), requires_grad=True) - else: - self.t = t - - if learn_p: - self.p = Parameter(torch.Tensor([p]), requires_grad=True) - else: - self.p = p - def reset_parameters(self): reset(self.mlp) + self.aggr_module.reset_parameters() if self.msg_norm is not None: self.msg_norm.reset_parameters() - if self.t and isinstance(self.t, Tensor): - self.t.data.fill_(self.initial_t) - if self.p and isinstance(self.p, Tensor): - self.p.data.fill_(self.initial_p) def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_attr: OptTensor = None, size: Size = None) -> Tensor: """""" - if isinstance(x, Tensor): x: OptPairTensor = (x, x) @@ -183,33 +168,7 @@ def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, def message(self, x_j: Tensor, edge_attr: OptTensor) -> Tensor: msg = x_j if edge_attr is None else x_j + edge_attr - return F.relu(msg) + self.eps - - def aggregate(self, inputs: Tensor, index: Tensor, - dim_size: Optional[int] = None) -> Tensor: - - if self.aggr == 'softmax': - out = scatter_softmax(inputs * self.t, index, dim=self.node_dim) - return scatter(inputs * out, index, dim=self.node_dim, - dim_size=dim_size, reduce='sum') - - elif self.aggr == 'softmax_sg': - out = scatter_softmax(inputs * self.t, index, - dim=self.node_dim).detach() - return scatter(inputs * out, index, dim=self.node_dim, - 
dim_size=dim_size, reduce='sum') - - elif self.aggr == 'power': - min_value, max_value = 1e-7, 1e1 - torch.clamp_(inputs, min_value, max_value) - out = scatter(torch.pow(inputs, self.p), index, dim=self.node_dim, - dim_size=dim_size, reduce='mean') - torch.clamp_(out, min_value, max_value) - return torch.pow(out, 1 / self.p) - - else: - return scatter(inputs, index, dim=self.node_dim, dim_size=dim_size, - reduce=self.aggr) + return msg.relu() + self.eps def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.in_channels}, ' diff --git a/torch_geometric/nn/conv/sage_conv.py b/torch_geometric/nn/conv/sage_conv.py index d7f861e0c8b1..d9515cc2b9e0 100644 --- a/torch_geometric/nn/conv/sage_conv.py +++ b/torch_geometric/nn/conv/sage_conv.py @@ -104,8 +104,7 @@ def __init__( def reset_parameters(self): if self.project: self.lin.reset_parameters() - if self.aggr is None: - self.lstm.reset_parameters() + self.aggr_module.reset_parameters() self.lin_l.reset_parameters() if self.root_weight: self.lin_r.reset_parameters() From d37ede5e635690eb7757c11f4882494fe1f2c895 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Mon, 27 Jun 2022 14:29:51 +0800 Subject: [PATCH 0123/2432] add meaningful error message (#4870) --- torch_geometric/loader/neighbor_loader.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 340061bace2f..5493b31e02f5 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -218,7 +218,15 @@ def __call__(self, index: Union[List[int], Tensor]): self.directed, ) else: - fn = torch.ops.torch_sparse.hetero_temporal_neighbor_sample + try: + fn = torch.ops.torch_sparse.hetero_temporal_neighbor_sample + except RuntimeError as e: + raise RuntimeError( + "'torch_sparse' operator " + "'hetero_temporal_neighbor_sample' not " + "found. Currently requires building " + "'torch_sparse' from master.", e) + node_dict, row_dict, col_dict, edge_dict = fn( self.node_types, self.edge_types, From 927346e0d69a3925b99cbaf3fb9be53bb0b4a402 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 27 Jun 2022 09:01:48 +0200 Subject: [PATCH 0124/2432] Fix `GENConv` test (#4872) * Fix GENConv test * changelog --- CHANGELOG.md | 2 +- test/nn/conv/test_gen_conv.py | 43 ++++++++++++++++------------------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18f00c8b36fd..082c62d3e17a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/conv/test_gen_conv.py b/test/nn/conv/test_gen_conv.py index a711c585632f..90a335607363 100644 --- a/test/nn/conv/test_gen_conv.py +++ b/test/nn/conv/test_gen_conv.py @@ -20,52 +20,49 @@ def test_gen_conv(aggr): assert conv.__repr__() == f'GENConv(16, 32, aggr={aggr})' out11 = conv(x1, edge_index) assert out11.size() == (4, 32) - assert conv(x1, edge_index, size=(4, 4)).tolist() == out11.tolist() - assert conv(x1, adj1.t()).tolist() == out11.tolist() + assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out11) + assert torch.allclose(conv(x1, adj1.t()), out11) out12 = conv(x1, edge_index, value) assert out12.size() == (4, 32) - assert conv(x1, edge_index, value, (4, 4)).tolist() == out12.tolist() - assert conv(x1, adj2.t()).tolist() == out12.tolist() + assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out12) + assert torch.allclose(conv(x1, adj2.t()), out12) if is_full_test(): t = '(Tensor, Tensor, OptTensor, 
Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out11.tolist() - assert jit(x1, edge_index, size=(4, 4)).tolist() == out11.tolist() - assert jit(x1, edge_index, value).tolist() == out12.tolist() - assert jit(x1, edge_index, value, - size=(4, 4)).tolist() == out12.tolist() + assert torch.allclose(jit(x1, edge_index), out11) + assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11) + assert torch.allclose(jit(x1, edge_index, value), out12) + assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12) t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj1.t()).tolist() == out11.tolist() - assert jit(x1, adj2.t()).tolist() == out12.tolist() + assert torch.allclose(jit(x1, adj1.t()), out11) + assert torch.allclose(jit(x1, adj2.t()), out12) adj1 = adj1.sparse_resize((4, 2)) adj2 = adj2.sparse_resize((4, 2)) out21 = conv((x1, x2), edge_index) assert out21.size() == (2, 32) - assert conv((x1, x2), edge_index, size=(4, 2)).tolist() == out21.tolist() - assert conv((x1, x2), adj1.t()).tolist() == out21.tolist() + assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out21) + assert torch.allclose(conv((x1, x2), adj1.t()), out21) out22 = conv((x1, x2), edge_index, value) assert out22.size() == (2, 32) - assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out22.tolist() - assert conv((x1, x2), adj2.t()).tolist() == out22.tolist() + assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out22) + assert torch.allclose(conv((x1, x2), adj2.t()), out22) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out21.tolist() - assert jit((x1, x2), edge_index, - size=(4, 2)).tolist() == out21.tolist() - assert jit((x1, x2), edge_index, value).tolist() == out22.tolist() - assert jit((x1, x2), edge_index, value, - (4, 2)).tolist() == out22.tolist() + assert torch.allclose(jit((x1, x2), edge_index), out21) + assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21) + assert torch.allclose(jit((x1, x2), edge_index, value), out22) + assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22) t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj1.t()).tolist() == out21.tolist() - assert jit((x1, x2), adj2.t()).tolist() == out22.tolist() + assert torch.allclose(jit((x1, x2), adj1.t()), out21) + assert torch.allclose(jit((x1, x2), adj2.t()), out22) From 0f7e018aff9f9ca5356bf90b27e3546e26343c37 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Jun 2022 08:09:25 +0200 Subject: [PATCH 0125/2432] Add `filter_per_worker` flag to data loaders (#4873) * filter per worker * changelog --- CHANGELOG.md | 1 + torch_geometric/loader/base.py | 2 +- torch_geometric/loader/hgt_loader.py | 27 ++++++++++++++--- .../loader/link_neighbor_loader.py | 29 +++++++++++++++---- torch_geometric/loader/neighbor_loader.py | 28 ++++++++++++++---- 5 files changed, 72 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 082c62d3e17a..977f759a5bf9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) - Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) diff --git a/torch_geometric/loader/base.py b/torch_geometric/loader/base.py index 57270c8112fb..eaf9ecd99438 100644 --- a/torch_geometric/loader/base.py +++ b/torch_geometric/loader/base.py @@ -3,7 +3,7 @@ from torch.utils.data.dataloader import _BaseDataLoaderIter -class DataLoaderIterator(object): +class DataLoaderIterator: r"""A data loader iterator extended by a simple post transformation function :meth:`transform_fn`. While the iterator may request items from different sub-processes, :meth:`transform_fn` will always be executed in diff --git a/torch_geometric/loader/hgt_loader.py b/torch_geometric/loader/hgt_loader.py index a892a62eff93..00e9c2d3eb53 100644 --- a/torch_geometric/loader/hgt_loader.py +++ b/torch_geometric/loader/hgt_loader.py @@ -77,6 +77,14 @@ class HGTLoader(torch.utils.data.DataLoader): transform (Callable, optional): A function/transform that takes in an a sampled mini-batch and returns a transformed version. (default: :obj:`None`) + filter_per_worker (bool, optional): If set to :obj:`True`, will filter + the returning data in each worker's subprocess rather than in the + main process. + Setting this to :obj:`True` is generally not recommended: + (1) it may result in too many open file handles, + (2) it may slown down data loading, + (3) it requires operating on CPU tensors. + (default: :obj:`False`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. @@ -87,6 +95,7 @@ def __init__( num_samples: Union[List[int], Dict[NodeType, List[int]]], input_nodes: Union[NodeType, Tuple[NodeType, Optional[Tensor]]], transform: Callable = None, + filter_per_worker: bool = False, **kwargs, ): if 'collate_fn' in kwargs: @@ -112,6 +121,7 @@ def __init__( self.input_nodes = input_nodes self.num_hops = max([len(v) for v in num_samples.values()]) self.transform = transform + self.filter_per_worker = filter_per_worker self.sample_fn = torch.ops.torch_sparse.hgt_sample # Convert the graph data into a suitable format for sampling. @@ -120,7 +130,7 @@ def __init__( self.colptr_dict, self.row_dict, self.perm_dict = to_hetero_csc( data, device='cpu', share_memory=kwargs.get('num_workers', 0) > 0) - super().__init__(input_nodes[1].tolist(), collate_fn=self.sample, + super().__init__(input_nodes[1].tolist(), collate_fn=self.collate_fn, **kwargs) def sample(self, indices: List[int]) -> HeteroData: @@ -134,8 +144,7 @@ def sample(self, indices: List[int]) -> HeteroData: ) return node_dict, row_dict, col_dict, edge_dict, len(indices) - def transform_fn(self, out: Any) -> HeteroData: - # NOTE This function will always be executed on the main thread! 
+ def filter_fn(self, out: Any) -> HeteroData: node_dict, row_dict, col_dict, edge_dict, batch_size = out data = filter_hetero_data(self.data, node_dict, row_dict, col_dict, @@ -144,8 +153,18 @@ def transform_fn(self, out: Any) -> HeteroData: return data if self.transform is None else self.transform(data) + def collate_fn(self, indices: List[int]) -> Any: + out = self.sample(indices) + if self.filter_per_worker: + # We execute `filter_fn` in the worker process. + out = self.filter_fn(out) + return out + def _get_iterator(self) -> Iterator: - return DataLoaderIterator(super()._get_iterator(), self.transform_fn) + if self.filter_per_worker: + return super()._get_iterator() + # We execute `filter_fn` in the main process. + return DataLoaderIterator(super()._get_iterator(), self.filter_fn) def __repr__(self) -> str: return f'{self.__class__.__name__}()' diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index da9b35fd330a..d4da7db645c6 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -223,6 +223,14 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): :obj:`edge_index` is sorted by column. This avoids internal re-sorting of the data and can improve runtime and memory efficiency. (default: :obj:`False`) + filter_per_worker (bool, optional): If set to :obj:`True`, will filter + the returning data in each worker's subprocess rather than in the + main process. + Setting this to :obj:`True` is generally not recommended: + (1) it may result in too many open file handles, + (2) it may slown down data loading, + (3) it requires operating on CPU tensors. + (default: :obj:`False`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. @@ -238,6 +246,7 @@ def __init__( neg_sampling_ratio: float = 0.0, transform: Callable = None, is_sorted: bool = False, + filter_per_worker: bool = False, neighbor_sampler: Optional[LinkNeighborSampler] = None, **kwargs, ): @@ -255,9 +264,10 @@ def __init__( self.edge_label = edge_label self.replace = replace self.directed = directed + self.neg_sampling_ratio = neg_sampling_ratio self.transform = transform + self.filter_per_worker = filter_per_worker self.neighbor_sampler = neighbor_sampler - self.neg_sampling_ratio = neg_sampling_ratio edge_type, edge_label_index = get_edge_label_index( data, edge_label_index) @@ -275,10 +285,9 @@ def __init__( ) super().__init__(Dataset(edge_label_index, edge_label), - collate_fn=self.neighbor_sampler, **kwargs) + collate_fn=self.collate_fn, **kwargs) - def transform_fn(self, out: Any) -> Union[Data, HeteroData]: - # NOTE This function will always be executed on the main thread! + def filter_fn(self, out: Any) -> Union[Data, HeteroData]: if isinstance(self.data, Data): node, row, col, edge, edge_label_index, edge_label = out data = filter_data(self.data, node, row, col, edge, @@ -300,8 +309,18 @@ def transform_fn(self, out: Any) -> Union[Data, HeteroData]: return data if self.transform is None else self.transform(data) + def collate_fn(self, index: Union[List[int], Tensor]) -> Any: + out = self.neighbor_sampler(index) + if self.filter_per_worker: + # We execute `filter_fn` in the worker process. 
+ out = self.filter_fn(out) + return out + def _get_iterator(self) -> Iterator: - return DataLoaderIterator(super()._get_iterator(), self.transform_fn) + if self.filter_per_worker: + return super()._get_iterator() + # We execute `filter_fn` in the main process. + return DataLoaderIterator(super()._get_iterator(), self.filter_fn) def __repr__(self) -> str: return f'{self.__class__.__name__}()' diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 5493b31e02f5..9f0fe4193871 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -370,6 +370,14 @@ class NeighborLoader(torch.utils.data.DataLoader): :obj:`edge_index` is sorted by column. This avoids internal re-sorting of the data and can improve runtime and memory efficiency. (default: :obj:`False`) + filter_per_worker (bool, optional): If set to :obj:`True`, will filter + the returning data in each worker's subprocess rather than in the + main process. + Setting this to :obj:`True` is generally not recommended: + (1) it may result in too many open file handles, + (2) it may slown down data loading, + (3) it requires operating on CPU tensors. + (default: :obj:`False`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. @@ -384,6 +392,7 @@ def __init__( time_attr: Optional[str] = None, transform: Callable = None, is_sorted: bool = False, + filter_per_worker: bool = False, neighbor_sampler: Optional[NeighborSampler] = None, **kwargs, ): @@ -401,6 +410,7 @@ def __init__( self.replace = replace self.directed = directed self.transform = transform + self.filter_per_worker = filter_per_worker self.neighbor_sampler = neighbor_sampler node_type, input_nodes = get_input_nodes(data, input_nodes) @@ -417,11 +427,9 @@ def __init__( share_memory=kwargs.get('num_workers', 0) > 0, ) - super().__init__(input_nodes, collate_fn=self.neighbor_sampler, - **kwargs) + super().__init__(input_nodes, collate_fn=self.collate_fn, **kwargs) - def transform_fn(self, out: Any) -> Union[Data, HeteroData]: - # NOTE This function will always be executed on the main thread! + def filter_fn(self, out: Any) -> Union[Data, HeteroData]: if isinstance(self.data, Data): node, row, col, edge, batch_size = out data = filter_data(self.data, node, row, col, edge, @@ -445,8 +453,18 @@ def transform_fn(self, out: Any) -> Union[Data, HeteroData]: return data if self.transform is None else self.transform(data) + def collate_fn(self, index: Union[List[int], Tensor]) -> Any: + out = self.neighbor_sampler(index) + if self.filter_per_worker: + # We execute `filter_fn` in the worker process. + out = self.filter_fn(out) + return out + def _get_iterator(self) -> Iterator: - return DataLoaderIterator(super()._get_iterator(), self.transform_fn) + if self.filter_per_worker: + return super()._get_iterator() + # We execute `filter_fn` in the main process. 
+ return DataLoaderIterator(super()._get_iterator(), self.filter_fn) def __repr__(self) -> str: return f'{self.__class__.__name__}()' From cfda07410ddc0d55d7f153899bcd80e22dd5d979 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Tue, 28 Jun 2022 21:01:08 +0530 Subject: [PATCH 0126/2432] Added `time_attr` to `LinkNeighborLoader` (#4877) * added time_attr to link_neighbor_loader * updated link_neighbor_loader test * refactoring neighbor_loader and link_neighbor_loader * Update torch_geometric/loader/link_neighbor_loader.py Co-authored-by: Matthias Fey * add type hint. Co-authored-by: Matthias Fey * docs update * updated changelog Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/loader/test_link_neighbor_loader.py | 24 +++++ .../loader/link_neighbor_loader.py | 43 +++----- torch_geometric/loader/neighbor_loader.py | 99 ++++++++++--------- 4 files changed, 93 insertions(+), 74 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 977f759a5bf9..a0257f809d15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) - Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index f2dd71f415f0..ec0cbba51ed8 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -3,6 +3,7 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import LinkNeighborLoader +from torch_geometric.testing import withRegisteredOp def get_edge_index(num_src_nodes, num_dst_nodes, num_edges): @@ -187,3 +188,26 @@ def test_link_neighbor_loader_edge_label(): assert batch.edge_label.dtype == torch.long assert torch.all(batch.edge_label[:10] == 2) assert torch.all(batch.edge_label[10:] == 0) + + +@withRegisteredOp('torch_sparse.hetero_temporal_neighbor_sample') +def test_temporal_heterogeneous_link_neighbor_loader(): + torch.manual_seed(12345) + + data = HeteroData() + + data['paper'].x = torch.arange(100) + data['paper'].time = torch.arange(data['paper'].num_nodes) + data['author'].x = torch.arange(100, 300) + + data['paper', 'paper'].edge_index = get_edge_index(100, 100, 500) + data['paper', 'author'].edge_index = get_edge_index(100, 200, 1000) + data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000) + + loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, + edge_label_index=('paper', 'paper'), + batch_size=1, time_attr='time') + + for batch in loader: + mask = batch['paper'].time[0] >= batch['paper'].time[1:] + assert torch.all(mask) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index d4da7db645c6..98d527d6e475 100644 
--- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -65,25 +65,14 @@ def __call__(self, query: List[Tuple[Tensor]]): edge_label_index, edge_label) if issubclass(self.data_cls, Data): - sample_fn = torch.ops.torch_sparse.neighbor_sample query_nodes = edge_label_index.view(-1) query_nodes, reverse = query_nodes.unique(return_inverse=True) edge_label_index = reverse.view(2, -1) - - node, row, col, edge = sample_fn( - self.colptr, - self.row, - query_nodes, - self.num_neighbors, - self.replace, - self.directed, - ) - - return node, row, col, edge, edge_label_index, edge_label + return self._sparse_neighbor_sample(query_nodes) + ( + edge_label_index, edge_label) elif issubclass(self.data_cls, HeteroData): - sample_fn = torch.ops.torch_sparse.hetero_neighbor_sample if self.input_type[0] != self.input_type[-1]: query_src = edge_label_index[0] @@ -100,21 +89,8 @@ def __call__(self, query: List[Tuple[Tensor]]): query_nodes, reverse = query_nodes.unique(return_inverse=True) edge_label_index = reverse.view(2, -1) query_node_dict = {self.input_type[0]: query_nodes} - - node_dict, row_dict, col_dict, edge_dict = sample_fn( - self.node_types, - self.edge_types, - self.colptr_dict, - self.row_dict, - query_node_dict, - self.num_neighbors, - self.num_hops, - self.replace, - self.directed, - ) - - return (node_dict, row_dict, col_dict, edge_dict, edge_label_index, - edge_label) + return self._hetero_sparse_neighbor_sample(query_node_dict) + ( + edge_label_index, edge_label) class LinkNeighborLoader(torch.utils.data.DataLoader): @@ -178,6 +154,10 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): :obj:`neg_sampling_ratio` is currently implemented in an approximate way, *i.e.* negative edges may contain false negatives. + :obj:`time_attr` is currently implemented such that for an edge + `(src_node, dst_node)`, the neighbors of `src_node` can have a later + timestamp than `dst_node` or vice-versa. + Args: data (torch_geometric.data.Data or torch_geometric.data.HeteroData): The :class:`~torch_geometric.data.Data` or @@ -216,6 +196,11 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): :meth:`F.binary_cross_entropy`) and of type :obj:`torch.long` for multi-class classification (to facilitate the ease-of-use of :meth:`F.cross_entropy`). (default: :obj:`0.0`). + time_attr (str, optional): The name of the attribute that denotes + timestamps for the nodes in the graph. + If set, temporal sampling will be used such that neighbors are + guaranteed to fulfill temporal constraints, *i.e.* neighbors have + an earlier timestamp than the center node. (default: :obj:`None`) transform (Callable, optional): A function/transform that takes in a sampled mini-batch and returns a transformed version. 
(default: :obj:`None`) @@ -244,6 +229,7 @@ def __init__( replace: bool = False, directed: bool = True, neg_sampling_ratio: float = 0.0, + time_attr: Optional[str] = None, transform: Callable = None, is_sorted: bool = False, filter_per_worker: bool = False, @@ -281,6 +267,7 @@ def __init__( input_type=edge_type, is_sorted=is_sorted, neg_sampling_ratio=self.neg_sampling_ratio, + time_attr=time_attr, share_memory=kwargs.get('num_workers', 0) > 0, ) diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 9f0fe4193871..4cf8716cdb82 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -186,60 +186,67 @@ class _DataArgument(object): else: raise TypeError(f'NeighborLoader found invalid type: {type(data)}') + def _sparse_neighbor_sample(self, index: Tensor): + fn = torch.ops.torch_sparse.neighbor_sample + node, row, col, edge = fn( + self.colptr, + self.row, + index, + self.num_neighbors, + self.replace, + self.directed, + ) + return node, row, col, edge + + def _hetero_sparse_neighbor_sample(self, index_dict: Dict[str, Tensor]): + if self.node_time_dict is None: + fn = torch.ops.torch_sparse.hetero_neighbor_sample + node_dict, row_dict, col_dict, edge_dict = fn( + self.node_types, + self.edge_types, + self.colptr_dict, + self.row_dict, + index_dict, + self.num_neighbors, + self.num_hops, + self.replace, + self.directed, + ) + else: + try: + fn = torch.ops.torch_sparse.hetero_temporal_neighbor_sample + except RuntimeError as e: + raise RuntimeError( + "'torch_sparse' operator " + "'hetero_temporal_neighbor_sample' not " + "found. Currently requires building " + "'torch_sparse' from master.", e) + + node_dict, row_dict, col_dict, edge_dict = fn( + self.node_types, + self.edge_types, + self.colptr_dict, + self.row_dict, + index_dict, + self.num_neighbors, + self.node_time_dict, + self.num_hops, + self.replace, + self.directed, + ) + return node_dict, row_dict, col_dict, edge_dict + def __call__(self, index: Union[List[int], Tensor]): if not isinstance(index, torch.LongTensor): index = torch.LongTensor(index) if self.data_cls != 'custom' and issubclass(self.data_cls, Data): - fn = torch.ops.torch_sparse.neighbor_sample - node, row, col, edge = fn( - self.colptr, - self.row, - index, - self.num_neighbors, - self.replace, - self.directed, - ) - return node, row, col, edge, index.numel() + return self._sparse_neighbor_sample(index) + (index.numel(), ) elif self.data_cls == 'custom' or issubclass(self.data_cls, HeteroData): - if self.node_time_dict is None: - fn = torch.ops.torch_sparse.hetero_neighbor_sample - node_dict, row_dict, col_dict, edge_dict = fn( - self.node_types, - self.edge_types, - self.colptr_dict, - self.row_dict, - {self.input_type: index}, - self.num_neighbors, - self.num_hops, - self.replace, - self.directed, - ) - else: - try: - fn = torch.ops.torch_sparse.hetero_temporal_neighbor_sample - except RuntimeError as e: - raise RuntimeError( - "'torch_sparse' operator " - "'hetero_temporal_neighbor_sample' not " - "found. 
Currently requires building " - "'torch_sparse' from master.", e) - - node_dict, row_dict, col_dict, edge_dict = fn( - self.node_types, - self.edge_types, - self.colptr_dict, - self.row_dict, - {self.input_type: index}, - self.num_neighbors, - self.node_time_dict, - self.num_hops, - self.replace, - self.directed, - ) - return node_dict, row_dict, col_dict, edge_dict, index.numel() + return self._hetero_sparse_neighbor_sample( + {self.input_type: index}) + (index.numel(), ) class NeighborLoader(torch.utils.data.DataLoader): From 9fc80f38930b89180ac78707efba4a68f4c395b3 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Tue, 28 Jun 2022 16:22:47 -0700 Subject: [PATCH 0127/2432] Fix: `Data` and `HeteroData` feature store `get_all_tensor_attrs` (#4882) --- CHANGELOG.md | 2 +- test/data/test_data.py | 2 ++ test/data/test_hetero_data.py | 3 +++ torch_geometric/data/data.py | 5 ++++- torch_geometric/data/hetero_data.py | 3 ++- 5 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0257f809d15..8e352af49689 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/data/test_data.py b/test/data/test_data.py index 5d14be740627..eac9fc8cea9f 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -248,6 +248,8 @@ def my_attr1(self, value): def test_basic_feature_store(): data = Data() x = torch.randn(20, 20) + data.not_a_tensor_attr = 10 # don't include, not a tensor attr + data.bad_attr = torch.randn(10, 20) # don't include, bad cat_dim # Put tensor: assert data.put_tensor(copy.deepcopy(x), attr_name='x', index=None) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 6b8d88735537..ae4ed8d0776c 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ 
-428,6 +428,9 @@ def test_basic_feature_store(): assert data.get_tensor_size(group_name='paper', attr_name='x') == (20, 20) # Get tensor attrs: + data['paper'].num_nodes = 20 # don't include, not a tensor attr + data['paper'].bad_attr = torch.randn(10, 20) # don't include, bad cat_dim + tensor_attrs = data.get_all_tensor_attrs() assert len(tensor_attrs) == 1 assert tensor_attrs[0].group_name == 'paper' diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 0737ee180f71..512ad581f53c 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -782,7 +782,10 @@ def _get_tensor_size(self, attr: TensorAttr) -> Tuple: def get_all_tensor_attrs(self) -> List[TensorAttr]: r"""Obtains all feature attributes stored in `Data`.""" - return [TensorAttr(attr_name=name) for name in self._store.keys()] + return [ + TensorAttr(attr_name=name) for name in self._store.keys() + if self._store.is_node_attr(name) + ] def __len__(self) -> int: return BaseData.__len__(self) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index c343ae4330fb..c7a26686ec63 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -681,7 +681,8 @@ def get_all_tensor_attrs(self) -> List[TensorAttr]: out = [] for group_name, group in self.node_items(): for attr_name in group: - out.append(TensorAttr(group_name, attr_name)) + if group.is_node_attr(attr_name): + out.append(TensorAttr(group_name, attr_name)) return out def __len__(self) -> int: From 65ab1e001ef9dcafa7e67a1872cb14270bd64051 Mon Sep 17 00:00:00 2001 From: Bingnan Wang <137074046@qq.com> Date: Wed, 29 Jun 2022 14:35:49 +0800 Subject: [PATCH 0128/2432] Add support for `predict_dataloader` in `LightningNodeData` (#4884) * Update lightning_datamodule.py * Update lightning_datamodule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/data/lightning_datamodule.py | 36 ++++++++++++++++---- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e352af49689..b270e7ad05ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) diff --git a/torch_geometric/data/lightning_datamodule.py b/torch_geometric/data/lightning_datamodule.py index cafe28d4c77a..2c9f68ab018b 100644 --- a/torch_geometric/data/lightning_datamodule.py +++ b/torch_geometric/data/lightning_datamodule.py @@ -191,15 +191,30 @@ class LightningNodeData(LightningDataModule): data (Data or HeteroData): The :class:`~torch_geometric.data.Data` or :class:`~torch_geometric.data.HeteroData` graph object. input_train_nodes (torch.Tensor or str or (str, torch.Tensor)): The - indices of training nodes. 
If not given, will try to automatically - infer them from the :obj:`data` object. (default: :obj:`None`) + indices of training nodes. + If not given, will try to automatically infer them from the + :obj:`data` object by searching for :obj:`train_mask`, + :obj:`train_idx`, or :obj:`train_index` attributes. + (default: :obj:`None`) input_val_nodes (torch.Tensor or str or (str, torch.Tensor)): The - indices of validation nodes. If not given, will try to - automatically infer them from the :obj:`data` object. + indices of validation nodes. + If not given, will try to automatically infer them from the + :obj:`data` object by searching for :obj:`val_mask`, + :obj:`valid_mask`, :obj:`val_idx`, :obj:`valid_idx`, + :obj:`val_index`, or :obj:`valid_index` attributes. (default: :obj:`None`) input_test_nodes (torch.Tensor or str or (str, torch.Tensor)): The - indices of test nodes. If not given, will try to automatically - infer them from the :obj:`data` object. (default: :obj:`None`) + indices of test nodes. + If not given, will try to automatically infer them from the + :obj:`data` object by searching for :obj:`test_mask`, + :obj:`test_idx`, or :obj:`test_index` attributes. + (default: :obj:`None`) + input_pred_nodes (torch.Tensor or str or (str, torch.Tensor)): The + indices of prediction nodes. + If not given, will try to automatically infer them from the + :obj:`data` object by searching for :obj:`pred_mask`, + :obj:`pred_idx`, or :obj:`pred_index` attributes. + (default: :obj:`None`) loader (str): The scalability technique to use (:obj:`"full"`, :obj:`"neighbor"`). (default: :obj:`"neighbor"`) batch_size (int, optional): How many samples per batch to load. @@ -216,6 +231,7 @@ def __init__( input_train_nodes: InputNodes = None, input_val_nodes: InputNodes = None, input_test_nodes: InputNodes = None, + input_pred_nodes: InputNodes = None, loader: str = "neighbor", batch_size: int = 1, num_workers: int = 0, @@ -236,6 +252,9 @@ def __init__( if input_test_nodes is None: input_test_nodes = infer_input_nodes(data, split='test') + if input_pred_nodes is None: + input_pred_nodes = infer_input_nodes(data, split='pred') + if loader == 'full' and batch_size != 1: warnings.warn(f"Re-setting 'batch_size' to 1 in " f"'{self.__class__.__name__}' for loader='full' " @@ -279,6 +298,7 @@ def __init__( self.input_train_nodes = input_train_nodes self.input_val_nodes = input_val_nodes self.input_test_nodes = input_test_nodes + self.input_pred_nodes = input_pred_nodes def prepare_data(self): """""" @@ -323,6 +343,10 @@ def test_dataloader(self) -> DataLoader: """""" return self.dataloader(self.input_test_nodes, shuffle=False) + def predict_dataloader(self) -> DataLoader: + """""" + return self.dataloader(self.input_pred_nodes, shuffle=False) + def __repr__(self) -> str: kwargs = kwargs_repr(data=self.data, loader=self.loader, **self.kwargs) return f'{self.__class__.__name__}({kwargs})' From 6c3c235a653da2f11231eec70a6d010d345f87c7 Mon Sep 17 00:00:00 2001 From: Amitabha Roy Date: Wed, 29 Jun 2022 13:47:57 -0400 Subject: [PATCH 0129/2432] Add `LinkNeighborLoader` to Pytorch Lightning datamodule (#4868) * First cut changes. * Include batch size in the data object returned from the sampler. * No need to return batch size. * Remove unused import. * Refactor into a LightningLinkData module. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes. * Add changelog message and fix import order. * Add a link neighbor loader test. 
* Update torch_geometric/data/lightning_datamodule.py Co-authored-by: Jinu Sunil * Update torch_geometric/data/lightning_datamodule.py Co-authored-by: Jinu Sunil * Address review comments. * Address review comments. * Fix docstring issues. * Remove unused imports. * Fix long lines. Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinu Sunil --- CHANGELOG.md | 1 + test/data/test_lightning_datamodule.py | 25 ++- torch_geometric/data/__init__.py | 7 +- torch_geometric/data/lightning_datamodule.py | 169 +++++++++++++++++- .../loader/link_neighbor_loader.py | 5 +- 5 files changed, 198 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b270e7ad05ad..54569d998a03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added `LinkeNeighborLoader` support to lightning datamodule ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) - Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) diff --git a/test/data/test_lightning_datamodule.py b/test/data/test_lightning_datamodule.py index 21423538365f..db9f6b546b30 100644 --- a/test/data/test_lightning_datamodule.py +++ b/test/data/test_lightning_datamodule.py @@ -4,7 +4,11 @@ import torch import torch.nn.functional as F -from torch_geometric.data import LightningDataset, LightningNodeData +from torch_geometric.data import ( + LightningDataset, + LightningLinkData, + LightningNodeData, +) from torch_geometric.nn import global_mean_pool from torch_geometric.testing import onlyFullTest, withCUDA, withPackage @@ -264,3 +268,22 @@ def test_lightning_hetero_node_data(get_dataset): offset += 5 * devices * math.ceil(400 / (devices * 32)) # `train` offset += 5 * devices * math.ceil(400 / (devices * 32)) # `val` assert torch.all(new_x > (old_x + offset - 4)) # Ensure shared data. + + +@withCUDA +@onlyFullTest +@withPackage('pytorch_lightning') +def test_lightning_hetero_link_data(get_dataset): + # TODO: Add more datasets. 
+ dataset = get_dataset(name='DBLP') + data = dataset[0] + datamodule = LightningLinkData(data, loader='link_neighbor', + num_neighbors=[5], batch_size=32, + num_workers=3) + input_edges = (('author', 'dummy', 'paper'), data['author', + 'paper']['edge_index']) + loader = datamodule.dataloader(input_edges=input_edges, input_labels=None, + shuffle=True) + batch = next(iter(loader)) + assert (batch['author', 'dummy', + 'paper']['edge_label_index'].shape[1] == 32) diff --git a/torch_geometric/data/__init__.py b/torch_geometric/data/__init__.py index d1a05a542058..d3d68baa18ab 100644 --- a/torch_geometric/data/__init__.py +++ b/torch_geometric/data/__init__.py @@ -4,7 +4,11 @@ from .batch import Batch from .dataset import Dataset from .in_memory_dataset import InMemoryDataset -from .lightning_datamodule import LightningDataset, LightningNodeData +from .lightning_datamodule import ( + LightningDataset, + LightningLinkData, + LightningNodeData, +) from .makedirs import makedirs from .download import download_url from .extract import extract_tar, extract_zip, extract_bz2, extract_gz @@ -18,6 +22,7 @@ 'InMemoryDataset', 'LightningDataset', 'LightningNodeData', + 'LightningLinkData', 'makedirs', 'download_url', 'extract_tar', diff --git a/torch_geometric/data/lightning_datamodule.py b/torch_geometric/data/lightning_datamodule.py index 2c9f68ab018b..e2ac5039bd00 100644 --- a/torch_geometric/data/lightning_datamodule.py +++ b/torch_geometric/data/lightning_datamodule.py @@ -4,13 +4,14 @@ import torch from torch_geometric.data import Data, Dataset, HeteroData +from torch_geometric.loader import LinkNeighborLoader from torch_geometric.loader.dataloader import DataLoader from torch_geometric.loader.neighbor_loader import ( NeighborLoader, NeighborSampler, get_input_nodes, ) -from torch_geometric.typing import InputNodes +from torch_geometric.typing import InputEdges, InputNodes try: from pytorch_lightning import LightningDataModule as PLLightningDataModule @@ -245,9 +246,8 @@ def __init__( if input_val_nodes is None: input_val_nodes = infer_input_nodes(data, split='val') - - if input_val_nodes is None: - input_val_nodes = infer_input_nodes(data, split='valid') + if input_val_nodes is None: + input_val_nodes = infer_input_nodes(data, split='valid') if input_test_nodes is None: input_test_nodes = infer_input_nodes(data, split='test') @@ -352,6 +352,167 @@ def __repr__(self) -> str: return f'{self.__class__.__name__}({kwargs})' +# TODO: Unify implementation with LightningNodeData via a common base class. +class LightningLinkData(LightningDataModule): + r"""Converts a :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object into a + :class:`pytorch_lightning.LightningDataModule` variant, which can be + automatically used as a :obj:`datamodule` for multi-GPU link-level + training (such as for link prediction) via `PyTorch Lightning + `_. :class:`LightningDataset` will + take care of providing mini-batches via + :class:`~torch_geometric.loader.LinkNeighborLoader`. + + .. note:: + + Currently only the + :class:`pytorch_lightning.strategies.SingleDeviceStrategy` and + :class:`pytorch_lightning.strategies.DDPSpawnStrategy` training + strategies of `PyTorch Lightning + `__ are supported in order to correctly share data across + all devices/processes: + + .. 
code-block:: + + import pytorch_lightning as pl + trainer = pl.Trainer(strategy="ddp_spawn", accelerator="gpu", + devices=4) + trainer.fit(model, datamodule) + + Args: + data (Data or HeteroData): The :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` graph object. + input_train_edges (Tensor or EdgeType or Tuple[EdgeType, Tensor]): + The training edges. (default: :obj:`None`) + input_train_edge_label (Tensor, optional): + The labels of train edge indices. + input_val_edges (Tensor or EdgeType or Tuple[EdgeType, Tensor]): + The validation edges. (default: :obj:`None`) + input_val_edge_label (Tensor, optional): + The labels of val edge indices. + input_test_edges (Tensor or EdgeType or Tuple[EdgeType, Tensor]): + The test edges. (default: :obj:`None`) + input_test_edge_label (Tensor, optional): + The labels of train edge indices. + loader (str): The scalability technique to use (:obj:`"full"`, + :obj:`"link_neighbor"`). (default: :obj:`"link_neighbor"`) + batch_size (int, optional): How many samples per batch to load. + (default: :obj:`1`) + num_workers: How many subprocesses to use for data loading. + :obj:`0` means that the data will be loaded in the main process. + (default: :obj:`0`) + **kwargs (optional): Additional arguments of + :class:`torch_geometric.loader.LinkNeighborLoader`. + """ + def __init__( + self, + data: Union[Data, HeteroData], + input_train_edges: InputEdges = None, + input_train_edge_label: torch.Tensor = None, + input_val_edges: InputEdges = None, + input_val_edge_label: torch.Tensor = None, + input_test_edges: InputEdges = None, + input_test_edge_label: torch.Tensor = None, + loader: str = "link_neighbor", + batch_size: int = 1, + num_workers: int = 0, + **kwargs, + ): + + assert loader in ['full', 'link_neighbor'] + # TODO: Handle or document behavior where none of train, val, test + # edges are specified. 
+ if loader == 'full' and batch_size != 1: + warnings.warn(f"Re-setting 'batch_size' to 1 in " + f"'{self.__class__.__name__}' for loader='full' " + f"(got '{batch_size}')") + batch_size = 1 + + if loader == 'full' and num_workers != 0: + warnings.warn(f"Re-setting 'num_workers' to 0 in " + f"'{self.__class__.__name__}' for loader='full' " + f"(got '{num_workers}')") + num_workers = 0 + + super().__init__( + has_val=input_val_edges is not None, + has_test=input_test_edges is not None, + batch_size=batch_size, + num_workers=num_workers, + **kwargs, + ) + + if loader == 'full': + if kwargs.get('pin_memory', False): + warnings.warn(f"Re-setting 'pin_memory' to 'False' in " + f"'{self.__class__.__name__}' for loader='full' " + f"(got 'True')") + self.kwargs['pin_memory'] = False + + self.data = data + self.loader = loader + + self.input_train_edges = input_train_edges + self.input_train_edge_label = input_train_edge_label + self.input_val_edges = input_val_edges + self.input_val_edge_label = input_val_edge_label + self.input_test_edges = input_test_edges + self.input_test_edge_label = input_test_edge_label + + def prepare_data(self): + """""" + if self.loader == 'full': + try: + num_devices = self.trainer.num_devices + except AttributeError: + # PyTorch Lightning < 1.6 backward compatibility: + num_devices = self.trainer.num_processes + num_devices = max(num_devices, self.trainer.num_gpus) + + if num_devices > 1: + raise ValueError( + f"'{self.__class__.__name__}' with loader='full' requires " + f"training on a single device") + super().prepare_data() + + def dataloader(self, input_edges: InputEdges, input_labels: torch.Tensor, + shuffle: bool) -> DataLoader: + if self.loader == 'full': + warnings.filterwarnings('ignore', '.*does not have many workers.*') + warnings.filterwarnings('ignore', '.*data loading bottlenecks.*') + return torch.utils.data.DataLoader([self.data], shuffle=False, + collate_fn=lambda xs: xs[0], + **self.kwargs) + + if self.loader == 'link_neighbor': + return LinkNeighborLoader(data=self.data, + edge_label_index=input_edges, + edge_label=input_labels, shuffle=shuffle, + **self.kwargs) + + raise NotImplementedError + + def train_dataloader(self) -> DataLoader: + """""" + return self.dataloader(self.input_train_edges, + self.input_train_edge_label, shuffle=True) + + def val_dataloader(self) -> DataLoader: + """""" + return self.dataloader(self.input_val_edges, self.input_val_edge_label, + shuffle=False) + + def test_dataloader(self) -> DataLoader: + """""" + return self.dataloader(self.input_test_edges, + self.input_test_edge_label, shuffle=False) + + def __repr__(self) -> str: + kwargs = kwargs_repr(data=self.data, loader=self.loader, **self.kwargs) + return f'{self.__class__.__name__}({kwargs})' + + ############################################################################### diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 98d527d6e475..b3d4cc70bd63 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -108,7 +108,7 @@ class LinkNeighborLoader(torch.utils.data.DataLoader): .. 
code-block:: python from torch_geometric.datasets import Planetoid - from torch_geometric.loader import NeighborLoader + from torch_geometric.loader import LinkNeighborLoader data = Planetoid(path, name='Cora')[0] @@ -276,7 +276,7 @@ def __init__( def filter_fn(self, out: Any) -> Union[Data, HeteroData]: if isinstance(self.data, Data): - node, row, col, edge, edge_label_index, edge_label = out + (node, row, col, edge, edge_label_index, edge_label) = out data = filter_data(self.data, node, row, col, edge, self.neighbor_sampler.perm) data.edge_label_index = edge_label_index @@ -355,7 +355,6 @@ def get_edge_label_index( edge_type, edge_label_index = edge_label_index edge_type = data._to_canonical(*edge_type) - assert edge_type in data.edge_types if edge_label_index is None: return edge_type, data[edge_type].edge_index From 7f55f4198164be5124a253813cfc041ad84ec2d0 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Wed, 29 Jun 2022 12:50:31 -0700 Subject: [PATCH 0130/2432] `GraphStore`: support `COO` layouts, refactor conversion logic (#4883) Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/data/test_data.py | 2 +- test/data/test_graph_store.py | 56 +++++ test/data/test_hetero_data.py | 9 +- test/loader/test_neighbor_loader.py | 31 +-- torch_geometric/data/data.py | 83 +++---- torch_geometric/data/graph_store.py | 266 +++++++++++++++++++++- torch_geometric/data/hetero_data.py | 27 ++- torch_geometric/loader/neighbor_loader.py | 77 ++----- torch_geometric/loader/utils.py | 2 + torch_geometric/testing/graph_store.py | 4 +- 11 files changed, 406 insertions(+), 153 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 54569d998a03..55764f4c8505 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
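To make the `LightningLinkData` module added in #4868 above more concrete, here is a minimal usage sketch (not part of the patch itself). It assumes `pytorch_lightning` is installed; the DBLP dataset, the `('author', 'to', 'paper')` edge type, and all hyper-parameters are illustrative choices only:

    # Illustrative usage of LightningLinkData (#4868); dataset, edge type and
    # hyper-parameters are assumptions, not taken from the patch.
    from torch_geometric.data import LightningLinkData
    from torch_geometric.datasets import DBLP

    data = DBLP(root='/tmp/DBLP')[0]
    edge_type = ('author', 'to', 'paper')

    datamodule = LightningLinkData(
        data,
        input_train_edges=(edge_type, data[edge_type].edge_index),
        loader='link_neighbor',   # mini-batches come from LinkNeighborLoader
        num_neighbors=[10, 10],   # forwarded to LinkNeighborLoader via **kwargs
        batch_size=128,
    )

    batch = next(iter(datamodule.train_dataloader()))
    assert batch[edge_type].edge_label_index.size(1) == 128

In a full training run, the datamodule would be passed to `pl.Trainer.fit()` together with a user-defined `LightningModule`, mirroring the `LightningNodeData` workflow.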
- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/data/test_data.py b/test/data/test_data.py index eac9fc8cea9f..895a9ec961e8 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -299,7 +299,7 @@ def assert_equal_tensor_tuple(expected, actual): csc = adj.csc()[-2::-1] # (row, colptr) # Put: - data.put_edge_index(coo, layout='coo') + data.put_edge_index(coo, layout='coo', size=(3, 3)) data.put_edge_index(csr, layout='csr') data.put_edge_index(csc, layout='csc') diff --git a/test/data/test_graph_store.py b/test/data/test_graph_store.py index 5ce1e3f85434..fda9bd6a13fc 100644 --- a/test/data/test_graph_store.py +++ b/test/data/test_graph_store.py @@ -4,6 +4,13 @@ from torch_geometric.data.graph_store import EdgeLayout from torch_geometric.testing.graph_store import MyGraphStore +from torch_geometric.utils.sort_edge_index import sort_edge_index + + +def get_edge_index(num_src_nodes, num_dst_nodes, num_edges): + row = torch.randint(num_src_nodes, (num_edges, ), dtype=torch.long) + col = torch.randint(num_dst_nodes, (num_edges, ), dtype=torch.long) + return torch.stack([row, col], dim=0) def test_graph_store(): @@ -38,3 +45,52 @@ def assert_equal_tensor_tuple(expected, actual): with pytest.raises(KeyError): _ = graph_store['edge_2', 'coo'] + + +def test_graph_store_conversion(): + graph_store = MyGraphStore() + edge_index = get_edge_index(100, 100, 300) + edge_index = sort_edge_index(edge_index, sort_by_row=False) + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) + + coo = (edge_index[0], edge_index[1]) + csr = adj.csr()[:2] + csc = adj.csc()[-2::-1] + + # Put all edge indices: + graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'), + layout='coo', size=(100, 100), is_sorted=True) + + 
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'), + layout='csr', size=(100, 100)) + + graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'), + layout='csc', size=(100, 100)) + + def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor): + assert torch.equal(sort_edge_index(expected), sort_edge_index(actual)) + + # Convert to COO: + row_dict, col_dict, perm_dict = graph_store.coo() + assert len(row_dict) == len(col_dict) == len(perm_dict) == 3 + for key in row_dict.keys(): + actual = torch.stack((row_dict[key], col_dict[key])) + assert_edge_index_equal(actual, edge_index) + assert perm_dict[key] is None + + # Convert to CSR: + rowptr_dict, col_dict, perm_dict = graph_store.csr() + assert len(rowptr_dict) == len(col_dict) == len(perm_dict) == 3 + for key in rowptr_dict: + assert torch.equal(rowptr_dict[key], csr[0]) + assert torch.equal(col_dict[key], csr[1]) + if key == ('v', '1', 'v'): + assert perm_dict[key] is not None + + # Convert to CSC: + row_dict, colptr_dict, perm_dict = graph_store.csc() + assert len(row_dict) == len(colptr_dict) == len(perm_dict) == 3 + for key in row_dict: + assert torch.equal(row_dict[key], csc[0]) + assert torch.equal(colptr_dict[key], csc[1]) + assert perm_dict[key] is None diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index ae4ed8d0776c..463eb0eb9760 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -464,9 +464,12 @@ def assert_equal_tensor_tuple(expected, actual): csc = adj.csc()[-2::-1] # (row, colptr) # Put: - data.put_edge_index(coo, layout='coo', edge_type=('a', 'to', 'b')) - data.put_edge_index(csr, layout='csr', edge_type=('a', 'to', 'c')) - data.put_edge_index(csc, layout='csc', edge_type=('b', 'to', 'c')) + data.put_edge_index(coo, layout='coo', edge_type=('a', 'to', 'b'), + size=(3, 3)) + data.put_edge_index(csr, layout='csr', edge_type=('a', 'to', 'c'), + size=(3, 3)) + data.put_edge_index(csc, layout='csc', edge_type=('b', 'to', 'c'), + size=(3, 3)) # Get: assert_equal_tensor_tuple( diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index b08e82c73810..789403f2747e 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -297,29 +297,30 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): feature_store.put_tensor(x, group_name='author', attr_name='x', index=None) # Set up edge indices: + + # COO: edge_index = get_edge_index(100, 100, 500) data['paper', 'to', 'paper'].edge_index = edge_index - graph_store.put_edge_index( - edge_index=SparseTensor.from_edge_index(edge_index).csr()[:2], - edge_type=('paper', 'to', 'paper'), - layout='csr', - ) + coo = (edge_index[0], edge_index[1]) + graph_store.put_edge_index(edge_index=coo, + edge_type=('paper', 'to', 'paper'), + layout='coo', size=(100, 100)) + # CSR: edge_index = get_edge_index(100, 200, 1000) data['paper', 'to', 'author'].edge_index = edge_index - graph_store.put_edge_index( - edge_index=SparseTensor.from_edge_index(edge_index).csr()[:2], - edge_type=('paper', 'to', 'author'), - layout='csr', - ) + csr = SparseTensor.from_edge_index(edge_index).csr()[:2] + graph_store.put_edge_index(edge_index=csr, + edge_type=('paper', 'to', 'author'), + layout='csr', size=(100, 200)) + # CSC: edge_index = get_edge_index(200, 100, 1000) data['author', 'to', 'paper'].edge_index = edge_index - graph_store.put_edge_index( - edge_index=SparseTensor.from_edge_index(edge_index).csr()[:2], - edge_type=('author', 'to', 
'paper'), - layout='csr', - ) + csc = SparseTensor(row=edge_index[1], col=edge_index[0]).csr()[-2::-1] + graph_store.put_edge_index(edge_index=csc, + edge_type=('author', 'to', 'paper'), + layout='csc', size=(200, 100)) # Construct neighbor loaders: loader1 = NeighborLoader(data, batch_size=20, diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 512ad581f53c..7894c7b423c8 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -24,7 +24,14 @@ TensorAttr, _field_status, ) -from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout, GraphStore +from torch_geometric.data.graph_store import ( + EDGE_LAYOUT_TO_ATTR_NAME, + EdgeAttr, + EdgeLayout, + GraphStore, + adj_type_to_edge_tensor_type, + edge_tensor_type_to_adj_type, +) from torch_geometric.data.storage import ( BaseStorage, EdgeStorage, @@ -33,7 +40,6 @@ ) from torch_geometric.deprecation import deprecated from torch_geometric.typing import ( - Adj, EdgeTensorType, EdgeType, FeatureTensorType, @@ -328,10 +334,15 @@ def __init__(self, attr_name=_field_status.UNSET, class DataEdgeAttr(EdgeAttr): r"""Edge attribute class for `Data`, which does not require a `edge_type`.""" - def __init__(self, layout: EdgeLayout, is_sorted: bool = False, - edge_type: EdgeType = None): - # Treat group_name as optional, and move it to the end - super().__init__(edge_type, layout, is_sorted) + def __init__( + self, + layout: EdgeLayout, + is_sorted: bool = False, + size: Optional[Tuple[int, int]] = None, + edge_type: EdgeType = None, + ): + # Treat edge_type as optional, and move it to the end + super().__init__(edge_type, layout, is_sorted, size) class Data(BaseData, FeatureStore, GraphStore): @@ -795,10 +806,20 @@ def __len__(self) -> int: def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: r"""Stores `edge_index` in `Data`, in the specified layout.""" + # Convert the edge index to a recognizable layout: attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) setattr(self, attr_name, attr_val) + + # Set size, if possible: + size = edge_attr.size + if size is not None: + if size[0] != size[1]: + raise ValueError( + f"'Data' requires size[0] == size[1], but received " + f"the tuple {size}.") + self.num_nodes = size[0] return True def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: @@ -818,58 +839,14 @@ def get_all_edge_attrs(self) -> List[EdgeAttr]: out = [] for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): if attr_name in self: - out.append(EdgeAttr(edge_type=None, layout=layout)) + out.append( + EdgeAttr(edge_type=None, layout=layout, + size=(self.num_nodes, self.num_nodes))) return out ############################################################################### -EDGE_LAYOUT_TO_ATTR_NAME = { - EdgeLayout.COO: 'edge_index', - EdgeLayout.CSR: 'adj', - EdgeLayout.CSC: 'adj_t', -} - - -def edge_tensor_type_to_adj_type( - attr: EdgeAttr, - tensor_tuple: EdgeTensorType, -) -> Adj: - r"""Converts an EdgeTensorType tensor tuple to a PyG Adj tensor.""" - src, dst = tensor_tuple - - if attr.layout == EdgeLayout.COO: - # COO: (row, col) - if (src[0].storage().data_ptr() == dst[1].storage().data_ptr()): - # Do not copy if the tensor tuple is constructed from the same - # storage (instead, return a view): - out = torch.empty(0, dtype=src.dtype) - out.set_(src.storage(), storage_offset=0, - size=src.size() + dst.size()) - return out.view(2, -1) - return torch.stack(tensor_tuple) - elif 
attr.layout == EdgeLayout.CSR: - # CSR: (rowptr, col) - return SparseTensor(rowptr=src, col=dst, is_sorted=True) - elif attr.layout == EdgeLayout.CSC: - # CSC: (row, colptr) this is a transposed adjacency matrix, so rowptr - # is the compressed column and col is the uncompressed row. - return SparseTensor(rowptr=dst, col=src, is_sorted=True) - raise ValueError(f"Bad edge layout (got '{attr.layout}')") - - -def adj_type_to_edge_tensor_type(layout: EdgeLayout, - edge_index: Adj) -> EdgeTensorType: - r"""Converts a PyG Adj tensor to an EdgeTensorType equivalent.""" - if isinstance(edge_index, Tensor): - return (edge_index[0], edge_index[1]) # (row, col) - if layout == EdgeLayout.COO: - return edge_index.coo()[:-1] # (row, col - elif layout == EdgeLayout.CSR: - return edge_index.csr()[:-1] # (rowptr, col) - else: - return edge_index.csr()[-2::-1] # (row, colptr) - def size_repr(key: Any, value: Any, indent: int = 0) -> str: pad = ' ' * indent diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py index 2d3cde1a7c2c..48e66bd17503 100644 --- a/torch_geometric/data/graph_store.py +++ b/torch_geometric/data/graph_store.py @@ -1,11 +1,30 @@ +import copy +import warnings from abc import abstractmethod +from collections import defaultdict from dataclasses import dataclass from enum import Enum -from typing import Any, List, Optional +from typing import Any, Dict, List, Optional, Tuple -from torch_geometric.typing import EdgeTensorType +import torch +from torch import Tensor +from torch_sparse import SparseTensor + +from torch_geometric.typing import Adj, EdgeTensorType, OptTensor from torch_geometric.utils.mixin import CastMixin +# The output of converting between two types in the GraphStore is a Tuple of +# dictionaries: row, col, and perm. The dictionaries are keyed by the edge +# type of the input edge attribute. +# * The row dictionary contains the row tensor for COO, the row pointer for +# CSR, or the row tensor for CSC +# * The col dictionary contains the col tensor for COO, the col tensor for +# CSR, or the col pointer for CSC +# * The perm dictionary contains the permutation of edges that was applied +# in converting between formats, if applicable. +ConversionOutputType = Tuple[Dict[str, Tensor], Dict[str, Tensor], + Dict[str, OptTensor]] + class EdgeLayout(Enum): COO = 'coo' @@ -28,14 +47,22 @@ class EdgeAttr(CastMixin): # meaningful for COO (CSC and CSR are sorted by definition) is_sorted: bool = False - # TODO support num_nodes here, default None, so users can specify this - # instead of relying on default inferral + # The number of nodes in this edge type. 
If set to None, will attempt to + # infer with the simple heuristic int(self.edge_index.max()) + 1 + size: Optional[Tuple[int, int]] = None - def __init__(self, edge_type: Optional[Any], layout: EdgeLayout, - is_sorted: bool = False): + # NOTE we define __init__ to force-cast layout + def __init__( + self, + edge_type: Optional[Any], + layout: EdgeLayout, + is_sorted: bool = False, + size: Optional[Tuple[int, int]] = None, + ): self.edge_type = edge_type self.layout = EdgeLayout(layout) self.is_sorted = is_sorted + self.size = size class GraphStore: @@ -92,15 +119,133 @@ def get_edge_index(self, *args, **kwargs) -> EdgeTensorType: """ edge_attr = self._edge_attr_cls.cast(*args, **kwargs) edge_attr.layout = EdgeLayout(edge_attr.layout) + # Override is_sorted for CSC and CSR: + edge_attr.is_sorted = edge_attr.is_sorted or (edge_attr.layout in [ + EdgeLayout.CSC, EdgeLayout.CSR + ]) edge_index = self._get_edge_index(edge_attr) if edge_index is None: raise KeyError(f"An edge corresponding to '{edge_attr}' was not " f"found") return edge_index - # TODO implement coo(), csc(), csr() methods on GraphStore, which perform - # conversions of edge indices between formats. These conversions can also - # automatically be performed in `get_edge_index` + # Layout Conversion ####################################################### + + # TODO support `replace` to replace the existing edge index. + def _to_layout(self, layout: EdgeLayout, + store: bool = False) -> ConversionOutputType: + # Obtain all edge attributes, grouped by type: + edge_attrs = self.get_all_edge_attrs() + edge_type_to_attrs: Dict[Any, List[EdgeAttr]] = defaultdict(list) + for attr in edge_attrs: + edge_type_to_attrs[attr.edge_type].append(attr) + + # Convert layouts for each attribute from its most favorable original + # layout to the desired layout. Store permutations of edges if + # necessary as part of the conversion: + row_dict, col_dict, perm_dict = {}, {}, {} + for edge_attrs in edge_type_to_attrs.values(): + edge_layouts = [edge_attr.layout for edge_attr in edge_attrs] + + # Ignore if requested layout is already present: + if layout in edge_layouts: + from_attr = edge_attrs[edge_layouts.index(layout)] + row, col = self.get_edge_index(from_attr) + perm = None + + # Convert otherwise: + else: + # Pick the most favorable layout to convert from. 
We prefer + # COO to CSC/CSR: + from_attr = None + if EdgeLayout.COO in edge_layouts: + from_attr = edge_attrs[edge_layouts.index(EdgeLayout.COO)] + elif EdgeLayout.CSC in edge_layouts: + from_attr = edge_attrs[edge_layouts.index(EdgeLayout.CSC)] + else: + from_attr = edge_attrs[edge_layouts.index(EdgeLayout.CSR)] + + from_tuple = self.get_edge_index(from_attr) + + # Convert to the new layout: + if layout == EdgeLayout.COO: + if from_attr.layout == EdgeLayout.CSR: + col = from_tuple[1] + row = torch.ops.torch_sparse.ptr2ind( + from_tuple[0], col.numel()) + else: + row = from_tuple[0] + col = torch.ops.torch_sparse.ptr2ind( + from_tuple[1], row.numel()) + perm = None + + elif layout == EdgeLayout.CSR: + # We convert to CSR by converting to CSC on the transpose + if from_attr.layout == EdgeLayout.COO: + adj = edge_tensor_type_to_adj_type( + from_attr, (from_tuple[1], from_tuple[0])) + else: + adj = edge_tensor_type_to_adj_type( + from_attr, from_tuple).t() + + # NOTE we set is_sorted=False here as is_sorted refers to + # the edge_index being sorted by the destination node + # (column), but here we deal with the transpose + from_attr_copy = copy.copy(from_attr) + from_attr_copy.is_sorted = False + from_attr_copy.size = None if from_attr.size is None else ( + from_attr.size[1], from_attr.size[0]) + + # Actually rowptr, col, perm + row, col, perm = to_csc(adj, from_attr_copy, device='cpu') + + else: + adj = edge_tensor_type_to_adj_type(from_attr, from_tuple) + + # Actually colptr, row, perm + col, row, perm = to_csc(adj, from_attr, device='cpu') + + row_dict[from_attr.edge_type] = row + col_dict[from_attr.edge_type] = col + perm_dict[from_attr.edge_type] = perm + + if store and layout not in edge_layouts: + # We do not store converted edge indices if this conversion + # results in a permutation of nodes in the original edge index. + # This is to exercise an abundance of caution in the case that + # there are edge attributes. + if perm is not None: + warnings.warn(f"Edge index {from_attr.edge_type} with " + f"layout {from_attr.layout} was not sorted " + f"by destination node, so conversion to " + f"{layout} resulted in a permutation of " + f"the order of edges. As a result, the " + f"converted edge is not being re-stored in " + f"the graph store. 
Please sort the edge " + f"index and set 'is_sorted=True' to avoid " + f"this warning.") + else: + is_sorted = (layout != EdgeLayout.COO) + self._put_edge_index((row, col), + EdgeAttr(from_attr.edge_type, layout, + is_sorted, from_attr.size)) + + return row_dict, col_dict, perm_dict + + def coo(self, store: bool = False) -> ConversionOutputType: + r"""Converts the edge indices in the graph store to COO format, + optionally storing the converted edge indices in the graph store.""" + return self._to_layout(EdgeLayout.COO, store) + + def csr(self, store: bool = False) -> ConversionOutputType: + r"""Converts the edge indices in the graph store to CSR format, + optionally storing the converted edge indices in the graph store.""" + return self._to_layout(EdgeLayout.CSR, store) + + def csc(self, store: bool = False) -> ConversionOutputType: + r"""Converts the edge indices in the graph store to CSC format, + optionally storing the converted edge indices in the graph store.""" + return self._to_layout(EdgeLayout.CSC, store) # Additional methods ###################################################### @@ -117,3 +262,106 @@ def __setitem__(self, key: EdgeAttr, value: EdgeTensorType): def __getitem__(self, key: EdgeAttr) -> Optional[EdgeTensorType]: key = self._edge_attr_cls.cast(key) return self.get_edge_index(key) + + +# Data and HeteroData utilities ############################################### + +EDGE_LAYOUT_TO_ATTR_NAME = { + EdgeLayout.COO: 'edge_index', + EdgeLayout.CSR: 'adj', + EdgeLayout.CSC: 'adj_t', +} + + +def edge_tensor_type_to_adj_type( + attr: EdgeAttr, + tensor_tuple: EdgeTensorType, +) -> Adj: + r"""Converts an EdgeTensorType tensor tuple to a PyG Adj tensor.""" + src, dst = tensor_tuple + + if attr.layout == EdgeLayout.COO: + # COO: (row, col) + if (src[0].storage().data_ptr() == dst[1].storage().data_ptr() + and src.storage_offset() < dst.storage_offset()): + # Do not copy if the tensor tuple is constructed from the same + # storage (instead, return a view): + out = torch.empty(0, dtype=src.dtype) + out.set_(src.storage(), storage_offset=src.storage_offset(), + size=(src.size()[0] + dst.size()[0], )) + return out.view(2, -1) + return torch.stack(tensor_tuple) + elif attr.layout == EdgeLayout.CSR: + # CSR: (rowptr, col) + return SparseTensor(rowptr=src, col=dst, is_sorted=True, + sparse_sizes=attr.size) + elif attr.layout == EdgeLayout.CSC: + # CSC: (row, colptr) is a transposed adjacency matrix, so rowptr + # is the compressed column and col is the uncompressed row. + sparse_sizes = None if attr.size is None else (attr.size[1], + attr.size[0]) + return SparseTensor(rowptr=dst, col=src, is_sorted=True, + sparse_sizes=sparse_sizes) + raise ValueError(f"Bad edge layout (got '{attr.layout}')") + + +def adj_type_to_edge_tensor_type(layout: EdgeLayout, + edge_index: Adj) -> EdgeTensorType: + r"""Converts a PyG Adj tensor to an EdgeTensorType equivalent.""" + if isinstance(edge_index, Tensor): + return (edge_index[0], edge_index[1]) # (row, col) + if layout == EdgeLayout.COO: + return edge_index.coo()[:-1] # (row, col) + elif layout == EdgeLayout.CSR: + return edge_index.csr()[:-1] # (rowptr, col) + else: + return edge_index.csr()[-2::-1] # (row, colptr) + + +############################################################################### + + +def to_csc( + adj: Adj, + edge_attr: EdgeAttr, + device: Optional[torch.device] = None, + share_memory: bool = False, +) -> Tuple[Tensor, Tensor, OptTensor]: + # Convert the graph data into a suitable format for sampling (CSC format). 
+ # Returns the `colptr` and `row` indices of the graph, as well as an + # `perm` vector that denotes the permutation of edges. + # Since no permutation of edges is applied when using `SparseTensor`, + # `perm` can be of type `None`. + perm: Optional[Tensor] = None + layout = edge_attr.layout + is_sorted = edge_attr.is_sorted + size = edge_attr.size + + if layout == EdgeLayout.CSR: + colptr, row, _ = adj.csc() + elif layout == EdgeLayout.CSC: + colptr, row, _ = adj.csr() + else: + if size is None: + raise ValueError( + f"Edge {edge_attr.edge_type} cannot be converted " + f"to a different type without specifying 'size' for " + f"the source and destination node types (got {size}). " + f"Please specify these parameters for successful execution. ") + (row, col) = adj + if not is_sorted: + perm = (col * size[0]).add_(row).argsort() + row = row[perm] + colptr = torch.ops.torch_sparse.ind2ptr(col[perm], size[1]) + + colptr = colptr.to(device) + row = row.to(device) + perm = perm.to(device) if perm is not None else None + + if not colptr.is_cuda and share_memory: + colptr.share_memory_() + row.share_memory_() + if perm is not None: + perm.share_memory_() + + return colptr, row, perm diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index c7a26686ec63..6a0505b2654f 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -9,16 +9,15 @@ from torch import Tensor from torch_sparse import SparseTensor -from torch_geometric.data.data import ( +from torch_geometric.data.data import BaseData, Data, size_repr +from torch_geometric.data.feature_store import FeatureStore, TensorAttr +from torch_geometric.data.graph_store import ( EDGE_LAYOUT_TO_ATTR_NAME, - BaseData, - Data, + EdgeAttr, + GraphStore, adj_type_to_edge_tensor_type, edge_tensor_type_to_adj_type, - size_repr, ) -from torch_geometric.data.feature_store import FeatureStore, TensorAttr -from torch_geometric.data.graph_store import EdgeAttr, GraphStore from torch_geometric.data.storage import BaseStorage, EdgeStorage, NodeStorage from torch_geometric.typing import ( EdgeTensorType, @@ -696,10 +695,22 @@ def __iter__(self): def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: r"""Stores an edge index in edge storage, in the specified layout.""" + # Convert the edge index to a recognizable layout: attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_attr.layout] attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) setattr(self[edge_attr.edge_type], attr_name, attr_val) + + key = self._to_canonical(edge_attr.edge_type) + src, _, dst = key + + # Handle num_nodes, if possible: + size = edge_attr.size + if size is not None: + # TODO better warning in the case of overwriting 'num_nodes' + self[src].num_nodes = size[0] + self[dst].num_nodes = size[1] + return True def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: @@ -719,7 +730,9 @@ def get_all_edge_attrs(self) -> List[EdgeAttr]: for edge_type, edge_store in self.edge_items(): for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): if attr_name in edge_store: - out.append(EdgeAttr(edge_type=edge_type, layout=layout)) + out.append( + EdgeAttr(edge_type=edge_type, layout=layout, + size=self[edge_type].size())) return out diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 4cf8716cdb82..ff3c0e7b9cfa 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -1,4 +1,3 @@ -from 
collections import defaultdict from collections.abc import Sequence from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union @@ -6,12 +5,8 @@ from torch import Tensor from torch_geometric.data import Data, HeteroData -from torch_geometric.data.data import ( - EDGE_LAYOUT_TO_ATTR_NAME, - edge_tensor_type_to_adj_type, -) from torch_geometric.data.feature_store import FeatureStore, TensorAttr -from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout, GraphStore +from torch_geometric.data.graph_store import GraphStore from torch_geometric.loader.base import DataLoaderIterator from torch_geometric.loader.utils import ( edge_type_to_str, @@ -126,62 +121,20 @@ def __init__( assert input_type is not None self.input_type = input_type - # Obtain CSC representation of graph for in-memory sampling: - # TODO this code will be replaced with a `GraphStore.sample` call - # when sampling routines are factored out to work with pyg-lib and - # GraphStore - edge_type_to_layouts: Dict[Any, - List[EdgeLayout]] = defaultdict(list) - for attr in edge_attrs: - edge_type_to_layouts[attr.edge_type].append(attr.layout) - - self.colptr_dict, self.row_dict, self.perm_dict = {}, {}, {} - for edge_type, edge_layouts in edge_type_to_layouts.items(): - key = edge_type_to_str(edge_type) - - # Select the most favorable layout, if multiple exist: - edge_layout = edge_layouts[0] - ordering = { - EdgeLayout.COO: 0, - EdgeLayout.CSR: 1, - EdgeLayout.CSC: 2 - } - for layout in edge_layouts[1:]: - if ordering[layout] > ordering[edge_layout]: - edge_layout = layout - - # TODO the below logic currently only works for CSC and CSR - # edge layouts, so throw an exception of our best format is - # COO: - if edge_layout == EdgeLayout.COO: - raise ValueError( - f"NeighborSampler currently only supports CSC and " - f"CSR edge index types in the GraphStore, but " - f"edge {edge_type} has format " - f"{edge_layout.value.upper()}. 
Please convert " - f"{edge_type} to either CSC or CSR formats " - f"in order to use it with NeighborSampler.") - - # Obtain edge index from backing GraphStore: - edge_index_tuple = graph_store.get_edge_index( - edge_type=edge_type, layout=edge_layout) - - # Convert to format for to_csc: - class _DataArgument(object): - pass - - data_argument = _DataArgument() - attr_name = EDGE_LAYOUT_TO_ATTR_NAME[edge_layout] - edge_index = edge_tensor_type_to_adj_type( - EdgeAttr(layout=edge_layout, edge_type=edge_type), - edge_index_tuple) - - setattr(data_argument, attr_name, edge_index) - - self.colptr_dict[key], self.row_dict[key], self.perm_dict[ - key] = to_csc(data_argument, device='cpu', - share_memory=share_memory, - is_sorted=is_sorted) + # Obtain CSC representations for in-memory sampling: + row_dict, colptr_dict, perm_dict = graph_store.csc() + self.row_dict = { + edge_type_to_str(k): v + for k, v in row_dict.items() + } + self.colptr_dict = { + edge_type_to_str(k): v + for k, v in colptr_dict.items() + } + self.perm_dict = { + edge_type_to_str(k): v + for k, v in perm_dict.items() + } else: raise TypeError(f'NeighborLoader found invalid type: {type(data)}') diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index d562f7c890af..5f97418f3f96 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -35,6 +35,7 @@ def str_to_edge_type(key: Union[EdgeType, str]) -> EdgeType: return key if isinstance(key, tuple) else tuple(key.split('__')) +# TODO deprecate when FeatureStore / GraphStore unification is complete def to_csc( data: Union[Data, EdgeStorage], device: Optional[torch.device] = None, @@ -201,6 +202,7 @@ def filter_feature_store( data = HeteroData() # Filter edge storage: + # TODO support edge attributes for key in edge_dict: edge_index = torch.stack([row_dict[key], col_dict[key]], dim=0) data[str_to_edge_type(key)].edge_index = edge_index diff --git a/torch_geometric/testing/graph_store.py b/torch_geometric/testing/graph_store.py index 1ef79fa501a6..ab49938014e5 100644 --- a/torch_geometric/testing/graph_store.py +++ b/torch_geometric/testing/graph_store.py @@ -16,7 +16,7 @@ def __init__(self): @staticmethod def key(attr: EdgeAttr) -> str: - return (attr.edge_type, attr.layout.value) + return (attr.edge_type, attr.layout.value, attr.is_sorted, attr.size) def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: @@ -26,4 +26,4 @@ def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: return self.store.get(MyGraphStore.key(edge_attr), None) def get_all_edge_attrs(self): - return [EdgeAttr(*key) for key in self.store.keys()] + return [EdgeAttr(*key) for key in self.store] From 3d6eb74e2ddf89da1a57531147fc29bb8b02e924 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 30 Jun 2022 10:51:56 +0200 Subject: [PATCH 0131/2432] `Data.validate()` and `HeteroData.validate()` (#4885) * update * changelog * update * typo --- CHANGELOG.md | 3 +- test/data/test_data.py | 1 + test/data/test_hetero_data.py | 1 + torch_geometric/data/data.py | 36 +++++++++++++++++++ torch_geometric/data/hetero_data.py | 55 ++++++++++++++++++++++++++++- 5 files changed, 94 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55764f4c8505..900693c5152a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
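Since #4883 above adds layout conversion to the `GraphStore` abstraction, here is a short sketch of how the new `coo()` / `csr()` / `csc()` helpers can be exercised through `HeteroData` (the edge type, node counts, and edge count are made up for illustration):

    # Illustrative only: register an edge index in COO layout and read it back
    # in CSC layout, much like neighbor sampling now does internally.
    import torch

    from torch_geometric.data import HeteroData

    data = HeteroData()
    row = torch.randint(0, 100, (500, ))
    col = torch.randint(0, 100, (500, ))
    data.put_edge_index((row, col), edge_type=('paper', 'cites', 'paper'),
                        layout='coo', size=(100, 100))

    # Convert every registered edge index to CSC; dicts are keyed by edge type:
    row_dict, colptr_dict, perm_dict = data.csc()
    assert colptr_dict[('paper', 'cites', 'paper')].numel() == 100 + 1

The returned `perm_dict` records any permutation applied to the edges during conversion, which is why unsorted COO inputs trigger the warning added in the patch when `store=True`.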
## [2.0.5] - 2022-MM-DD ### Added -- Added `LinkeNeighborLoader` support to lightning datamodule ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) +- Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) +- Added `LinkNeighborLoader` support to `LightningDataModule` ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) - Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) diff --git a/test/data/test_data.py b/test/data/test_data.py index 895a9ec961e8..d1ac60ccac17 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -15,6 +15,7 @@ def test_data(): x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t() edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]]) data = Data(x=x, edge_index=edge_index).to(torch.device('cpu')) + data.validate(raise_on_error=True) N = data.num_nodes assert N == 3 diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 463eb0eb9760..9e627a2d096c 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -39,6 +39,7 @@ def test_init_hetero_data(): data['paper', 'paper'].edge_index = edge_index_paper_paper data['paper', 'author'].edge_index = edge_index_paper_author data['author', 'paper'].edge_index = edge_index_author_paper + data.validate(raise_on_error=True) assert len(data) == 2 assert data.node_types == ['v1', 'paper', 'author'] diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 7894c7b423c8..a7423f07ee62 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -1,4 +1,5 @@ import copy +import warnings from collections.abc import Mapping, Sequence from dataclasses import dataclass from typing import ( @@ -514,6 +515,34 @@ def __inc__(self, key: str, value: Any, *args, **kwargs) -> Any: else: return 0 + def validate(self, raise_on_error: bool = True) -> bool: + r"""Validates the correctness of the data.""" + cls_name = self.__class__.__name__ + status = True + + num_nodes = self.num_nodes + if num_nodes is None: + status = False + warn_or_raise(f"'num_nodes' is undefined in '{cls_name}'", + raise_on_error) + + if 'edge_index' in self and self.edge_index.numel() > 0: + if self.edge_index.min() < 0: + status = False + warn_or_raise( + f"'edge_index' contains negative indices in " + f"'{cls_name}' (found {int(self.edge_index.min())})", + raise_on_error) + + if num_nodes is not None and self.edge_index.max() >= num_nodes: + status = False + warn_or_raise( + f"'edge_index' contains larger indices than the number " + f"of nodes ({num_nodes}) in '{cls_name}' " + f"(found {int(self.edge_index.max())})", raise_on_error) + + return status + def debug(self): pass # TODO @@ -879,3 +908,10 @@ def size_repr(key: Any, value: Any, indent: int = 0) -> str: return f'{pad}\033[1m{key}\033[0m={out}' else: return f'{pad}{key}={out}' + + +def warn_or_raise(msg: str, raise_on_error: bool = True): + if raise_on_error: + raise ValueError(msg) + else: + warnings.warn(msg) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 6a0505b2654f..d68b0d7ca880 100644 
--- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -9,7 +9,7 @@ from torch import Tensor from torch_sparse import SparseTensor -from torch_geometric.data.data import BaseData, Data, size_repr +from torch_geometric.data.data import BaseData, Data, size_repr, warn_or_raise from torch_geometric.data.feature_store import FeatureStore, TensorAttr from torch_geometric.data.graph_store import ( EDGE_LAYOUT_TO_ATTR_NAME, @@ -325,6 +325,59 @@ def is_undirected(self) -> bool: edge_index, _, _ = to_homogeneous_edge_index(self) return is_undirected(edge_index, num_nodes=self.num_nodes) + def validate(self, raise_on_error: bool = True) -> bool: + r"""Validates the correctness of the data.""" + cls_name = self.__class__.__name__ + status = True + + for edge_type, store in self._edge_store_dict.items(): + src, _, dst = edge_type + + num_src_nodes = self[src].num_nodes + num_dst_nodes = self[dst].num_nodes + if num_src_nodes is None: + status = False + warn_or_raise( + f"'num_nodes' is undefined in node type '{src}' of " + f"'{cls_name}'", raise_on_error) + + if num_dst_nodes is None: + status = False + warn_or_raise( + f"'num_nodes' is undefined in node type '{dst}' of " + f"'{cls_name}'", raise_on_error) + + if 'edge_index' in store and store.edge_index.numel() > 0: + if store.edge_index.min() < 0: + status = False + warn_or_raise( + f"'edge_index' of edge type {edge_type} contains " + f"negative indices in '{cls_name}' " + f"(found {int(store.edge_index.min())})", + raise_on_error) + + if (num_src_nodes is not None + and store.edge_index[0].max() >= num_src_nodes): + status = False + warn_or_raise( + f"'edge_index' of edge type {edge_type} contains" + f"larger source indices than the number of nodes" + f"({num_src_nodes}) of this node type in '{cls_name}' " + f"(found {int(store.edge_index[0].max())})", + raise_on_error) + + if (num_dst_nodes is not None + and store.edge_index[1].max() >= num_dst_nodes): + status = False + warn_or_raise( + f"'edge_index' of edge type {edge_type} contains" + f"larger destination indices than the number of nodes" + f"({num_dst_nodes}) of this node type in '{cls_name}' " + f"(found {int(store.edge_index[1].max())})", + raise_on_error) + + return status + def debug(self): pass # TODO From 9bb19b6f55d0bfcce755096a516bcd552511deda Mon Sep 17 00:00:00 2001 From: jan-meissner <96341562+jan-meissner@users.noreply.github.com> Date: Thu, 30 Jun 2022 10:52:35 +0200 Subject: [PATCH 0132/2432] Add `batch` and `ptr` vectors for a list of tensors and nested dicts (#4837) * Added support of _ptr and _batch for nested structures, matching the implementation of _collate. Added test in test_batch.py. * Changed comment added pre-commits. * Added support of batch vectors for recursive data structures. * Update CHANGELOG.md Updated CHANGELOG.md. Still missing pull link. * Extended `InMemoryDataset` to infer len() correctly when using lists of tensors. Modified seperate such that InMemoryDataset can deal with lists of tensors correctly. Added test for it. * Added test to cover special elif case in `separate`. * Apply suggestions from code review Fix elif bug, change _batch name, clean-up (by rust1s) Co-authored-by: Matthias Fey * Merged function _batch and _ptr to _batch_and_ptr. * Updated CHANGELOG.md * Fixed Type Hint. * Update collate.py Removed old/wrong code comment. 
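The `validate()` helpers added in #4885 above provide a quick sanity check on node counts and `edge_index` ranges. A tiny sketch (illustrative, not from the patch):

    import torch

    from torch_geometric.data import Data

    data = Data(x=torch.randn(3, 8),
                edge_index=torch.tensor([[0, 1, 2], [1, 2, 0]]))
    assert data.validate(raise_on_error=True)

    bad = Data(x=torch.randn(3, 8),
               edge_index=torch.tensor([[0, 1, 5], [1, 2, 0]]))
    bad.validate(raise_on_error=False)  # warns: index 5 >= num_nodes (3)

The same check runs per edge type for `HeteroData`, comparing source and destination indices against the respective node stores.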
Co-authored-by: janmeissnerRWTH <96341562+janmeissnerRWTH@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 3 ++ test/data/test_batch.py | 39 ++++++++++++++++ test/data/test_dataset.py | 55 +++++++++++++++++++++++ torch_geometric/data/collate.py | 41 ++++++++++++++--- torch_geometric/data/in_memory_dataset.py | 15 ++++--- torch_geometric/data/separate.py | 5 +++ 6 files changed, 147 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 900693c5152a..b5bab5330031 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) - Added `LinkNeighborLoader` support to `LightningDataModule` ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) - Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) @@ -47,6 +48,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed `InMemoryDataset` inferring wrong `len` for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) +- Fixed `Batch.separate` when using it for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Correct docstring for SAGEConv ([#4852](https://github.com/pyg-team/pytorch_geometric/pull/4852)) - Fixed a bug in `TUDataset` where `pre_filter` was not applied whenever `pre_transform` was present - Renamed `RandomTranslate` to `RandomJitter` - the usage of `RandomTranslate` is now deprecated ([#4828](https://github.com/pyg-team/pytorch_geometric/pull/4828)) diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 098710a8d7b4..2e41b0e1de13 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -387,3 +387,42 @@ def test_batch_with_empty_list(): assert batch.nontensor == [[], []] assert batch[0].nontensor == [] assert batch[1].nontensor == [] + + +def test_nested_follow_batch(): + def tr(n, m): + return torch.rand((n, m)) + + d1 = Data(xs=[tr(4, 3), tr(11, 4), tr(1, 2)], a={"aa": tr(11, 3)}, + x=tr(10, 5)) + d2 = Data(xs=[tr(5, 3), tr(14, 4), tr(3, 2)], a={"aa": tr(2, 3)}, + x=tr(11, 5)) + d3 = Data(xs=[tr(6, 3), tr(15, 4), tr(2, 2)], a={"aa": tr(4, 3)}, + x=tr(9, 5)) + d4 = Data(xs=[tr(4, 3), tr(16, 4), tr(1, 2)], a={"aa": tr(8, 3)}, + x=tr(8, 5)) + + # Dataset + data_list = [d1, d2, d3, d4] + + batch = Batch.from_data_list(data_list, follow_batch=['xs', 'a']) + + # assert shapes + assert batch.xs[0].shape == (19, 3) + assert batch.xs[1].shape == (56, 4) + assert batch.xs[2].shape == (7, 2) + assert batch.a['aa'].shape == (25, 3) + + assert len(batch.xs_batch) == 3 + assert len(batch.a_batch) == 1 + + # assert _batch + assert batch.xs_batch[0].tolist() == \ + [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + assert batch.xs_batch[1].tolist() == \ + [0] * 11 + [1] * 14 + [2] * 15 + [3] * 16 + assert batch.xs_batch[2].tolist() == \ + [0] * 1 + [1] * 3 + [2] * 2 + [3] * 1 + + 
assert batch.a_batch['aa'].tolist() == \ + [0] * 11 + [1] * 2 + [2] * 4 + [3] * 8 diff --git a/test/data/test_dataset.py b/test/data/test_dataset.py index e3214fac22e9..6cafea226ced 100644 --- a/test/data/test_dataset.py +++ b/test/data/test_dataset.py @@ -1,4 +1,5 @@ import torch +from torch_sparse import SparseTensor from torch_geometric.data import Data, Dataset, HeteroData, InMemoryDataset @@ -158,3 +159,57 @@ def _process(self): ds = DS3() assert not ds.enter_download assert not ds.enter_process + + +class MyTestDataset2(InMemoryDataset): + def __init__(self, data_list): + super().__init__('/tmp/MyTestDataset2') + self.data, self.slices = self.collate(data_list) + + +def test_lists_of_tensors_in_memory_dataset(): + def tr(n, m): + return torch.rand((n, m)) + + d1 = Data(xs=[tr(4, 3), tr(11, 4), tr(1, 2)]) + d2 = Data(xs=[tr(5, 3), tr(14, 4), tr(3, 2)]) + d3 = Data(xs=[tr(6, 3), tr(15, 4), tr(2, 2)]) + d4 = Data(xs=[tr(4, 3), tr(16, 4), tr(1, 2)]) + + data_list = [d1, d2, d3, d4] + + dataset = MyTestDataset2(data_list) + assert len(dataset) == 4 + assert dataset[0].xs[1].shape == (11, 4) + assert dataset[0].xs[2].shape == (1, 2) + assert dataset[1].xs[0].shape == (5, 3) + assert dataset[2].xs[1].shape == (15, 4) + assert dataset[3].xs[1].shape == (16, 4) + + +class MyTestDataset3(InMemoryDataset): + def __init__(self, data_list): + super().__init__('/tmp/MyTestDataset3') + self.data, self.slices = self.collate(data_list) + + +def test_lists_of_SparseTensors(): + e1 = torch.tensor([[4, 1, 3, 2, 2, 3], [1, 3, 2, 3, 3, 2]]) + e2 = torch.tensor([[0, 1, 4, 7, 2, 9], [7, 2, 2, 1, 4, 7]]) + e3 = torch.tensor([[3, 5, 1, 2, 3, 3], [5, 0, 2, 1, 3, 7]]) + e4 = torch.tensor([[0, 1, 9, 2, 0, 3], [1, 1, 2, 1, 3, 2]]) + adj1 = SparseTensor.from_edge_index(e1, sparse_sizes=(11, 11)) + adj2 = SparseTensor.from_edge_index(e2, sparse_sizes=(22, 22)) + adj3 = SparseTensor.from_edge_index(e3, sparse_sizes=(12, 12)) + adj4 = SparseTensor.from_edge_index(e4, sparse_sizes=(15, 15)) + + d1 = Data(adj_test=[adj1, adj2]) + d2 = Data(adj_test=[adj3, adj4]) + + data_list = [d1, d2] + dataset = MyTestDataset3(data_list) + assert len(dataset) == 2 + assert dataset[0].adj_test[0].sparse_sizes() == (11, 11) + assert dataset[0].adj_test[1].sparse_sizes() == (22, 22) + assert dataset[1].adj_test[0].sparse_sizes() == (12, 12) + assert dataset[1].adj_test[1].sparse_sizes() == (15, 15) diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index beddcee4afef..fd253301d8bf 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -96,12 +96,10 @@ def collate( inc_dict[attr] = incs # Add an additional batch vector for the given attribute: - if (attr in follow_batch and isinstance(slices, Tensor) - and slices.dim() == 1): - repeats = slices[1:] - slices[:-1] - batch = repeat_interleave(repeats.tolist(), device=device) + if attr in follow_batch: + batch, ptr = _batch_and_ptr(slices, device) out_store[f'{attr}_batch'] = batch - out_store[f'{attr}_ptr'] = cumsum(repeats.to(device)) + out_store[f'{attr}_ptr'] = ptr # In case the storage holds node, we add a top-level batch vector it: if (add_batch and isinstance(stores[0], NodeStorage) @@ -199,6 +197,39 @@ def _collate( return values, slices, None +def _batch_and_ptr( + slices: Any, + device: Optional[torch.device] = None, +) -> Tuple[Any, Any]: + if (isinstance(slices, Tensor) and slices.dim() == 1): + # Default case, turn slices tensor into batch. 
+ repeats = slices[1:] - slices[:-1] + batch = repeat_interleave(repeats.tolist(), device=device) + ptr = cumsum(repeats.to(device)) + return batch, ptr + + elif isinstance(slices, Mapping): + # Recursively batch elements of dictionaries. + batch, ptr = {}, {} + for k, v in slices.items(): + batch[k], ptr[k] = _batch_and_ptr(v, device) + return batch, ptr + + elif (isinstance(slices, Sequence) and not isinstance(slices, str) + and isinstance(slices[0], Tensor)): + # Recursively batch elements of lists. + batch, ptr = [], [] + for s in slices: + sub_batch, sub_ptr = _batch_and_ptr(s, device) + batch.append(sub_batch) + ptr.append(sub_ptr) + return batch, ptr + + else: + # Failure of batching, usually due to slices.dim() != 1 + return None, None + + ############################################################################### diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index 33929726f672..2d9f454615e5 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -1,5 +1,5 @@ import copy -from collections.abc import Mapping +from collections.abc import Mapping, Sequence from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union import torch @@ -130,10 +130,13 @@ def copy(self, idx: Optional[IndexType] = None) -> 'InMemoryDataset': return dataset -def nested_iter(mapping: Mapping) -> Iterable: - for key, value in mapping.items(): - if isinstance(value, Mapping): +def nested_iter(node: Union[Mapping, Sequence]) -> Iterable: + if isinstance(node, Mapping): + for key, value in node.items(): for inner_key, inner_value in nested_iter(value): yield inner_key, inner_value - else: - yield key, value + elif isinstance(node, Sequence): + for i, inner_value in enumerate(node): + yield i, inner_value + else: + yield None, node diff --git a/torch_geometric/data/separate.py b/torch_geometric/data/separate.py index b4e708e11533..af8af03ef7ad 100644 --- a/torch_geometric/data/separate.py +++ b/torch_geometric/data/separate.py @@ -90,6 +90,11 @@ def _separate( and not isinstance(value[0], str) and len(value[0]) > 0 and isinstance(value[0][0], (Tensor, SparseTensor))): # Recursively separate elements of lists of lists. + return [elem[idx] for elem in value] + + elif (isinstance(value, Sequence) and not isinstance(value, str) + and isinstance(value[0], (Tensor, SparseTensor))): + # Recursively separate elements of lists of Tensors/SparseTensors. return [ _separate(key, elem, idx, slices[i], incs[i] if decrement else None, batch, store, decrement) From b32bbdb584ce635573f94548a651a14428bbc38a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 30 Jun 2022 13:24:28 +0200 Subject: [PATCH 0133/2432] Fix typos in `MinCutPooling` (#4895) * update * changelog --- CHANGELOG.md | 2 +- README.md | 2 +- torch_geometric/nn/dense/mincut_pool.py | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5bab5330031..552c5a64a0f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,7 +68,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
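# Illustration only (values taken from the tests above): `nested_iter` now
# also walks sequences, so for a list-valued attribute the slices look like
#   {'xs': [tensor([0, 4, 9, 15, 19]), tensor([0, 11, 25, 40, 56]), ...]}
# and the dataset length (here 4) is inferred from a leaf slice tensor rather
# than from the length of the outer list.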
- Refactored reading molecular positions from sdf file for qm9 datasets ([4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) - Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) - The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) -- Fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616), [#4824](https://github.com/pyg-team/pytorch_geometric/pull/4824)) +- Fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616), [#4824](https://github.com/pyg-team/pytorch_geometric/pull/4824), [#4895](https://github.com/pyg-team/pytorch_geometric/pull/4895)) - The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) - Fixed subclass behaviour of `process` and `download` in `Datsaet` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) - Fixed filtering of attributes for loaders in case `__cat_dim__ != 0` ([#4629](https://github.com/pyg-team/pytorch_geometric/pull/4629)) diff --git a/README.md b/README.md index 47f39ccafe32..ae0ba8f16e60 100644 --- a/README.md +++ b/README.md @@ -256,7 +256,7 @@ It is commonly applied to graph-level tasks, which require combining node featur * **[GlobalAttention](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.GlobalAttention)** from Li *et al.*: [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/global_attention.py)] * **[Set2Set](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.aggr.Set2Set)** from Vinyals *et al.*: [Order Matters: Sequence to Sequence for Sets](https://arxiv.org/abs/1511.06391) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/set2set.py)] * **[Sort Pool](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.global_sort_pool)** from Zhang *et al.*: [An End-to-End Deep Learning Architecture for Graph Classification](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf) (AAAI 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/sort_pool.py)] -* **[MinCUT Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.dense.mincut_pool.dense_mincut_pool)** from Bianchi *et al.*: [MinCUT Pooling in Graph Neural Networks](https://arxiv.org/abs/1907.00481) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_mincut_pool.py)] +* **[MinCut Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.dense.mincut_pool.dense_mincut_pool)** from Bianchi *et al.*: [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481) (ICML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_mincut_pool.py)] * **[DMoN Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.dense.dmon_pool.DMoNPooling)** from Tsitsulin *et al.*: [Graph Clustering with Graph Neural 
Networks](https://arxiv.org/abs/2006.16904) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_dmon_pool.py)] * **[Graclus Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.pool.graclus)** from Dhillon *et al.*: [Weighted Graph Cuts without Eigenvectors: A Multilevel Approach](http://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf) (PAMI 2007) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_graclus.py)] * **[Voxel Grid Pooling](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.pool.voxel_grid)** from, *e.g.*, Simonovsky and Komodakis: [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_voxel_grid.py)] diff --git a/torch_geometric/nn/dense/mincut_pool.py b/torch_geometric/nn/dense/mincut_pool.py index 4abfc1e9b448..8cf0b55697fe 100644 --- a/torch_geometric/nn/dense/mincut_pool.py +++ b/torch_geometric/nn/dense/mincut_pool.py @@ -4,8 +4,9 @@ def dense_mincut_pool(x, adj, s, mask=None): - r"""The MinCUt pooling operator from the `"Mincut Pooling in Graph Neural - Networks" `_ paper + r"""The MinCut pooling operator from the `"Spectral Clustering in Graph + Neural Networks for Graph Pooling" `_ + paper .. math:: \mathbf{X}^{\prime} &= {\mathrm{softmax}(\mathbf{S})}^{\top} \cdot @@ -17,7 +18,7 @@ def dense_mincut_pool(x, adj, s, mask=None): based on dense learned assignments :math:`\mathbf{S} \in \mathbb{R}^{B \times N \times C}`. Returns the pooled node feature matrix, the coarsened and symmetrically - normalized adjacency matrix and two auxiliary objectives: (1) The minCUT + normalized adjacency matrix and two auxiliary objectives: (1) The MinCut loss .. math:: @@ -67,7 +68,7 @@ def dense_mincut_pool(x, adj, s, mask=None): out = torch.matmul(s.transpose(1, 2), x) out_adj = torch.matmul(torch.matmul(s.transpose(1, 2), adj), s) - # MinCUT regularization. + # MinCut regularization. mincut_num = _rank3_trace(out_adj) d_flat = torch.einsum('ijk->ij', adj) d = _rank3_diag(d_flat) From e8b6defb2fb4f734cde6784a3761fdaf12161e8c Mon Sep 17 00:00:00 2001 From: Gennaro Gala Date: Thu, 30 Jun 2022 13:32:52 +0200 Subject: [PATCH 0134/2432] Remove unnecessary inclusion of self-loops when sampling negative edges (#4880) * Update autoencoder.py Handling self-loops is only useful when negative edges are not provided. * Drop self-loops removal and addition * changelog Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/models/autoencoder.py | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 552c5a64a0f6..8d01a69e7396 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Removed unnecssary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) - Fixed `InMemoryDataset` inferring wrong `len` for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Fixed `Batch.separate` when using it for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Correct docstring for SAGEConv ([#4852](https://github.com/pyg-team/pytorch_geometric/pull/4852)) diff --git a/torch_geometric/nn/models/autoencoder.py b/torch_geometric/nn/models/autoencoder.py index 9f7e22e1a416..f1e63573021a 100644 --- a/torch_geometric/nn/models/autoencoder.py +++ b/torch_geometric/nn/models/autoencoder.py @@ -94,9 +94,6 @@ def recon_loss(self, z, pos_edge_index, neg_edge_index=None): pos_loss = -torch.log( self.decoder(z, pos_edge_index, sigmoid=True) + EPS).mean() - # Do not include self-loops in negative samples - pos_edge_index, _ = remove_self_loops(pos_edge_index) - pos_edge_index, _ = add_self_loops(pos_edge_index) if neg_edge_index is None: neg_edge_index = negative_sampling(pos_edge_index, z.size(0)) neg_loss = -torch.log(1 - From d2a5c5c4cc462fe1c15f3aa84f8987e5425999de Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 1 Jul 2022 12:57:25 +0200 Subject: [PATCH 0135/2432] Fix test in `LinkNeighborLoader` (#4908) * fix test * changelog * fix test --- CHANGELOG.md | 2 +- test/loader/test_link_neighbor_loader.py | 8 +++++--- torch_geometric/nn/models/autoencoder.py | 6 +----- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d01a69e7396..6ffe23f23878 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
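# Illustration only (hypothetical variable names): with the self-loop handling
# removed above, `GAE.recon_loss` resorts to `negative_sampling` only when no
# negative edges are supplied, i.e.
#   loss = model.recon_loss(z, pos_edge_index)                  # negatives sampled
#   loss = model.recon_loss(z, pos_edge_index, neg_edge_index)  # negatives given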
- Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) - Added `LinkNeighborLoader` support to `LightningDataModule` ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) - Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) -- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877)) +- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) - Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index ec0cbba51ed8..a92399a414de 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -206,8 +206,10 @@ def test_temporal_heterogeneous_link_neighbor_loader(): loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, edge_label_index=('paper', 'paper'), - batch_size=1, time_attr='time') + batch_size=32, time_attr='time') for batch in loader: - mask = batch['paper'].time[0] >= batch['paper'].time[1:] - assert torch.all(mask) + max_time = batch['paper'].time.max() + seed_nodes = batch['paper', 'paper'].edge_label_index.view(-1) + seed_max_time = batch['paper'].time[seed_nodes].max() + assert seed_max_time >= max_time diff --git a/torch_geometric/nn/models/autoencoder.py b/torch_geometric/nn/models/autoencoder.py index f1e63573021a..97421ccf519a 100644 --- a/torch_geometric/nn/models/autoencoder.py +++ b/torch_geometric/nn/models/autoencoder.py @@ -1,10 +1,6 @@ import torch -from torch_geometric.utils import ( - add_self_loops, - negative_sampling, - remove_self_loops, -) +from torch_geometric.utils import negative_sampling from ..inits import reset From e430d940d2e8d6f870485fd09eccaa4fe9781675 Mon Sep 17 00:00:00 2001 From: Thorsten Kurth Date: Sat, 2 Jul 2022 07:14:10 +0200 Subject: [PATCH 0136/2432] Fixing `to_heterogeneous` to work with GPU (#4910) * fixing to_heterogeneous to work with gpu data * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Your Name Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- torch_geometric/data/data.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index a7423f07ee62..7a19bdb414a8 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -657,7 +657,8 @@ def to_heterogeneous(self, node_type: Optional[Tensor] = 
None, node_ids, index_map = {}, torch.empty_like(node_type) for i, key in enumerate(node_type_names): node_ids[i] = (node_type == i).nonzero(as_tuple=False).view(-1) - index_map[node_ids[i]] = torch.arange(len(node_ids[i])) + index_map[node_ids[i]] = torch.arange(len(node_ids[i]), + device=index_map.device) # We iterate over edge types to find the local edge indices: edge_ids = {} From fd944f350489ff071d6bf2d6097747e158d171ab Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Sun, 3 Jul 2022 16:08:00 +0800 Subject: [PATCH 0137/2432] Refactor `PNAConv` to rely on new `Aggregation` (#4864) * migrate PNAConv to new aggregator * add changelog * refactor to use hook for scale * create DegreeScalerAggregation * pep8 * typo * add test * address comments --- CHANGELOG.md | 2 +- test/nn/aggr/test_scaler.py | 24 +++++++++ torch_geometric/nn/aggr/__init__.py | 2 + torch_geometric/nn/aggr/scaler.py | 79 +++++++++++++++++++++++++++++ torch_geometric/nn/conv/pna_conv.py | 54 ++------------------ 5 files changed, 110 insertions(+), 51 deletions(-) create mode 100644 test/nn/aggr/test_scaler.py create mode 100644 torch_geometric/nn/aggr/scaler.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ffe23f23878..5f80882306c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), 
[#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/aggr/test_scaler.py b/test/nn/aggr/test_scaler.py new file mode 100644 index 000000000000..0d205d8b6dbb --- /dev/null +++ b/test/nn/aggr/test_scaler.py @@ -0,0 +1,24 @@ +import pytest +import torch + +from torch_geometric.nn import DegreeScalerAggregation + + +def test_degree_scaler_aggregation(): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + ptr = torch.tensor([0, 2, 5, 6]) + deg = torch.tensor([0, 3, 0, 1, 1, 0]) + + aggrs = ['mean', 'sum', 'max'] + scalers = [ + 'identity', 'amplification', 'attenuation', 'linear', 'inverse_linear' + ] + aggr = DegreeScalerAggregation(aggrs, scalers, deg) + assert str(aggr) == 'DegreeScalerAggregation()' + + out = aggr(x, index) + assert out.size() == (3, 240) + + with pytest.raises(NotImplementedError): + aggr(x, ptr=ptr) diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py index 2204e0b8d9db..dbc6c42ec449 100644 --- a/torch_geometric/nn/aggr/__init__.py +++ b/torch_geometric/nn/aggr/__init__.py @@ -13,6 +13,7 @@ ) from .lstm import LSTMAggregation from .set2set import Set2Set +from .scaler import DegreeScalerAggregation __all__ = classes = [ 'Aggregation', @@ -28,4 +29,5 @@ 'PowerMeanAggregation', 'LSTMAggregation', 'Set2Set', + 'DegreeScalerAggregation', ] diff --git a/torch_geometric/nn/aggr/scaler.py b/torch_geometric/nn/aggr/scaler.py new file mode 100644 index 000000000000..ce703a12e454 --- /dev/null +++ b/torch_geometric/nn/aggr/scaler.py @@ -0,0 +1,79 @@ +from typing import Any, Dict, List, Optional, Union + +import torch +from torch import Tensor + +from torch_geometric.nn.aggr import Aggregation, MultiAggregation +from torch_geometric.utils import degree + + +class DegreeScalerAggregation(Aggregation): + """ + Class that combines together one or more aggregators and then transforms + the result with one or more scalers. The scalers are normalised by the + in-degree of the training set and so must be provided at construction. + + Args: + aggrs (list of string or list or Aggregation): The list of + aggregations given as :class:`~torch_geometric.nn.aggr.Aggregation` + (or any string that automatically resolves to it). + scalers (list of str): Set of scaling function identifiers, namely + :obj:`"identity"`, :obj:`"amplification"`, + :obj:`"attenuation"`, :obj:`"linear"` and + :obj:`"inverse_linear"`. + deg (Tensor): Histogram of in-degrees of nodes in the training set, + used by scalers to normalize. + aggr_kwargs (List[Dict[str, Any]], optional): Arguments passed to the + respective aggregation functions in case it gets automatically + resolved. 
(default: :obj:`None`) + """ + def __init__(self, aggrs: List[Union[Aggregation, str]], + scalers: List[str], deg: Tensor, + aggrs_kwargs: Optional[List[Dict[str, Any]]] = None): + + super().__init__() + + # TODO: Support non-lists + if not isinstance(aggrs, list): + raise RuntimeError("`aggrs` must be a list of aggregations ") + + self.aggr = MultiAggregation(aggrs, aggrs_kwargs) + self.scalers = scalers + + deg = deg.to(torch.float) + num_nodes = int(deg.sum()) + bin_degrees = torch.arange(deg.numel()) + self.avg_deg: Dict[str, float] = { + 'lin': float((bin_degrees * deg).sum()) / num_nodes, + 'log': float(((bin_degrees + 1).log() * deg).sum()) / num_nodes, + 'exp': float((bin_degrees.exp() * deg).sum()) / num_nodes, + } + + def forward(self, x: Tensor, index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, + dim: int = -2) -> Tensor: + + self.assert_index_present(index) + + out = self.aggr(x, index, ptr, dim_size, dim) + deg = degree(index, dtype=out.dtype).clamp_(1) + + size = [1] * len(out.size()) + size[dim] = -1 + deg = deg.view(*size) + outs = [] + for scaler in self.scalers: + if scaler == 'identity': + pass + elif scaler == 'amplification': + out = out * (torch.log(deg + 1) / self.avg_deg['log']) + elif scaler == 'attenuation': + out = out * (self.avg_deg['log'] / torch.log(deg + 1)) + elif scaler == 'linear': + out = out * (deg / self.avg_deg['lin']) + elif scaler == 'inverse_linear': + out = out * (self.avg_deg['lin'] / deg) + else: + raise ValueError(f'Unknown scaler "{scaler}".') + outs.append(out) + return torch.cat(outs, dim=-1) if len(outs) > 1 else outs[0] diff --git a/torch_geometric/nn/conv/pna_conv.py b/torch_geometric/nn/conv/pna_conv.py index 3eee2f43adf8..0371f16e0a60 100644 --- a/torch_geometric/nn/conv/pna_conv.py +++ b/torch_geometric/nn/conv/pna_conv.py @@ -3,12 +3,11 @@ import torch from torch import Tensor from torch.nn import ModuleList, ReLU, Sequential -from torch_scatter import scatter +from torch_geometric.nn.aggr import DegreeScalerAggregation from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.dense.linear import Linear from torch_geometric.typing import Adj, OptTensor -from torch_geometric.utils import degree from ..inits import reset @@ -86,7 +85,9 @@ def __init__(self, in_channels: int, out_channels: int, pre_layers: int = 1, post_layers: int = 1, divide_input: bool = False, **kwargs): - kwargs.setdefault('aggr', None) + aggr = DegreeScalerAggregation(aggregators, scalers, deg) + kwargs.setdefault('aggr', aggr) + super().__init__(node_dim=0, **kwargs) if divide_input: @@ -95,8 +96,6 @@ def __init__(self, in_channels: int, out_channels: int, self.in_channels = in_channels self.out_channels = out_channels - self.aggregators = aggregators - self.scalers = scalers self.edge_dim = edge_dim self.towers = towers self.divide_input = divide_input @@ -178,51 +177,6 @@ def message(self, x_i: Tensor, x_j: Tensor, hs = [nn(h[:, i]) for i, nn in enumerate(self.pre_nns)] return torch.stack(hs, dim=1) - def aggregate(self, inputs: Tensor, index: Tensor, - dim_size: Optional[int] = None) -> Tensor: - - outs = [] - for aggregator in self.aggregators: - if aggregator == 'sum': - out = scatter(inputs, index, 0, None, dim_size, reduce='sum') - elif aggregator == 'mean': - out = scatter(inputs, index, 0, None, dim_size, reduce='mean') - elif aggregator == 'min': - out = scatter(inputs, index, 0, None, dim_size, reduce='min') - elif aggregator == 'max': - out = scatter(inputs, index, 0, None, dim_size, 
reduce='max') - elif aggregator == 'var' or aggregator == 'std': - mean = scatter(inputs, index, 0, None, dim_size, reduce='mean') - mean_squares = scatter(inputs * inputs, index, 0, None, - dim_size, reduce='mean') - out = mean_squares - mean * mean - if aggregator == 'std': - out = torch.sqrt(torch.relu(out) + 1e-5) - else: - raise ValueError(f'Unknown aggregator "{aggregator}".') - outs.append(out) - out = torch.cat(outs, dim=-1) - - deg = degree(index, dim_size, dtype=inputs.dtype) - deg = deg.clamp_(1).view(-1, 1, 1) - - outs = [] - for scaler in self.scalers: - if scaler == 'identity': - pass - elif scaler == 'amplification': - out = out * (torch.log(deg + 1) / self.avg_deg['log']) - elif scaler == 'attenuation': - out = out * (self.avg_deg['log'] / torch.log(deg + 1)) - elif scaler == 'linear': - out = out * (deg / self.avg_deg['lin']) - elif scaler == 'inverse_linear': - out = out * (self.avg_deg['lin'] / deg) - else: - raise ValueError(f'Unknown scaler "{scaler}".') - outs.append(out) - return torch.cat(outs, dim=-1) - def __repr__(self): return (f'{self.__class__.__name__}({self.in_channels}, ' f'{self.out_channels}, towers={self.towers}, ' From 985ba6dc7b48618fcd0d5e5c7e8a1d6157966075 Mon Sep 17 00:00:00 2001 From: Saurav Maheshkar Date: Thu, 7 Jul 2022 12:36:10 +0530 Subject: [PATCH 0138/2432] feat(docker): Add opencontainers image-spec to `Dockerfile` (#4897) For reference more annotations can be found [here](https://github.com/opencontainers/image-spec/blob/main/annotations.md) Co-authored-by: Matthias Fey --- docker/Dockerfile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docker/Dockerfile b/docker/Dockerfile index 7fcb8d4cea67..58407e815c55 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,12 @@ FROM ubuntu:18.04 +# metainformation +LABEL org.opencontainers.image.version = "2.0.4" +LABEL org.opencontainers.image.authors = "Matthias Fey" +LABEL org.opencontainers.image.source = "https://github.com/pyg-team/pytorch_geometric" +LABEL org.opencontainers.image.licenses = "MIT" +LABEL org.opencontainers.image.base.name="docker.io/library/ubuntu:18.04" + RUN apt-get update && apt-get install -y apt-transport-https ca-certificates && \ rm -rf /var/lib/apt/lists/* From 1ad526c7fbedadd9d0a17515ca28924b809b3ca6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 7 Jul 2022 10:15:32 +0200 Subject: [PATCH 0139/2432] Fixed `data.subgraph` for 0-dim tensors (#4932) * fix cluster data * changelog --- CHANGELOG.md | 1 + torch_geometric/data/storage.py | 8 ++++---- torch_geometric/loader/cluster.py | 16 +++++++--------- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f80882306c8..c5b5b6d9ee6f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
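# Illustration only: with the `value.dim() == 0` checks added below, scalar
# attributes such as `y=torch.tensor(1.0)` are excluded from the node-/edge-
# attribute checks, so `data.subgraph(...)` and `ClusterData` keep them as-is
# instead of trying to index them.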
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed `data.subgraph` generation for 0-dim tensors ([#4932](https://github.com/pyg-team/pytorch_geometric/pull/4932)) - Removed unnecssary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) - Fixed `InMemoryDataset` inferring wrong `len` for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Fixed `Batch.separate` when using it for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index 1b51571f277b..569c1de5585b 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -301,7 +301,7 @@ def is_node_attr(self, key: str) -> bool: cat_dim = self._parent().__cat_dim__(key, value, self) if not isinstance(value, Tensor): return False - if value.size(cat_dim) != self.num_nodes: + if value.dim() == 0 or value.size(cat_dim) != self.num_nodes: return False return True @@ -385,7 +385,7 @@ def is_edge_attr(self, key: str) -> bool: cat_dim = self._parent().__cat_dim__(key, value, self) if not isinstance(value, Tensor): return False - if value.size(cat_dim) != self.num_edges: + if value.dim() == 0 or value.size(cat_dim) != self.num_edges: return False return True @@ -466,7 +466,7 @@ def is_node_attr(self, key: str) -> bool: num_nodes, num_edges = self.num_nodes, self.num_edges if not isinstance(value, Tensor): return False - if value.size(cat_dim) != num_nodes: + if value.dim() == 0 or value.size(cat_dim) != num_nodes: return False if num_nodes != num_edges: return True @@ -479,7 +479,7 @@ def is_edge_attr(self, key: str) -> bool: num_nodes, num_edges = self.num_nodes, self.num_edges if not isinstance(value, Tensor): return False - if value.size(cat_dim) != num_edges: + if value.dim() == 0 or value.size(cat_dim) != num_edges: return False if num_nodes != num_edges: return True diff --git a/torch_geometric/loader/cluster.py b/torch_geometric/loader/cluster.py index 7e6c9392c8f1..5b86e9d3b508 100644 --- a/torch_geometric/loader/cluster.py +++ b/torch_geometric/loader/cluster.py @@ -60,17 +60,15 @@ def __init__(self, data, num_parts: int, recursive: bool = False, self.perm = perm def __permute_data__(self, data, node_idx, adj): - data = copy.copy(data) - N = data.num_nodes + out = copy.copy(data) + for key, value in data.items(): + if data.is_node_attr(key): + out[key] = value[node_idx] - for key, item in data: - if isinstance(item, torch.Tensor) and item.size(0) == N: - data[key] = item[node_idx] + out.edge_index = None + out.adj = adj - data.edge_index = None - data.adj = adj - - return data + return out def __len__(self): return self.partptr.numel() - 1 From 5f77394f6293d5e51374e35d93c32cabce8155e1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 7 Jul 2022 10:38:07 +0200 Subject: [PATCH 0140/2432] Add TorchScript support to `DegreeScalerAggregation` (#4934) * update * changelog * update * update * update --- .github/workflows/testing.yml | 2 +- CHANGELOG.md | 2 +- test/nn/aggr/test_scaler.py | 7 +-- torch_geometric/nn/aggr/base.py | 23 +++++----- torch_geometric/nn/aggr/basic.py | 8 +++- torch_geometric/nn/aggr/scaler.py | 67 +++++++++++++++++------------ torch_geometric/nn/conv/pna_conv.py | 16 +------ 
torch_geometric/utils/degree.py | 5 ++- 8 files changed, 68 insertions(+), 62 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 00aefbc30867..02d5c2701436 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -37,7 +37,7 @@ jobs: - name: Install internal dependencies run: | pip install torch-scatter -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html - pip install torch-sparse -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html + pip install torch-sparse==0.6.13 -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html pip install torch-cluster -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html pip install torch-spline-conv -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html diff --git a/CHANGELOG.md b/CHANGELOG.md index c5b5b6d9ee6f..6b00aa5311f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872), [#4934](https://github.com/pyg-team/pytorch_geometric/pull/4934)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite 
([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/test/nn/aggr/test_scaler.py b/test/nn/aggr/test_scaler.py index 0d205d8b6dbb..e630a1e926ca 100644 --- a/test/nn/aggr/test_scaler.py +++ b/test/nn/aggr/test_scaler.py @@ -10,15 +10,16 @@ def test_degree_scaler_aggregation(): ptr = torch.tensor([0, 2, 5, 6]) deg = torch.tensor([0, 3, 0, 1, 1, 0]) - aggrs = ['mean', 'sum', 'max'] - scalers = [ + aggr = ['mean', 'sum', 'max'] + scaler = [ 'identity', 'amplification', 'attenuation', 'linear', 'inverse_linear' ] - aggr = DegreeScalerAggregation(aggrs, scalers, deg) + aggr = DegreeScalerAggregation(aggr, scaler, deg) assert str(aggr) == 'DegreeScalerAggregation()' out = aggr(x, index) assert out.size() == (3, 240) + assert torch.allclose(torch.jit.script(aggr)(x, index), out) with pytest.raises(NotImplementedError): aggr(x, ptr=ptr) diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 26e09b41865d..54b114162849 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -1,4 +1,3 @@ -from abc import ABC, abstractmethod from typing import Optional, Tuple import torch @@ -8,9 +7,10 @@ from torch_geometric.utils import to_dense_batch -class Aggregation(torch.nn.Module, ABC): +class Aggregation(torch.nn.Module): r"""An abstract base class for implementing custom aggregations.""" - @abstractmethod + + # @abstractmethod def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: @@ -73,23 +73,22 @@ def assert_index_present(self, index: Optional[Tensor]): # TODO Currently, not all aggregators support `ptr`. 
This assert helps # to ensure that we require `index` to be passed to the computation: if index is None: - raise NotImplementedError(f"'{self.__class__.__name__}' requires " - f"'index' to be specified") + raise NotImplementedError( + "Aggregation requires 'index' to be specified") def assert_sorted_index(self, index: Optional[Tensor]): if index is not None and not torch.all(index[:-1] <= index[1:]): - raise ValueError(f"Can not perform aggregation inside " - f"'{self.__class__.__name__}' since the " - f"'index' tensor is not sorted") + raise ValueError("Can not perform aggregation since the 'index' " + "tensor is not sorted") def assert_two_dimensional_input(self, x: Tensor, dim: int): if x.dim() != 2: - raise ValueError(f"'{self.__class__.__name__}' requires " - f"two-dimensional inputs (got '{x.dim()}')") + raise ValueError(f"Aggregation requires two-dimensional inputs " + f"(got '{x.dim()}')") if dim not in [-2, 0]: - raise ValueError(f"'{self.__class__.__name__}' needs to perform " - f"aggregation in first dimension (got '{dim}')") + raise ValueError(f"Aggregation needs to perform aggregation in " + f"first dimension (got '{dim}')") # Helper methods ########################################################## diff --git a/torch_geometric/nn/aggr/basic.py b/torch_geometric/nn/aggr/basic.py index 28993f785e1e..34930d2dcf73 100644 --- a/torch_geometric/nn/aggr/basic.py +++ b/torch_geometric/nn/aggr/basic.py @@ -54,11 +54,15 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, return mean_2 - mean * mean -class StdAggregation(VarAggregation): +class StdAggregation(Aggregation): + def __init__(self): + super().__init__() + self.var_aggr = VarAggregation() + def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: - var = super().forward(x, index, ptr, dim_size, dim) + var = self.var_aggr(x, index, ptr, dim_size, dim) return torch.sqrt(var.relu() + 1e-5) diff --git a/torch_geometric/nn/aggr/scaler.py b/torch_geometric/nn/aggr/scaler.py index ce703a12e454..314ffd1052fd 100644 --- a/torch_geometric/nn/aggr/scaler.py +++ b/torch_geometric/nn/aggr/scaler.py @@ -4,45 +4,54 @@ from torch import Tensor from torch_geometric.nn.aggr import Aggregation, MultiAggregation +from torch_geometric.nn.resolver import aggregation_resolver as aggr_resolver from torch_geometric.utils import degree class DegreeScalerAggregation(Aggregation): - """ - Class that combines together one or more aggregators and then transforms - the result with one or more scalers. The scalers are normalised by the - in-degree of the training set and so must be provided at construction. + r"""Combines one or more aggregators and transforms its output with one or + more scalers as introduced in the `"Principal Neighbourhood Aggregation for + Graph Nets" `_ paper. + The scalers are normalised by the in-degree of the training set and so must + be provided at time of construction. + See :class:`torch_geometric.nn.conv.PNAConv` for more information. Args: - aggrs (list of string or list or Aggregation): The list of - aggregations given as :class:`~torch_geometric.nn.aggr.Aggregation` - (or any string that automatically resolves to it). - scalers (list of str): Set of scaling function identifiers, namely - :obj:`"identity"`, :obj:`"amplification"`, - :obj:`"attenuation"`, :obj:`"linear"` and - :obj:`"inverse_linear"`. + aggr (string or list or Aggregation): The aggregation scheme to use. 
+ See :class:`~torch_geometric.nn.conv.MessagePassing` for more + information. + scaler (str or list): Set of scaling function identifiers, namely one + or more of :obj:`"identity"`, :obj:`"amplification"`, + :obj:`"attenuation"`, :obj:`"linear"` and :obj:`"inverse_linear"`. deg (Tensor): Histogram of in-degrees of nodes in the training set, used by scalers to normalize. - aggr_kwargs (List[Dict[str, Any]], optional): Arguments passed to the - respective aggregation functions in case it gets automatically + aggr_kwargs (Dict[str, Any], optional): Arguments passed to the + respective aggregation function in case it gets automatically resolved. (default: :obj:`None`) """ - def __init__(self, aggrs: List[Union[Aggregation, str]], - scalers: List[str], deg: Tensor, - aggrs_kwargs: Optional[List[Dict[str, Any]]] = None): - + def __init__( + self, + aggr: Union[str, List[str], Aggregation], + scaler: Union[str, List[str]], + deg: Tensor, + aggr_kwargs: Optional[List[Dict[str, Any]]] = None, + ): super().__init__() - # TODO: Support non-lists - if not isinstance(aggrs, list): - raise RuntimeError("`aggrs` must be a list of aggregations ") + if isinstance(aggr, (str, Aggregation)): + self.aggr = aggr_resolver(aggr, **(aggr_kwargs or {})) + elif isinstance(aggr, (tuple, list)): + self.aggr = MultiAggregation(aggr, aggr_kwargs) + else: + raise ValueError(f"Only strings, list, tuples and instances of" + f"`torch_geometric.nn.aggr.Aggregation` are " + f"valid aggregation schemes (got '{type(aggr)}')") - self.aggr = MultiAggregation(aggrs, aggrs_kwargs) - self.scalers = scalers + self.scaler = [scaler] if isinstance(aggr, str) else scaler deg = deg.to(torch.float) num_nodes = int(deg.sum()) - bin_degrees = torch.arange(deg.numel()) + bin_degrees = torch.arange(deg.numel(), device=deg.device) self.avg_deg: Dict[str, float] = { 'lin': float((bin_degrees * deg).sum()) / num_nodes, 'log': float(((bin_degrees + 1).log() * deg).sum()) / num_nodes, @@ -53,16 +62,19 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: + # TODO Currently, `degree` can only operate on `index`: self.assert_index_present(index) out = self.aggr(x, index, ptr, dim_size, dim) - deg = degree(index, dtype=out.dtype).clamp_(1) + assert index is not None + deg = degree(index, dtype=out.dtype).clamp_(1) size = [1] * len(out.size()) size[dim] = -1 - deg = deg.view(*size) + deg = deg.view(size) + outs = [] - for scaler in self.scalers: + for scaler in self.scaler: if scaler == 'identity': pass elif scaler == 'amplification': @@ -74,6 +86,7 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, elif scaler == 'inverse_linear': out = out * (self.avg_deg['lin'] / deg) else: - raise ValueError(f'Unknown scaler "{scaler}".') + raise ValueError(f"Unknown scaler '{scaler}'") outs.append(out) + return torch.cat(outs, dim=-1) if len(outs) > 1 else outs[0] diff --git a/torch_geometric/nn/conv/pna_conv.py b/torch_geometric/nn/conv/pna_conv.py index 0371f16e0a60..d5ee0f9e144b 100644 --- a/torch_geometric/nn/conv/pna_conv.py +++ b/torch_geometric/nn/conv/pna_conv.py @@ -1,4 +1,4 @@ -from typing import Dict, List, Optional +from typing import List, Optional import torch from torch import Tensor @@ -86,9 +86,7 @@ def __init__(self, in_channels: int, out_channels: int, divide_input: bool = False, **kwargs): aggr = DegreeScalerAggregation(aggregators, scalers, deg) - kwargs.setdefault('aggr', aggr) - - super().__init__(node_dim=0, **kwargs) + 
super().__init__(aggr=aggr, node_dim=0, **kwargs) if divide_input: assert in_channels % towers == 0 @@ -103,15 +101,6 @@ def __init__(self, in_channels: int, out_channels: int, self.F_in = in_channels // towers if divide_input else in_channels self.F_out = self.out_channels // towers - deg = deg.to(torch.float) - num_nodes = int(deg.sum()) - bin_degrees = torch.arange(deg.numel()) - self.avg_deg: Dict[str, float] = { - 'lin': float((bin_degrees * deg).sum()) / num_nodes, - 'log': float(((bin_degrees + 1).log() * deg).sum()) / num_nodes, - 'exp': float((bin_degrees.exp() * deg).sum()) / num_nodes, - } - if self.edge_dim is not None: self.edge_encoder = Linear(edge_dim, self.F_in) @@ -147,7 +136,6 @@ def reset_parameters(self): def forward(self, x: Tensor, edge_index: Adj, edge_attr: OptTensor = None) -> Tensor: """""" - if self.divide_input: x = x.view(-1, self.towers, self.F_in) else: diff --git a/torch_geometric/utils/degree.py b/torch_geometric/utils/degree.py index d34c6ed52001..eeee39e67463 100644 --- a/torch_geometric/utils/degree.py +++ b/torch_geometric/utils/degree.py @@ -1,12 +1,13 @@ from typing import Optional import torch +from torch import Tensor from .num_nodes import maybe_num_nodes -def degree(index, num_nodes: Optional[int] = None, - dtype: Optional[torch.dtype] = None): +def degree(index: Tensor, num_nodes: Optional[int] = None, + dtype: Optional[torch.dtype] = None) -> Tensor: r"""Computes the (unweighted) degree of a given one-dimensional index tensor. From 843ba6ca0a8bf1c34482f8bda4557e6887d64eb0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 7 Jul 2022 11:11:53 +0200 Subject: [PATCH 0141/2432] Fix: `len(batch) == batch.num_graphs` (#4931) * fix lenght of batch * changelog * fix * fix test --- CHANGELOG.md | 1 + test/data/test_batch.py | 18 +++++------- test/datasets/test_enzymes.py | 26 ++++++++--------- test/datasets/test_planetoid.py | 37 ++++++++++++------------- test/loader/test_dataloader.py | 10 +++---- test/loader/test_shadow.py | 4 +-- test/transforms/test_rooted_subgraph.py | 2 +- test/transforms/test_to_superpixels.py | 32 ++++++++++----------- torch_geometric/data/batch.py | 3 ++ torch_geometric/data/collate.py | 7 ++++- 10 files changed, 71 insertions(+), 69 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b00aa5311f9..14f844e3324b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
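# A minimal usage sketch (illustration only) of the new `len` semantics for
# batches: `len(batch)` now matches `batch.num_graphs` instead of the number
# of attribute keys.
import torch
from torch_geometric.data import Batch, Data

data_list = [Data(x=torch.rand(3, 8)) for _ in range(4)]
batch = Batch.from_data_list(data_list)
assert len(batch) == batch.num_graphs == 4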
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- `len(batch)` will now return the number of graphs inside the batch, not the number of attributes ([#4931](https://github.com/pyg-team/pytorch_geometric/pull/4931)) - Fixed `data.subgraph` generation for 0-dim tensors ([#4932](https://github.com/pyg-team/pytorch_geometric/pull/4932)) - Removed unnecssary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) - Fixed `InMemoryDataset` inferring wrong `len` for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 2e41b0e1de13..33c5d33ca6a6 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -52,8 +52,7 @@ def test_batch(): assert str(batch) == ('DataBatch(x=[3], edge_index=[2, 4], y=[1], ' 'x_sp=[3, 1, nnz=3], adj=[3, 3, nnz=4], s=[1], ' 'array=[1], num_nodes=3, batch=[3], ptr=[2])') - assert batch.num_graphs == 1 - assert len(batch) == 10 + assert batch.num_graphs == len(batch) == 1 assert batch.x.tolist() == [1, 2, 3] assert batch.y.tolist() == [1] assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() @@ -72,8 +71,7 @@ def test_batch(): 'x_sp=[9, 1, nnz=9], adj=[9, 9, nnz=12], s=[3], ' 's_batch=[3], s_ptr=[4], array=[3], num_nodes=9, ' 'batch=[9], ptr=[4])') - assert batch.num_graphs == 3 - assert len(batch) == 12 + assert batch.num_graphs == len(batch) == 3 assert batch.x.tolist() == [1, 2, 3, 1, 2, 1, 2, 3, 4] assert batch.y.tolist() == [1, 2, 3] assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() @@ -174,7 +172,7 @@ def __cat_dim__(self, key, value, *args, **kwargs): assert str(batch) == ('MyDataBatch(x=[5], y=[2], foo=[2, 4], batch=[5], ' 'ptr=[3])') - assert len(batch) == 5 + assert batch.num_graphs == len(batch) == 2 assert batch.x.tolist() == [1, 2, 3, 1, 2] assert batch.foo.size() == (2, 4) assert batch.foo[0].tolist() == foo1.tolist() @@ -208,7 +206,7 @@ def test_pickling(): assert batch.num_nodes == 20 assert batch.__class__.__name__ == 'DataBatch' - assert len(batch) == 3 + assert batch.num_graphs == len(batch) == 4 os.remove(path) @@ -230,8 +228,7 @@ def test_recursive_batch(): batch = Batch.from_data_list([data1, data2]) - assert len(batch) == 5 - assert batch.num_graphs == 2 + assert batch.num_graphs == len(batch) == 2 assert batch.num_nodes == 90 assert torch.allclose(batch.x['1'], @@ -267,7 +264,7 @@ def test_batching_of_batches(): batch = Batch.from_data_list([data, data]) batch = Batch.from_data_list([batch, batch]) - assert len(batch) == 2 + assert batch.num_graphs == len(batch) == 2 assert batch.x[0:2].tolist() == data.x.tolist() assert batch.x[2:4].tolist() == data.x.tolist() assert batch.x[4:6].tolist() == data.x.tolist() @@ -296,8 +293,7 @@ def test_hetero_batch(): batch = Batch.from_data_list([data1, data2]) - assert len(batch) == 5 - assert batch.num_graphs == 2 + assert batch.num_graphs == len(batch) == 2 assert batch.num_nodes == 450 assert torch.allclose(batch['p'].x[:100], data1['p'].x) diff --git a/test/datasets/test_enzymes.py b/test/datasets/test_enzymes.py index 7b7116a1ec37..45c16383f1ba 100644 --- a/test/datasets/test_enzymes.py +++ b/test/datasets/test_enzymes.py @@ -22,25 +22,24 @@ def test_enzymes(get_dataset): assert len(dataset[mask]) == 100 loader = DataLoader(dataset, 
batch_size=len(dataset)) - for data in loader: - assert data.num_graphs == 600 + for batch in loader: + assert batch.num_graphs == len(batch) == 600 - avg_num_nodes = data.num_nodes / data.num_graphs + avg_num_nodes = batch.num_nodes / batch.num_graphs assert pytest.approx(avg_num_nodes, abs=1e-2) == 32.63 - avg_num_edges = data.num_edges / (2 * data.num_graphs) + avg_num_edges = batch.num_edges / (2 * batch.num_graphs) assert pytest.approx(avg_num_edges, abs=1e-2) == 62.14 - assert len(data) == 5 - assert list(data.x.size()) == [data.num_nodes, 3] - assert list(data.y.size()) == [data.num_graphs] - assert data.y.max() + 1 == 6 - assert list(data.batch.size()) == [data.num_nodes] - assert data.ptr.numel() == data.num_graphs + 1 + assert list(batch.x.size()) == [batch.num_nodes, 3] + assert list(batch.y.size()) == [batch.num_graphs] + assert batch.y.max() + 1 == 6 + assert list(batch.batch.size()) == [batch.num_nodes] + assert batch.ptr.numel() == batch.num_graphs + 1 - assert data.has_isolated_nodes() - assert not data.has_self_loops() - assert data.is_undirected() + assert batch.has_isolated_nodes() + assert not batch.has_self_loops() + assert batch.is_undirected() loader = DataListLoader(dataset, batch_size=len(dataset)) for data_list in loader: @@ -49,7 +48,6 @@ def test_enzymes(get_dataset): dataset.transform = ToDense(num_nodes=126) loader = DenseDataLoader(dataset, batch_size=len(dataset)) for data in loader: - assert len(data) == 4 assert list(data.x.size()) == [600, 126, 3] assert list(data.adj.size()) == [600, 126, 126] assert list(data.mask.size()) == [600, 126] diff --git a/test/datasets/test_planetoid.py b/test/datasets/test_planetoid.py index 3cc2634f442c..b5db9456ca59 100644 --- a/test/datasets/test_planetoid.py +++ b/test/datasets/test_planetoid.py @@ -8,25 +8,24 @@ def test_citeseer(get_dataset): assert len(dataset) == 1 assert dataset.__repr__() == 'CiteSeer()' - for data in loader: - assert data.num_graphs == 1 - assert data.num_nodes == 3327 - assert data.num_edges / 2 == 4552 - - assert len(data) == 8 - assert list(data.x.size()) == [data.num_nodes, 3703] - assert list(data.y.size()) == [data.num_nodes] - assert data.y.max() + 1 == 6 - assert data.train_mask.sum() == 6 * 20 - assert data.val_mask.sum() == 500 - assert data.test_mask.sum() == 1000 - assert (data.train_mask & data.val_mask & data.test_mask).sum() == 0 - assert list(data.batch.size()) == [data.num_nodes] - assert data.ptr.tolist() == [0, data.num_nodes] - - assert data.has_isolated_nodes() - assert not data.has_self_loops() - assert data.is_undirected() + for batch in loader: + assert batch.num_graphs == len(batch) == 1 + assert batch.num_nodes == 3327 + assert batch.num_edges / 2 == 4552 + + assert list(batch.x.size()) == [batch.num_nodes, 3703] + assert list(batch.y.size()) == [batch.num_nodes] + assert batch.y.max() + 1 == 6 + assert batch.train_mask.sum() == 6 * 20 + assert batch.val_mask.sum() == 500 + assert batch.test_mask.sum() == 1000 + assert (batch.train_mask & batch.val_mask & batch.test_mask).sum() == 0 + assert list(batch.batch.size()) == [batch.num_nodes] + assert batch.ptr.tolist() == [0, batch.num_nodes] + + assert batch.has_isolated_nodes() + assert not batch.has_self_loops() + assert batch.is_undirected() def test_citeseer_with_full_split(get_dataset): diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index a793066c95de..afa200c0902b 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -39,7 +39,7 @@ def 
test_dataloader(num_workers): assert len(loader) == 2 for batch in loader: - assert len(batch) == 8 + assert batch.num_graphs == len(batch) == 2 assert batch.batch.tolist() == [0, 0, 0, 1, 1, 1] assert batch.ptr.tolist() == [0, 3, 6] assert batch.x.tolist() == [[1], [1], [1], [1], [1], [1]] @@ -58,7 +58,7 @@ def test_dataloader(num_workers): assert len(loader) == 2 for batch in loader: - assert len(batch) == 10 + assert batch.num_graphs == len(batch) == 2 assert batch.edge_index_batch.tolist() == [0, 0, 0, 0, 1, 1, 1, 1] @@ -72,10 +72,10 @@ def test_multiprocessing(): queue.put(batch) batch = queue.get() - assert len(batch) == 3 + assert batch.num_graphs == len(batch) == 2 batch = queue.get() - assert len(batch) == 3 + assert batch.num_graphs == len(batch) == 2 def test_pin_memory(): @@ -104,7 +104,7 @@ def test_heterogeneous_dataloader(num_workers): assert len(loader) == 2 for batch in loader: - assert len(batch) == 5 + assert batch.num_graphs == len(batch) == 2 assert batch.num_nodes == 600 for store in batch.stores: diff --git a/test/loader/test_shadow.py b/test/loader/test_shadow.py index ac853a6145d1..ddd8a959ac5a 100644 --- a/test/loader/test_shadow.py +++ b/test/loader/test_shadow.py @@ -20,7 +20,7 @@ def test_shadow_k_hop_sampler(): assert len(loader) == 1 batch1 = next(iter(loader)) - assert len(batch1) == 7 + assert batch1.num_graphs == len(batch1) == 2 assert batch1.batch.tolist() == [0, 0, 0, 0, 1, 1, 1] assert batch1.ptr.tolist() == [0, 4, 7] @@ -42,7 +42,7 @@ def test_shadow_k_hop_sampler(): assert len(loader) == 1 batch2 = next(iter(loader)) - assert len(batch2) == 6 + assert batch2.num_graphs == len(batch2) == 2 assert batch1.batch.tolist() == batch2.batch.tolist() assert batch1.ptr.tolist() == batch2.ptr.tolist() diff --git a/test/transforms/test_rooted_subgraph.py b/test/transforms/test_rooted_subgraph.py index c1e593da0fb4..1cc025e1888a 100644 --- a/test/transforms/test_rooted_subgraph.py +++ b/test/transforms/test_rooted_subgraph.py @@ -73,7 +73,7 @@ def test_rooted_subgraph_minibatch(): loader = DataLoader([data, data], batch_size=2) batch = next(iter(loader)) batch = batch.map_data() - assert len(batch) == 6 + assert batch.num_graphs == len(batch) == 2 assert batch.x.size() == (14, 8) assert batch.edge_index.size() == (2, 16) diff --git a/test/transforms/test_to_superpixels.py b/test/transforms/test_to_superpixels.py index 1c6c4011d0f1..fc5acfbf80ca 100644 --- a/test/transforms/test_to_superpixels.py +++ b/test/transforms/test_to_superpixels.py @@ -57,13 +57,13 @@ def test_to_superpixels(): assert y == 7 loader = DataLoader(dataset, batch_size=2, shuffle=False) - for data, y in loader: - assert len(data) == 4 - assert data.pos.dim() == 2 and data.pos.size(1) == 2 - assert data.x.dim() == 2 and data.x.size(1) == 1 - assert data.batch.dim() == 1 - assert data.ptr.dim() == 1 - assert data.pos.size(0) == data.x.size(0) == data.batch.size(0) + for batch, y in loader: + assert batch.num_graphs == len(batch) == 2 + assert batch.pos.dim() == 2 and batch.pos.size(1) == 2 + assert batch.x.dim() == 2 and batch.x.size(1) == 1 + assert batch.batch.dim() == 1 + assert batch.ptr.dim() == 1 + assert batch.pos.size(0) == batch.x.size(0) == batch.batch.size(0) assert y.tolist() == [7, 2] break @@ -81,15 +81,15 @@ def test_to_superpixels(): assert y == 7 loader = DataLoader(dataset, batch_size=2, shuffle=False) - for data, y in loader: - assert len(data) == 6 - assert data.pos.dim() == 2 and data.pos.size(1) == 2 - assert data.x.dim() == 2 and data.x.size(1) == 1 - assert 
data.batch.dim() == 1 - assert data.ptr.dim() == 1 - assert data.pos.size(0) == data.x.size(0) == data.batch.size(0) - assert data.seg.size() == (2, 28, 28) - assert data.img.size() == (2, 1, 28, 28) + for batch, y in loader: + assert batch.num_graphs == len(batch) == 2 + assert batch.pos.dim() == 2 and batch.pos.size(1) == 2 + assert batch.x.dim() == 2 and batch.x.size(1) == 1 + assert batch.batch.dim() == 1 + assert batch.ptr.dim() == 1 + assert batch.pos.size(0) == batch.x.size(0) == batch.batch.size(0) + assert batch.seg.size() == (2, 28, 28) + assert batch.img.size() == (2, 1, 28, 28) assert y.tolist() == [7, 2] break diff --git a/torch_geometric/data/batch.py b/torch_geometric/data/batch.py index 43e553ab1097..b24ff9b32d66 100644 --- a/torch_geometric/data/batch.py +++ b/torch_geometric/data/batch.py @@ -180,6 +180,9 @@ def num_graphs(self) -> int: else: raise ValueError("Can not infer the number of graphs") + def __len__(self) -> int: + return self.num_graphs + def __reduce__(self): state = self.__dict__.copy() return DynamicInheritanceGetter(), self.__class__.__bases__, state diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index fd253301d8bf..93a541858dc1 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -142,7 +142,12 @@ def _collate( # Write directly into shared memory to avoid an extra copy: numel = sum(value.numel() for value in values) storage = elem.storage()._new_shared(numel) - out = elem.new(storage) + shape = list(elem.size()) + if cat_dim is None or elem.dim() == 0: + shape = [len(values)] + shape + else: + shape[cat_dim] = int(slices[-1]) + out = elem.new(storage).resize_(*shape) else: out = None From ee582f21044300d433925f5dcdc6cb407447e5bc Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 7 Jul 2022 11:25:17 +0200 Subject: [PATCH 0142/2432] Fix `degree` bug in `PNAConv` (#4935) * fix bug * changelog --- CHANGELOG.md | 2 +- torch_geometric/nn/aggr/scaler.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 14f844e3324b..39387ef25e6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
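Aside: a minimal usage sketch (not taken from any commit in this series) of the `len(batch)` behaviour exercised by the test updates and the `Batch.__len__` addition above; it assumes only the standard `Data` / `Batch.from_data_list` API.

    import torch
    from torch_geometric.data import Batch, Data

    data_list = [Data(x=torch.randn(3, 8)), Data(x=torch.randn(5, 8))]
    batch = Batch.from_data_list(data_list)

    # With the new `__len__`, both expressions agree on the number of graphs:
    assert batch.num_graphs == len(batch) == 2
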
- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) - Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) - Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872), [#4934](https://github.com/pyg-team/pytorch_geometric/pull/4934)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872), [#4934](https://github.com/pyg-team/pytorch_geometric/pull/4934), [#4935](https://github.com/pyg-team/pytorch_geometric/pull/4935)) - Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) - Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) - Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) diff --git a/torch_geometric/nn/aggr/scaler.py b/torch_geometric/nn/aggr/scaler.py index 314ffd1052fd..7c480e648774 100644 --- a/torch_geometric/nn/aggr/scaler.py +++ b/torch_geometric/nn/aggr/scaler.py @@ -68,7 +68,7 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, out = self.aggr(x, index, ptr, dim_size, dim) assert index is not None - deg = degree(index, dtype=out.dtype).clamp_(1) + deg = degree(index, num_nodes=dim_size, dtype=out.dtype).clamp_(1) size = [1] * len(out.size()) size[dim] = -1 deg = deg.view(size) From 
02a702687b600f34b13bfac4aa78c67510af6e4f Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Thu, 7 Jul 2022 14:45:00 +0300 Subject: [PATCH 0143/2432] Add notebook tutorial for `torch_geometric.nn.aggr` package (#4927) * Add notebook tutorial for package * changelog * Update docs/source/notes/colabs.rst Co-authored-by: Guohao Li Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + docs/source/notes/colabs.rst | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 39387ef25e6e..24ae0a8ee31e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) - Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) - Added `LinkNeighborLoader` support to `LightningDataModule` ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) diff --git a/docs/source/notes/colabs.rst b/docs/source/notes/colabs.rst index f45b5fb64f96..24aaca5f4ed0 100644 --- a/docs/source/notes/colabs.rst +++ b/docs/source/notes/colabs.rst @@ -9,6 +9,7 @@ We have prepared a list of colab notebooks that practically introduces you to th 4. `Scaling Graph Neural Networks `__ 5. `Point Cloud Classification with Graph Neural Networks `__ 6. `Explaining GNN Model Predictions using Captum `__ +7. `Customizing Aggregations within Message Passing `__ **Stanford CS224W Graph ML Tutorials:** From 31866b5a5478cc50b4e7b1c11450f6330e48ab51 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Thu, 7 Jul 2022 21:58:12 +0300 Subject: [PATCH 0144/2432] Add `normalization_resolver` to resolve normalization layers (#4926) * Add * changelog * Simplify test * Change test for normalization_resolver Co-authored-by: Guohao Li --- CHANGELOG.md | 1 + test/nn/test_resolver.py | 19 +++++++++++++++++++ torch_geometric/nn/resolver.py | 16 ++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24ae0a8ee31e..7800caaf33cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
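Aside: the `torch_geometric.nn.aggr` package referenced by the tutorial link above can also be exercised directly. The short sketch below is illustrative only; the `MeanAggregation` class and its `(x, index)` call signature come from the released package and are not shown in these patches.

    import torch
    from torch_geometric.nn.aggr import MeanAggregation

    aggr = MeanAggregation()
    x = torch.randn(6, 16)                    # six node embeddings
    index = torch.tensor([0, 0, 1, 1, 1, 2])  # graph assignment per node
    out = aggr(x, index)                      # shape [3, 16], one row per graph
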
## [2.0.5] - 2022-MM-DD ### Added +- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926)) - Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) - Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) diff --git a/test/nn/test_resolver.py b/test/nn/test_resolver.py index 8cb2e9caff4d..88874079cae4 100644 --- a/test/nn/test_resolver.py +++ b/test/nn/test_resolver.py @@ -5,6 +5,7 @@ from torch_geometric.nn.resolver import ( activation_resolver, aggregation_resolver, + normalization_resolver, ) @@ -34,3 +35,21 @@ def test_aggregation_resolver(aggr_tuple): aggr_module, aggr_repr = aggr_tuple assert isinstance(aggregation_resolver(aggr_module()), aggr_module) assert isinstance(aggregation_resolver(aggr_repr), aggr_module) + + +@pytest.mark.parametrize('norm_tuple', [ + (torch_geometric.nn.norm.BatchNorm, 'batch_norm', (16, )), + (torch_geometric.nn.norm.InstanceNorm, 'instance_norm', (16, )), + (torch_geometric.nn.norm.LayerNorm, 'layer_norm', (16, )), + (torch_geometric.nn.norm.GraphNorm, 'graph_norm', (16, )), + (torch_geometric.nn.norm.GraphSizeNorm, 'graphsize_norm', ()), + (torch_geometric.nn.norm.PairNorm, 'pair_norm', ()), + (torch_geometric.nn.norm.MessageNorm, 'message_norm', ()), + (torch_geometric.nn.norm.DiffGroupNorm, 'diffgroup_norm', (16, 4)), +]) +def test_normalization_resolver(norm_tuple): + norm_module, norm_repr, norm_args = norm_tuple + assert isinstance(normalization_resolver(norm_module(*norm_args)), + norm_module) + assert isinstance(normalization_resolver(norm_repr, *norm_args), + norm_module) diff --git a/torch_geometric/nn/resolver.py b/torch_geometric/nn/resolver.py index 8d843da52e85..00a00ed05930 100644 --- a/torch_geometric/nn/resolver.py +++ b/torch_geometric/nn/resolver.py @@ -61,6 +61,22 @@ def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): return resolver(acts, act_dict, query, base_cls, *args, **kwargs) +# Normalization Resolver ###################################################### + + +def normalization_resolver(query: Union[Any, str], *args, **kwargs): + import torch + + import torch_geometric.nn.norm as norm + base_cls = torch.nn.Module + norms = [ + norm for norm in vars(norm).values() + if isinstance(norm, type) and issubclass(norm, base_cls) + ] + norm_dict = {} + return resolver(norms, norm_dict, query, base_cls, *args, **kwargs) + + # Aggregation Resolver ######################################################## From fdb1ab0527edc93d81d488b0e4725639bb092dd5 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Thu, 7 Jul 2022 13:33:29 -0700 Subject: [PATCH 0145/2432] `GraphStore`: `Data`, `HeteroData` respect `is_sorted` (#4922) --- CHANGELOG.md | 2 +- test/loader/test_neighbor_loader.py | 9 +++ torch_geometric/data/data.py | 20 ++++-- torch_geometric/data/graph_store.py | 101 ++++++++++++++++------------ torch_geometric/data/hetero_data.py | 18 +++-- 5 files changed, 93 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7800caaf33cb..0a8a4dbc6d33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
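Aside: an illustrative sketch of the resolver added above, mirroring the parametrized test. It resolves a normalization layer either from a string identifier or from an already constructed module.

    from torch_geometric.nn.norm import BatchNorm
    from torch_geometric.nn.resolver import normalization_resolver

    norm = normalization_resolver('batch_norm', 16)  # resolve by name
    assert isinstance(norm, BatchNorm)

    norm = normalization_resolver(BatchNorm(16))     # pass an instance through
    assert isinstance(norm, BatchNorm)
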
- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 789403f2747e..445cfdf840ee 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -322,6 +322,15 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): edge_type=('author', 'to', 'paper'), layout='csc', size=(200, 100)) + # COO (sorted): + edge_index = get_edge_index(200, 200, 100) + edge_index = edge_index[:, edge_index[1].argsort()] + data['author', 'to', 'author'].edge_index = edge_index + coo = (edge_index[0], edge_index[1]) + graph_store.put_edge_index(edge_index=coo, + edge_type=('author', 'to', 'author'), + layout='coo', size=(200, 200), is_sorted=True) + # Construct neighbor loaders: loader1 = NeighborLoader(data, batch_size=20, input_nodes=('paper', range(100)), diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 7a19bdb414a8..a2ad1eef9c1f 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -842,6 +842,12 @@ def _put_edge_index(self, edge_index: EdgeTensorType, attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) setattr(self, attr_name, attr_val) + # Set edge attributes: + if not hasattr(self, '_edge_attrs'): + self._edge_attrs = {} + + self._edge_attrs[edge_attr.layout.value] = edge_attr + # Set size, if possible: size = edge_attr.size if size is not None: @@ -866,13 +872,13 @@ def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: def get_all_edge_attrs(self) -> List[EdgeAttr]: r"""Returns 
`EdgeAttr` objects corresponding to the edge indices stored in `Data` and their layouts""" - out = [] - for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): - if attr_name in self: - out.append( - EdgeAttr(edge_type=None, layout=layout, - size=(self.num_nodes, self.num_nodes))) - return out + if not hasattr(self, '_edge_attrs'): + return [] + + edge_attrs = self._edge_attrs.values() + for attr in edge_attrs: + attr.size = (self.num_nodes, self.num_nodes) + return edge_attrs ############################################################################### diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py index 48e66bd17503..8f35792c254f 100644 --- a/torch_geometric/data/graph_store.py +++ b/torch_geometric/data/graph_store.py @@ -117,9 +117,12 @@ def get_edge_index(self, *args, **kwargs) -> EdgeTensorType: Raises: KeyError: if the edge index corresponding to attr was not found. """ + edge_attr = self._edge_attr_cls.cast(*args, **kwargs) edge_attr.layout = EdgeLayout(edge_attr.layout) # Override is_sorted for CSC and CSR: + # TODO treat is_sorted specially in this function, where is_sorted=True + # returns an edge index sorted by column. edge_attr.is_sorted = edge_attr.is_sorted or (edge_attr.layout in [ EdgeLayout.CSC, EdgeLayout.CSR ]) @@ -131,9 +134,57 @@ def get_edge_index(self, *args, **kwargs) -> EdgeTensorType: # Layout Conversion ####################################################### + def _edge_to_layout( + self, + attr: EdgeAttr, + layout: EdgeLayout, + ) -> Tuple[Tensor, Tensor, OptTensor]: + from_tuple = self.get_edge_index(attr) + + if layout == EdgeLayout.COO: + if attr.layout == EdgeLayout.CSR: + col = from_tuple[1] + row = torch.ops.torch_sparse.ptr2ind(from_tuple[0], + col.numel()) + else: + row = from_tuple[0] + col = torch.ops.torch_sparse.ptr2ind(from_tuple[1], + row.numel()) + perm = None + + elif layout == EdgeLayout.CSR: + # We convert to CSR by converting to CSC on the transpose + if attr.layout == EdgeLayout.COO: + adj = edge_tensor_type_to_adj_type( + attr, (from_tuple[1], from_tuple[0])) + else: + adj = edge_tensor_type_to_adj_type(attr, from_tuple).t() + + # NOTE we set is_sorted=False here as is_sorted refers to + # the edge_index being sorted by the destination node + # (column), but here we deal with the transpose + attr_copy = copy.copy(attr) + attr_copy.is_sorted = False + attr_copy.size = None if attr.size is None else (attr.size[1], + attr.size[0]) + + # Actually rowptr, col, perm + row, col, perm = to_csc(adj, attr_copy, device='cpu') + + else: + adj = edge_tensor_type_to_adj_type(attr, from_tuple) + + # Actually colptr, row, perm + col, row, perm = to_csc(adj, attr, device='cpu') + + return row, col, perm + # TODO support `replace` to replace the existing edge index. 
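Aside: a rough sketch of what a minimal in-memory backend built on these hooks might look like. Only the `_put_edge_index`, `_get_edge_index` and `get_all_edge_attrs` overrides mirror the `Data`/`HeteroData` implementations in this series; the class name and the dictionary-based storage are invented for illustration, and a real backend would likely need to handle more cases (for example `is_sorted` and sizes).

    from typing import List

    from torch_geometric.data.graph_store import EdgeAttr, GraphStore


    class InMemoryGraphStore(GraphStore):
        def __init__(self):
            super().__init__()
            self._store = {}  # Maps (edge_type, layout) to (edge_index, attr).

        def _put_edge_index(self, edge_index, edge_attr: EdgeAttr) -> bool:
            key = (edge_attr.edge_type, edge_attr.layout)
            self._store[key] = (edge_index, edge_attr)
            return True

        def _get_edge_index(self, edge_attr: EdgeAttr):
            entry = self._store.get((edge_attr.edge_type, edge_attr.layout))
            return None if entry is None else entry[0]

        def get_all_edge_attrs(self) -> List[EdgeAttr]:
            return [attr for (_, attr) in self._store.values()]
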
- def _to_layout(self, layout: EdgeLayout, - store: bool = False) -> ConversionOutputType: + def _all_edges_to_layout( + self, + layout: EdgeLayout, + store: bool = False, + ) -> ConversionOutputType: # Obtain all edge attributes, grouped by type: edge_attrs = self.get_all_edge_attrs() edge_type_to_attrs: Dict[Any, List[EdgeAttr]] = defaultdict(list) @@ -165,45 +216,7 @@ def _to_layout(self, layout: EdgeLayout, else: from_attr = edge_attrs[edge_layouts.index(EdgeLayout.CSR)] - from_tuple = self.get_edge_index(from_attr) - - # Convert to the new layout: - if layout == EdgeLayout.COO: - if from_attr.layout == EdgeLayout.CSR: - col = from_tuple[1] - row = torch.ops.torch_sparse.ptr2ind( - from_tuple[0], col.numel()) - else: - row = from_tuple[0] - col = torch.ops.torch_sparse.ptr2ind( - from_tuple[1], row.numel()) - perm = None - - elif layout == EdgeLayout.CSR: - # We convert to CSR by converting to CSC on the transpose - if from_attr.layout == EdgeLayout.COO: - adj = edge_tensor_type_to_adj_type( - from_attr, (from_tuple[1], from_tuple[0])) - else: - adj = edge_tensor_type_to_adj_type( - from_attr, from_tuple).t() - - # NOTE we set is_sorted=False here as is_sorted refers to - # the edge_index being sorted by the destination node - # (column), but here we deal with the transpose - from_attr_copy = copy.copy(from_attr) - from_attr_copy.is_sorted = False - from_attr_copy.size = None if from_attr.size is None else ( - from_attr.size[1], from_attr.size[0]) - - # Actually rowptr, col, perm - row, col, perm = to_csc(adj, from_attr_copy, device='cpu') - - else: - adj = edge_tensor_type_to_adj_type(from_attr, from_tuple) - - # Actually colptr, row, perm - col, row, perm = to_csc(adj, from_attr, device='cpu') + row, col, perm = self._edge_to_layout(from_attr, layout) row_dict[from_attr.edge_type] = row col_dict[from_attr.edge_type] = col @@ -235,17 +248,17 @@ def _to_layout(self, layout: EdgeLayout, def coo(self, store: bool = False) -> ConversionOutputType: r"""Converts the edge indices in the graph store to COO format, optionally storing the converted edge indices in the graph store.""" - return self._to_layout(EdgeLayout.COO, store) + return self._all_edges_to_layout(EdgeLayout.COO, store) def csr(self, store: bool = False) -> ConversionOutputType: r"""Converts the edge indices in the graph store to CSR format, optionally storing the converted edge indices in the graph store.""" - return self._to_layout(EdgeLayout.CSR, store) + return self._all_edges_to_layout(EdgeLayout.CSR, store) def csc(self, store: bool = False) -> ConversionOutputType: r"""Converts the edge indices in the graph store to CSC format, optionally storing the converted edge indices in the graph store.""" - return self._to_layout(EdgeLayout.CSC, store) + return self._all_edges_to_layout(EdgeLayout.CSC, store) # Additional methods ###################################################### diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index d68b0d7ca880..1cdf571ae088 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -754,6 +754,13 @@ def _put_edge_index(self, edge_index: EdgeTensorType, attr_val = edge_tensor_type_to_adj_type(edge_attr, edge_index) setattr(self[edge_attr.edge_type], attr_name, attr_val) + # Set edge attributes: + if not hasattr(self[edge_attr.edge_type], '_edge_attrs'): + self[edge_attr.edge_type]._edge_attrs = {} + + self[edge_attr.edge_type]._edge_attrs[ + edge_attr.layout.value] = edge_attr + key = 
self._to_canonical(edge_attr.edge_type) src, _, dst = key @@ -781,11 +788,12 @@ def get_all_edge_attrs(self) -> List[EdgeAttr]: indices stored in `HeteroData` and their layouts.""" out = [] for edge_type, edge_store in self.edge_items(): - for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): - if attr_name in edge_store: - out.append( - EdgeAttr(edge_type=edge_type, layout=layout, - size=self[edge_type].size())) + if not hasattr(self[edge_type], '_edge_attrs'): + continue + edge_attrs = self[edge_type]._edge_attrs.values() + for attr in edge_attrs: + attr.size = self[edge_type].size() + out.extend(edge_attrs) return out From db5e6d9c14be028ee2bb804ed2c2a11d5d9cc0bf Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Thu, 7 Jul 2022 15:00:40 -0700 Subject: [PATCH 0146/2432] `NeighborLoader`: support temporal sampling with `(FeatureStore, GraphStore)` (#4929) --- CHANGELOG.md | 2 +- test/loader/test_neighbor_loader.py | 45 +++++++++++++++++++++++ torch_geometric/data/data.py | 13 +++++++ torch_geometric/data/hetero_data.py | 21 ++++++++++- torch_geometric/loader/neighbor_loader.py | 31 ++++++++++++---- 5 files changed, 102 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a8a4dbc6d33..e03d3bd0a2be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git 
a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 445cfdf840ee..3afd5223f80c 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -4,6 +4,7 @@ from torch_sparse import SparseTensor from torch_geometric.data import Data, HeteroData +from torch_geometric.data.feature_store import TensorAttr from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv, to_hetero from torch_geometric.testing import withRegisteredOp @@ -359,3 +360,47 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): 'paper', 'to', 'author'].edge_index.size()) assert (batch1['author', 'to', 'paper'].edge_index.size() == batch1[ 'author', 'to', 'paper'].edge_index.size()) + + +@withRegisteredOp('torch_sparse.hetero_temporal_neighbor_sample') +@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData]) +@pytest.mark.parametrize('GraphStore', [MyGraphStore, HeteroData]) +def test_temporal_custom_neighbor_loader_on_cora(get_dataset, FeatureStore, + GraphStore): + # Initialize dataset (once): + dataset = get_dataset(name='Cora') + data = dataset[0] + + # Initialize feature store, graph store, and reference: + feature_store = FeatureStore() + graph_store = GraphStore() + hetero_data = HeteroData() + + feature_store.put_tensor(data.x, group_name='paper', attr_name='x', + index=None) + hetero_data['paper'].x = data.x + + feature_store.put_tensor(torch.arange(data.num_nodes), group_name='paper', + attr_name='time', index=None) + hetero_data['paper'].time = torch.arange(data.num_nodes) + + num_nodes = data.x.size(dim=0) + graph_store.put_edge_index(edge_index=data.edge_index, + edge_type=('paper', 'to', 'paper'), + layout='coo', size=(num_nodes, num_nodes)) + hetero_data['paper', 'to', 'paper'].edge_index = data.edge_index + + loader1 = NeighborLoader(hetero_data, num_neighbors=[-1, -1], + input_nodes='paper', time_attr='time', + batch_size=128) + + loader2 = NeighborLoader( + (feature_store, graph_store), + num_neighbors=[-1, -1], + input_nodes=TensorAttr(group_name='paper', attr_name='x'), + time_attr='time', + batch_size=128, + ) + + for batch1, batch2 in zip(loader1, loader2): + assert torch.equal(batch1['paper'].time, batch2['paper'].time) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index a2ad1eef9c1f..2c3200e4ab25 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -874,10 +874,23 @@ def get_all_edge_attrs(self) -> List[EdgeAttr]: in `Data` and their layouts""" if not hasattr(self, '_edge_attrs'): return [] + added_attrs = set() + # Check edges added via _put_edge_index: edge_attrs = self._edge_attrs.values() for attr in edge_attrs: attr.size = (self.num_nodes, self.num_nodes) + added_attrs.add(attr.layout) + + # Check edges added through regular interface: + # TODO deprecate this and store edge attributes for all edges in + # EdgeStorage + for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): + if attr_name in self and layout not in added_attrs: + edge_attrs.append( + EdgeAttr(edge_type=None, layout=layout, + size=(self.num_nodes, self.num_nodes))) + return edge_attrs diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 1cdf571ae088..ebb79dbc2d18 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -695,7 +695,7 @@ def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: out = self._node_store_dict.get(attr.group_name, None) if out: # Group name 
exists, handle index or create new attribute name: - val = getattr(out, attr.attr_name) + val = getattr(out, attr.attr_name, None) if val is not None: val[attr.index] = tensor else: @@ -787,13 +787,30 @@ def get_all_edge_attrs(self) -> List[EdgeAttr]: r"""Returns a list of `EdgeAttr` objects corresponding to the edge indices stored in `HeteroData` and their layouts.""" out = [] - for edge_type, edge_store in self.edge_items(): + added_attrs = set() + + # Check edges added via _put_edge_index: + for edge_type, _ in self.edge_items(): if not hasattr(self[edge_type], '_edge_attrs'): continue edge_attrs = self[edge_type]._edge_attrs.values() for attr in edge_attrs: attr.size = self[edge_type].size() + added_attrs.add((attr.edge_type, attr.layout)) out.extend(edge_attrs) + + # Check edges added through regular interface: + # TODO deprecate this and store edge attributes for all edges in + # EdgeStorage + for edge_type, edge_store in self.edge_items(): + for layout, attr_name in EDGE_LAYOUT_TO_ATTR_NAME.items(): + # Don't double count: + if attr_name in edge_store and ((edge_type, layout) + not in added_attrs): + out.append( + EdgeAttr(edge_type=edge_type, layout=layout, + size=self[edge_type].size())) + return out diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index ff3c0e7b9cfa..cdf89f97b473 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -95,9 +95,21 @@ def __init__( # TODO support `collect` on `FeatureStore` self.node_time_dict = None if time_attr is not None: - raise ValueError( - f"'time_attr' attribute not yet supported for " - f"'{data[0].__class__.__name__}' object") + # We need to obtain all features with 'attr_name=time_attr' + # from the feature store and store them in node_time_dict. To + # do so, we make an explicit feature store GET call here with + # the relevant 'TensorAttr's + time_attrs = [ + attr for attr in feature_store.get_all_tensor_attrs() + if attr.attr_name == time_attr + ] + for attr in time_attrs: + attr.index = None + time_tensors = feature_store.multi_get_tensor(time_attrs) + self.node_time_dict = { + time_attr.group_name: time_tensor + for time_attr, time_tensor in zip(time_attrs, time_tensors) + } # Obtain all node and edge metadata: node_attrs = feature_store.get_all_tensor_attrs() @@ -475,9 +487,12 @@ def to_index(tensor): if isinstance(input_nodes, Tensor): return None, to_index(input_nodes) + # Can't infer number of nodes from a group_name; need an attr_name if isinstance(input_nodes, str): - num_nodes = feature_store.get_tensor_size(input_nodes)[0] - return input_nodes, range(num_nodes) + raise NotImplementedError( + f"Cannot infer the number of nodes from a single string " + f"(got '{input_nodes}'). Please pass a more explicit " + f"representation. ") if isinstance(input_nodes, (list, tuple)): assert len(input_nodes) == 2 @@ -485,8 +500,10 @@ def to_index(tensor): node_type, input_nodes = input_nodes if input_nodes is None: - num_nodes = feature_store.get_tensor_size(input_nodes)[0] - return input_nodes[0], range(num_nodes) + raise NotImplementedError( + f"Cannot infer the number of nodes from a node type alone " + f"(got '{input_nodes}'). Please pass a more explicit " + f"representation. 
") return node_type, to_index(input_nodes) assert isinstance(input_nodes, TensorAttr) From d220afee9d4e826bfb7cf4eaddcaddfbec71ee34 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Fri, 8 Jul 2022 15:24:36 +0300 Subject: [PATCH 0147/2432] Add node-wise normalization mode in `LayerNorm` (#4944) * Add node-wise normalization in LayerNorm * changelog * update Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update Co-authored-by: Guohao Li Co-authored-by: Matthias Fey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/nn/norm/test_layer_norm.py | 7 ++-- torch_geometric/nn/norm/layer_norm.py | 54 +++++++++++++++++---------- 3 files changed, 40 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e03d3bd0a2be..15090708aa02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added +- Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) - Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926)) - Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) - Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) diff --git a/test/nn/norm/test_layer_norm.py b/test/nn/norm/test_layer_norm.py index e75e2889fe8b..79a94498c8d1 100644 --- a/test/nn/norm/test_layer_norm.py +++ b/test/nn/norm/test_layer_norm.py @@ -6,12 +6,13 @@ @pytest.mark.parametrize('affine', [True, False]) -def test_layer_norm(affine): +@pytest.mark.parametrize('mode', ['graph', 'node']) +def test_layer_norm(affine, mode): x = torch.randn(100, 16) batch = torch.zeros(100, dtype=torch.long) - norm = LayerNorm(16, affine=affine) - assert norm.__repr__() == 'LayerNorm(16)' + norm = LayerNorm(16, affine=affine, mode=mode) + assert norm.__repr__() == f'LayerNorm(16, mode={mode})' if is_full_test(): torch.jit.script(norm) diff --git a/torch_geometric/nn/norm/layer_norm.py b/torch_geometric/nn/norm/layer_norm.py index 81c607e031f3..61c6677d9e1d 100644 --- a/torch_geometric/nn/norm/layer_norm.py +++ b/torch_geometric/nn/norm/layer_norm.py @@ -1,4 +1,5 @@ import torch +import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from torch_scatter import scatter @@ -29,12 +30,19 @@ class LayerNorm(torch.nn.Module): affine (bool, optional): If set to :obj:`True`, this module has learnable affine parameters :math:`\gamma` and :math:`\beta`. (default: :obj:`True`) + mode (str, optinal): The normalization mode to use for layer + normalization. (:obj:`"graph"` or :obj:`"node"`). If :obj:`"graph"` + is used, each graph will be considered as an element to be + normalized. If `"node"` is used, each node will be considered as + an element to be normalized. 
(default: :obj:`"graph"`) """ - def __init__(self, in_channels, eps=1e-5, affine=True): + def __init__(self, in_channels: int, eps: float = 1e-5, + affine: bool = True, mode: str = 'graph'): super().__init__() self.in_channels = in_channels self.eps = eps + self.mode = mode if affine: self.weight = Parameter(torch.Tensor(in_channels)) @@ -51,31 +59,39 @@ def reset_parameters(self): def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: """""" - if batch is None: - x = x - x.mean() - out = x / (x.std(unbiased=False) + self.eps) + if self.mode == 'graph': + if batch is None: + x = x - x.mean() + out = x / (x.std(unbiased=False) + self.eps) - else: - batch_size = int(batch.max()) + 1 + else: + batch_size = int(batch.max()) + 1 + + norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1) + norm = norm.mul_(x.size(-1)).view(-1, 1) + + mean = scatter(x, batch, dim=0, dim_size=batch_size, + reduce='add').sum(dim=-1, keepdim=True) / norm - norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1) - norm = norm.mul_(x.size(-1)).view(-1, 1) + x = x - mean.index_select(0, batch) - mean = scatter(x, batch, dim=0, dim_size=batch_size, - reduce='add').sum(dim=-1, keepdim=True) / norm + var = scatter(x * x, batch, dim=0, dim_size=batch_size, + reduce='add').sum(dim=-1, keepdim=True) + var = var / norm - x = x - mean.index_select(0, batch) + out = x / (var + self.eps).sqrt().index_select(0, batch) - var = scatter(x * x, batch, dim=0, dim_size=batch_size, - reduce='add').sum(dim=-1, keepdim=True) - var = var / norm + if self.weight is not None and self.bias is not None: + out = out * self.weight + self.bias - out = x / (var + self.eps).sqrt().index_select(0, batch) + return out - if self.weight is not None and self.bias is not None: - out = out * self.weight + self.bias + if self.mode == 'node': + return F.layer_norm(x, (self.in_channels, ), self.weight, + self.bias, self.eps) - return out + raise ValueError(f"Unknow normalization mode: {self.mode}") def __repr__(self): - return f'{self.__class__.__name__}({self.in_channels})' + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'mode={self.mode})') From 15a6fff364d814d0bb40a5cfc9a3c99fe749ee98 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 9 Jul 2022 11:19:40 +0200 Subject: [PATCH 0148/2432] fix mempool test (#4948) --- test/nn/pool/test_mem_pool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/pool/test_mem_pool.py b/test/nn/pool/test_mem_pool.py index eacfaa916435..c196ec7a901c 100644 --- a/test/nn/pool/test_mem_pool.py +++ b/test/nn/pool/test_mem_pool.py @@ -22,5 +22,5 @@ def test_mem_pool(): assert out1.size() == (5, 2, 8) assert out2.size() == (5, 1, 4) assert S[~mask].sum() == 0 - assert S[mask].sum() == x.size(0) + assert round(S[mask].sum().item()) == x.size(0) assert float(loss) > 0 From 5bc03a0f5966ed35ba6e24afe4df1abc35356db4 Mon Sep 17 00:00:00 2001 From: Padarn Wilson Date: Sat, 9 Jul 2022 18:02:43 +0800 Subject: [PATCH 0149/2432] Make benchmark arguments more flexible (#4862) * update changelog * make benchmark arguments more flexible * make benchmark arguments more flexible * add ast for lists * update more argument --- CHANGELOG.md | 2 +- benchmark/loader/neighbor_loader.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15090708aa02..d7b3d0e8d925 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) -- Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815)) +- Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815), [#4862](https://github.com/pyg-team/pytorch_geometric/pull/4862/files)) - Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) diff --git a/benchmark/loader/neighbor_loader.py b/benchmark/loader/neighbor_loader.py index cd8f7ed0186b..32e91ffc4ced 100644 --- a/benchmark/loader/neighbor_loader.py +++ b/benchmark/loader/neighbor_loader.py @@ -1,4 +1,5 @@ import argparse +import ast import os.path as osp from timeit import default_timer @@ -83,10 +84,14 @@ def run(args: argparse.ArgumentParser) -> None: add('--device', default='cpu') add('--datasets', nargs="+", default=['arxiv', 'products', 'mag']) add('--root', default='../../data') - add('--batch-sizes', default=[8192, 4096, 2048, 1024, 512]) - add('--eval-batch-sizes', default=[16384, 8192, 4096, 2048, 1024, 512]) - add('--homo-neighbor_sizes', default=[[10, 5], [15, 10, 5], [20, 15, 10]]) - add('--hetero-neighbor_sizes', default=[[5], [10], [10, 5]], type=int) + add('--batch-sizes', default=[8192, 4096, 2048, 1024, 512], + type=ast.literal_eval) + add('--eval-batch-sizes', default=[16384, 8192, 4096, 2048, 1024, 512], + type=ast.literal_eval) + add('--homo-neighbor_sizes', default=[[10, 5], [15, 10, 5], [20, 15, 10]], + type=ast.literal_eval) + add('--hetero-neighbor_sizes', default=[[5], [10], [10, 5]], + type=ast.literal_eval) add('--num-workers', default=0) add('--runs', default=3) From 64d44fe55b9fceed3ca4d94e6dab9889d8ca3fa2 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Mon, 11 Jul 2022 13:43:11 +0300 Subject: [PATCH 0150/2432] Support `normalization_resolver` in `MLP` (#4951) * Support normalization_resolver in MLP * chngelog * update Co-authored-by: Matthias Fey * update Co-authored-by: Matthias Fey * Fix mlp norm in test * changelog Co-authored-by: Guohao Li Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- test/nn/models/test_mlp.py | 12 ++++---- torch_geometric/nn/models/mlp.py | 48 +++++++++++++++++++++----------- 3 files changed, 39 insertions(+), 23 
deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7b3d0e8d925..0a7bc391753e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.0.5] - 2022-MM-DD ### Added - Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) -- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926)) +- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951)) - Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) - Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) diff --git a/test/nn/models/test_mlp.py b/test/nn/models/test_mlp.py index 92d94e46783b..db9cf7877d23 100644 --- a/test/nn/models/test_mlp.py +++ b/test/nn/models/test_mlp.py @@ -7,15 +7,17 @@ from torch_geometric.testing import is_full_test -@pytest.mark.parametrize('batch_norm,act_first,plain_last', - product([False, True], [False, True], [False, True])) -def test_mlp(batch_norm, act_first, plain_last): +@pytest.mark.parametrize( + 'norm, act_first, plain_last', + product(['batch_norm', None], [False, True], [False, True]), +) +def test_mlp(norm, act_first, plain_last): x = torch.randn(4, 16) torch.manual_seed(12345) mlp = MLP( [16, 32, 32, 64], - batch_norm=batch_norm, + norm=norm, act_first=act_first, plain_last=plain_last, ) @@ -33,7 +35,7 @@ def test_mlp(batch_norm, act_first, plain_last): hidden_channels=32, out_channels=64, num_layers=3, - batch_norm=batch_norm, + norm=norm, act_first=act_first, plain_last=plain_last, ) diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 908f7c1621e2..5fcc5083bf0f 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -1,12 +1,16 @@ +import warnings from typing import Any, Dict, List, Optional, Union import torch import torch.nn.functional as F from torch import Tensor -from torch.nn import BatchNorm1d, Identity +from torch.nn import Identity from torch_geometric.nn.dense.linear import Linear -from torch_geometric.nn.resolver import activation_resolver +from torch_geometric.nn.resolver import ( + activation_resolver, + normalization_resolver, +) from torch_geometric.typing import NoneType @@ -48,23 +52,22 @@ class MLP(torch.nn.Module): embedding. (default: :obj:`0.`) act (str or Callable, optional): The non-linear activation function to use. (default: :obj:`"relu"`) - batch_norm (bool, optional): If set to :obj:`False`, will not make use - of batch normalization. (default: :obj:`True`) act_first (bool, optional): If set to :obj:`True`, activation is applied before normalization. (default: :obj:`False`) act_kwargs (Dict[str, Any], optional): Arguments passed to the respective activation function defined by :obj:`act`. (default: :obj:`None`) - batch_norm_kwargs (Dict[str, Any], optional): Arguments passed to - :class:`torch.nn.BatchNorm1d` in case :obj:`batch_norm == True`. + norm (str or Callable, optional): The normalization function to + use. 
(default: :obj:`"batch_norm"`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) plain_last (bool, optional): If set to :obj:`False`, will apply non-linearity, batch normalization and dropout to the last layer as well. (default: :obj:`True`) bias (bool, optional): If set to :obj:`False`, the module will not learn additive biases. (default: :obj:`True`) - relu_first (bool, optional): Deprecated in favor of :obj:`act_first`. - (default: :obj:`False`) + **kwargs (optional): Additional deprecated arguments of the MLP layer. """ def __init__( self, @@ -76,18 +79,25 @@ def __init__( num_layers: Optional[int] = None, dropout: float = 0., act: str = "relu", - batch_norm: bool = True, act_first: bool = False, act_kwargs: Optional[Dict[str, Any]] = None, - batch_norm_kwargs: Optional[Dict[str, Any]] = None, + norm: Optional[str] = 'batch_norm', + norm_kwargs: Optional[Dict[str, Any]] = None, plain_last: bool = True, bias: bool = True, - relu_first: bool = False, + **kwargs, ): super().__init__() - act_first = act_first or relu_first # Backward compatibility. - batch_norm_kwargs = batch_norm_kwargs or {} + # Backward compatibility: + act_first = act_first or kwargs.get("relu_first", False) + batch_norm = kwargs.get("batch_norm", None) + if batch_norm is not None and isinstance(batch_norm, bool): + warnings.warn("Argument `batch_norm` is deprecated, " + "please use `norm` to specify normalization layer.") + norm = 'batch_norm' if batch_norm else None + batch_norm_kwargs = kwargs.get("batch_norm_kwargs", None) + norm_kwargs = batch_norm_kwargs or {} if isinstance(channel_list, int): in_channels = channel_list @@ -114,11 +124,15 @@ def __init__( self.norms = torch.nn.ModuleList() iterator = channel_list[1:-1] if plain_last else channel_list[1:] for hidden_channels in iterator: - if batch_norm: - norm = BatchNorm1d(hidden_channels, **batch_norm_kwargs) + if norm is not None: + norm_layer = normalization_resolver( + norm, + hidden_channels, + **(norm_kwargs or {}), + ) else: - norm = Identity() - self.norms.append(norm) + norm_layer = Identity() + self.norms.append(norm_layer) self.reset_parameters() From 423b923bfba6ed2436555920fb9459084ecd10cb Mon Sep 17 00:00:00 2001 From: Zhe Chen Date: Mon, 11 Jul 2022 20:25:22 +0900 Subject: [PATCH 0151/2432] Add `unbatch_edge_index` (#4903) * Add unbatch_edge_index * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update CHANGELOG.md * Optimize code, and merge unbatch_edge_index into unbatch.py * update * fix Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/utils/test_unbatch.py | 14 +++++++++++++- torch_geometric/utils/__init__.py | 3 ++- torch_geometric/utils/unbatch.py | 20 ++++++++++++++++++++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a7bc391753e..17bb59ba7e27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added `unbatch_edge_index` functionality for splitting an `edge_index` tensor according to a `batch` vector ([#4903](https://github.com/pyg-team/pytorch_geometric/pull/4903)) - Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) - Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951)) - Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) diff --git a/test/utils/test_unbatch.py b/test/utils/test_unbatch.py index 1d72da5a565a..a6841cc747b5 100644 --- a/test/utils/test_unbatch.py +++ b/test/utils/test_unbatch.py @@ -1,6 +1,6 @@ import torch -from torch_geometric.utils import unbatch +from torch_geometric.utils import unbatch, unbatch_edge_index def test_unbatch(): @@ -11,3 +11,15 @@ def test_unbatch(): assert len(out) == 5 for i in range(len(out)): assert torch.equal(out[i], src[batch == i]) + + +def test_unbatch_edge_index(): + edge_index = torch.tensor([ + [0, 1, 1, 2, 2, 3, 4, 5, 5, 6], + [1, 0, 2, 1, 3, 2, 5, 4, 6, 5], + ]) + batch = torch.tensor([0, 0, 0, 0, 1, 1, 1]) + + edge_indices = unbatch_edge_index(edge_index, batch) + assert edge_indices[0].tolist() == [[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]] + assert edge_indices[1].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 8b7e89e99947..c0c577c34ae5 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -17,7 +17,7 @@ from .to_dense_batch import to_dense_batch from .to_dense_adj import to_dense_adj from .sparse import dense_to_sparse -from .unbatch import unbatch +from .unbatch import unbatch, unbatch_edge_index from .normalized_cut import normalized_cut from .grid import grid from .geodesic import geodesic_distance @@ -63,6 +63,7 @@ 'to_dense_adj', 'dense_to_sparse', 'unbatch', + 'unbatch_edge_index', 'normalized_cut', 'grid', 'geodesic_distance', diff --git a/torch_geometric/utils/unbatch.py b/torch_geometric/utils/unbatch.py index 61ae14b73b88..9f2067a63bf3 100644 --- a/torch_geometric/utils/unbatch.py +++ b/torch_geometric/utils/unbatch.py @@ -22,3 +22,23 @@ def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]: """ sizes = degree(batch, dtype=torch.long).tolist() return src.split(sizes, dim) + + +def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]: + r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector. + + Args: + edge_index (Tensor): The edge_index tensor. Must be ordered. + batch (LongTensor): The batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + node to a specific example. Must be ordered. 
+ + :rtype: :class:`List[Tensor]` + """ + deg = degree(batch, dtype=torch.int64) + ptr = torch.cat([deg.new_zeros(1), deg.cumsum(dim=0)[:-1]], dim=0) + + edge_batch = batch[edge_index[0]] + edge_index = edge_index - ptr[edge_batch] + sizes = degree(edge_batch, dtype=torch.int64).cpu().tolist() + return edge_index.split(sizes, dim=1) From 95bb29f7b7b45553d0a80d1953ae771b98a609df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20F=C3=B6rster?= Date: Mon, 11 Jul 2022 13:31:36 +0200 Subject: [PATCH 0152/2432] Fixed `BasicGNN` for `num_layers=1` and `out_channels` given (#4943) * Fixed BasicGNN for the case of 1 layer and out_channels given * Fix doc strings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated changelog * update * update doc-string * update * test with num_layers=3 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/models/test_basic_gnn.py | 38 +++++++++++------- torch_geometric/nn/models/basic_gnn.py | 53 ++++++++++++++++---------- 3 files changed, 58 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17bb59ba7e27..1209e7dfbf8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed `BasicGNN` for `num_layers=1`, which now respects a desired number of `out_channels`([#4943](https://github.com/pyg-team/pytorch_geometric/pull/4943)) - `len(batch)` will now return the number of graphs inside the batch, not the number of attributes ([#4931](https://github.com/pyg-team/pytorch_geometric/pull/4931)) - Fixed `data.subgraph` generation for 0-dim tensors ([#4932](https://github.com/pyg-team/pytorch_geometric/pull/4932)) - Removed unnecssary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index f22bda68ec5b..029dca54ebd2 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -24,9 +24,9 @@ def test_gcn(out_dim, dropout, act, norm, jk): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) out_channels = 16 if out_dim is None else out_dim - model = GCN(8, 16, num_layers=2, out_channels=out_dim, dropout=dropout, + model = GCN(8, 16, num_layers=3, out_channels=out_dim, dropout=dropout, act=act, norm=norm, jk=jk) - assert str(model) == f'GCN(8, {out_channels}, num_layers=2)' + assert str(model) == f'GCN(8, {out_channels}, num_layers=3)' assert model(x, edge_index).size() == (3, out_channels) @@ -37,9 +37,9 @@ def test_graph_sage(out_dim, dropout, act, norm, jk): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) out_channels = 16 if out_dim is None else out_dim - model = GraphSAGE(8, 16, num_layers=2, out_channels=out_dim, + model = GraphSAGE(8, 16, num_layers=3, out_channels=out_dim, dropout=dropout, act=act, norm=norm, jk=jk) - assert str(model) == f'GraphSAGE(8, {out_channels}, num_layers=2)' + assert str(model) == f'GraphSAGE(8, {out_channels}, num_layers=3)' assert model(x, edge_index).size() == (3, out_channels) @@ -50,9 +50,9 @@ def test_gin(out_dim, dropout, act, norm, jk): edge_index = 
torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) out_channels = 16 if out_dim is None else out_dim - model = GIN(8, 16, num_layers=2, out_channels=out_dim, dropout=dropout, + model = GIN(8, 16, num_layers=3, out_channels=out_dim, dropout=dropout, act=act, norm=norm, jk=jk) - assert str(model) == f'GIN(8, {out_channels}, num_layers=2)' + assert str(model) == f'GIN(8, {out_channels}, num_layers=3)' assert model(x, edge_index).size() == (3, out_channels) @@ -64,14 +64,14 @@ def test_gat(out_dim, dropout, act, norm, jk): out_channels = 16 if out_dim is None else out_dim for v2 in [False, True]: - model = GAT(8, 16, num_layers=2, out_channels=out_dim, v2=v2, + model = GAT(8, 16, num_layers=3, out_channels=out_dim, v2=v2, dropout=dropout, act=act, norm=norm, jk=jk) - assert str(model) == f'GAT(8, {out_channels}, num_layers=2)' + assert str(model) == f'GAT(8, {out_channels}, num_layers=3)' assert model(x, edge_index).size() == (3, out_channels) - model = GAT(8, 16, num_layers=2, out_channels=out_dim, v2=v2, + model = GAT(8, 16, num_layers=3, out_channels=out_dim, v2=v2, dropout=dropout, act=act, norm=norm, jk=jk, heads=4) - assert str(model) == f'GAT(8, {out_channels}, num_layers=2)' + assert str(model) == f'GAT(8, {out_channels}, num_layers=3)' assert model(x, edge_index).size() == (3, out_channels) @@ -87,10 +87,20 @@ def test_pna(out_dim, dropout, act, norm, jk): 'identity', 'amplification', 'attenuation', 'linear', 'inverse_linear' ] - model = PNA(8, 16, num_layers=2, out_channels=out_dim, dropout=dropout, + model = PNA(8, 16, num_layers=3, out_channels=out_dim, dropout=dropout, act=act, norm=norm, jk=jk, aggregators=aggregators, scalers=scalers, deg=deg) - assert str(model) == f'PNA(8, {out_channels}, num_layers=2)' + assert str(model) == f'PNA(8, {out_channels}, num_layers=3)' + assert model(x, edge_index).size() == (3, out_channels) + + +@pytest.mark.parametrize('out_dim,jk', product(out_dims, jks)) +def test_one_layer_gnn(out_dim, jk): + x = torch.randn(3, 8) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + out_channels = 16 if out_dim is None else out_dim + + model = GraphSAGE(8, 16, num_layers=1, out_channels=out_dim, jk=jk) assert model(x, edge_index).size() == (3, out_channels) @@ -100,7 +110,7 @@ def test_packaging(): x = torch.randn(3, 8) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - model = GraphSAGE(8, 16, num_layers=2) + model = GraphSAGE(8, 16, num_layers=3) path = osp.join(torch.hub._get_torch_home(), 'pyg_test_model.pt') torch.save(model, path) @@ -108,7 +118,7 @@ def test_packaging(): with torch.no_grad(): assert model(x, edge_index).size() == (3, 16) - model = GraphSAGE(8, 16, num_layers=2) + model = GraphSAGE(8, 16, num_layers=3) path = osp.join(torch.hub._get_torch_home(), 'pyg_test_package.pt') with torch.package.PackageExporter(path) as pe: pe.extern('torch_geometric.nn.**') diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index ba9cfd912ddf..dd796b05b49d 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -80,18 +80,21 @@ def __init__( self.out_channels = hidden_channels self.convs = ModuleList() - self.convs.append( - self.init_conv(in_channels, hidden_channels, **kwargs)) + if num_layers > 1: + self.convs.append( + self.init_conv(in_channels, hidden_channels, **kwargs)) + in_channels = hidden_channels for _ in range(num_layers - 2): self.convs.append( - self.init_conv(hidden_channels, hidden_channels, **kwargs)) + self.init_conv(in_channels, hidden_channels, 
**kwargs)) + in_channels = hidden_channels if out_channels is not None and jk is None: self._is_conv_to_out = True self.convs.append( - self.init_conv(hidden_channels, out_channels, **kwargs)) + self.init_conv(in_channels, out_channels, **kwargs)) else: self.convs.append( - self.init_conv(hidden_channels, hidden_channels, **kwargs)) + self.init_conv(in_channels, hidden_channels, **kwargs)) self.norms = None if norm is not None: @@ -169,9 +172,11 @@ class GCN(BasicGNN): use. (default: :obj:`"relu"`) norm (torch.nn.Module, optional): The normalization operator to use. (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode - (:obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, :obj:`"lstm"`). - (default: :obj:`"last"`) + jk (str, optional): The Jumping Knowledge mode. If specified, the model + will additionally apply a final linear transformation to transform + node embeddings to the expected output feature dimensionality. + (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, + :obj:`"lstm"`). (default: :obj:`None`) act_first (bool, optional): If set to :obj:`True`, activation is applied before normalization. (default: :obj:`False`) act_kwargs (Dict[str, Any], optional): Arguments passed to the @@ -202,9 +207,11 @@ class GraphSAGE(BasicGNN): use. (default: :obj:`"relu"`) norm (torch.nn.Module, optional): The normalization operator to use. (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode - (:obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, :obj:`"lstm"`). - (default: :obj:`"last"`) + jk (str, optional): The Jumping Knowledge mode. If specified, the model + will additionally apply a final linear transformation to transform + node embeddings to the expected output feature dimensionality. + (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, + :obj:`"lstm"`). (default: :obj:`None`) act_first (bool, optional): If set to :obj:`True`, activation is applied before normalization. (default: :obj:`False`) act_kwargs (Dict[str, Any], optional): Arguments passed to the @@ -235,9 +242,11 @@ class GIN(BasicGNN): (default: :obj:`torch.nn.ReLU(inplace=True)`) norm (torch.nn.Module, optional): The normalization operator to use. (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode - (:obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, :obj:`"lstm"`). - (default: :obj:`"last"`) + jk (str, optional): The Jumping Knowledge mode. If specified, the model + will additionally apply a final linear transformation to transform + node embeddings to the expected output feature dimensionality. + (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, + :obj:`"lstm"`). (default: :obj:`None`) act_first (bool, optional): If set to :obj:`True`, activation is applied before normalization. (default: :obj:`False`) act_kwargs (Dict[str, Any], optional): Arguments passed to the @@ -275,9 +284,11 @@ class GAT(BasicGNN): use. (default: :obj:`"relu"`) norm (torch.nn.Module, optional): The normalization operator to use. (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode - (:obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, :obj:`"lstm"`). - (default: :obj:`"last"`) + jk (str, optional): The Jumping Knowledge mode. If specified, the model + will additionally apply a final linear transformation to transform + node embeddings to the expected output feature dimensionality. + (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, + :obj:`"lstm"`). (default: :obj:`None`) act_first (bool, optional): If set to :obj:`True`, activation is applied before normalization. 
(default: :obj:`False`) act_kwargs (Dict[str, Any], optional): Arguments passed to the @@ -329,9 +340,11 @@ class PNA(BasicGNN): use. (default: :obj:`"relu"`) norm (torch.nn.Module, optional): The normalization operator to use. (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode - (:obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, :obj:`"lstm"`). - (default: :obj:`"last"`) + jk (str, optional): The Jumping Knowledge mode. If specified, the model + will additionally apply a final linear transformation to transform + node embeddings to the expected output feature dimensionality. + (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, + :obj:`"lstm"`). (default: :obj:`None`) act_first (bool, optional): If set to :obj:`True`, activation is applied before normalization. (default: :obj:`False`) act_kwargs (Dict[str, Any], optional): Arguments passed to the From fa15c8da98a33b7fb12670392b114f189fadd42a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 12 Jul 2022 04:48:33 +0200 Subject: [PATCH 0153/2432] [pre-commit.ci] pre-commit autoupdate (#4961) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/adrienverge/yamllint.git: v1.26.3 → v1.27.1](https://github.com/adrienverge/yamllint.git/compare/v1.26.3...v1.27.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 88ff835ca3e1..53b84ccf960b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,7 +12,7 @@ repos: )$ - repo: https://github.com/adrienverge/yamllint.git - rev: v1.26.3 + rev: v1.27.1 hooks: - id: yamllint args: [-c=.yamllint.yml] From 6f62e531b747ca3a6b80e1f92de8535b7e267c73 Mon Sep 17 00:00:00 2001 From: Yijun Tian Date: Tue, 12 Jul 2022 02:26:12 -0400 Subject: [PATCH 0154/2432] fix an indent bug for HGT in doc (#4963) --- docs/source/notes/heterogeneous.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/notes/heterogeneous.rst b/docs/source/notes/heterogeneous.rst index bbe7242d4f60..effd5912fd1c 100644 --- a/docs/source/notes/heterogeneous.rst +++ b/docs/source/notes/heterogeneous.rst @@ -379,7 +379,7 @@ These operators can be directly used to build heterogeneous GNN models as can be for _ in range(num_layers): conv = HGTConv(hidden_channels, hidden_channels, data.metadata(), num_heads, group='sum') - self.convs.append(conv) + self.convs.append(conv) self.lin = Linear(hidden_channels, out_channels) From 5ce04c58744040243aa10d49566bf86cd0358ab0 Mon Sep 17 00:00:00 2001 From: Manan Shah Date: Mon, 11 Jul 2022 23:36:48 -0700 Subject: [PATCH 0155/2432] `GraphStore`: handle zero-sized edge indices (#4962) * init * update * update Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- torch_geometric/data/graph_store.py | 22 ++++++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1209e7dfbf8c..16f0bef99dff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
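For reference, the zero-sized case that the `GraphStore` hunk below guards against can be reproduced with plain `torch`: stacking two empty 1-D index tensors yields the expected `[2, 0]` COO `edge_index`. This is only an illustrative sketch, not part of the patch itself:

    import torch

    # Zero-sized COO components, e.g. an edge type that currently has no edges:
    row = torch.empty(0, dtype=torch.long)
    col = torch.empty(0, dtype=torch.long)

    edge_index = torch.stack([row, col], dim=0)
    print(edge_index.shape)  # torch.Size([2, 0])
    print(edge_index.dtype)  # torch.int64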
- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815), [#4862](https://github.com/pyg-team/pytorch_geometric/pull/4862/files)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922), [#4962](https://github.com/pyg-team/pytorch_geometric/pull/4962)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py index 8f35792c254f..f8ba2fed5099 100644 --- a/torch_geometric/data/graph_store.py +++ b/torch_geometric/data/graph_store.py @@ -293,8 +293,12 @@ def edge_tensor_type_to_adj_type( r"""Converts an EdgeTensorType tensor tuple to a PyG Adj tensor.""" src, dst = tensor_tuple - if attr.layout == EdgeLayout.COO: - # COO: (row, col) + if attr.layout == EdgeLayout.COO: # COO: (row, col) + assert src.dim() == 1 and dst.dim() == 1 and src.numel() == dst.numel() + + if src.numel() == 0: + return torch.stack(tensor_tuple, dim=0) + if (src[0].storage().data_ptr() == dst[1].storage().data_ptr() and src.storage_offset() < dst.storage_offset()): # Do not copy if the tensor tuple is constructed from the same @@ -303,14 +307,16 @@ def edge_tensor_type_to_adj_type( out.set_(src.storage(), storage_offset=src.storage_offset(), size=(src.size()[0] + dst.size()[0], )) return out.view(2, -1) - return torch.stack(tensor_tuple) - elif attr.layout == EdgeLayout.CSR: - # CSR: (rowptr, col) + + return torch.stack(tensor_tuple, dim=0) + + elif attr.layout == EdgeLayout.CSR: # CSR: (rowptr, col) return SparseTensor(rowptr=src, col=dst, 
is_sorted=True, sparse_sizes=attr.size) - elif attr.layout == EdgeLayout.CSC: - # CSC: (row, colptr) is a transposed adjacency matrix, so rowptr - # is the compressed column and col is the uncompressed row. + + elif attr.layout == EdgeLayout.CSC: # CSC: (row, colptr) + # CSC is a transposed adjacency matrix, so rowptr is the compressed + # column and col is the uncompressed row. sparse_sizes = None if attr.size is None else (attr.size[1], attr.size[0]) return SparseTensor(rowptr=dst, col=src, is_sorted=True, From bac7021a9447cd58e7355007cb146b30e51f22c8 Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Tue, 12 Jul 2022 09:43:56 +0300 Subject: [PATCH 0156/2432] Support `normalization_resolver` in `BasicGNN` (#4958) * Support normalization_resolver in basic_gnn * changelog * update * update * update * fix * update * reset Co-authored-by: Guohao Li Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- test/graphgym/test_config_store.py | 3 +- torch_geometric/nn/models/basic_gnn.py | 61 +++++++++++++++++++------- torch_geometric/nn/models/mlp.py | 6 +-- 4 files changed, 50 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16f0bef99dff..d0c208204cae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added - Added `unbatch_edge_index` functionality for splitting an `edge_index` tensor according to a `batch` vector ([#4903](https://github.com/pyg-team/pytorch_geometric/pull/4903)) - Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) -- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951)) +- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951), [#4958](https://github.com/pyg-team/pytorch_geometric/pull/4958)) - Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) - Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) diff --git a/test/graphgym/test_config_store.py b/test/graphgym/test_config_store.py index 7539c544485f..d1640f3b4bd1 100644 --- a/test/graphgym/test_config_store.py +++ b/test/graphgym/test_config_store.py @@ -41,7 +41,7 @@ def test_config_store(): assert cfg.dataset.transform.AddSelfLoops.fill_value is None # Check `cfg.model`: - assert len(cfg.model) == 11 + assert len(cfg.model) == 12 assert cfg.model._target_.split('.')[-1] == 'GCN' assert cfg.model.in_channels == 34 assert cfg.model.out_channels == 4 @@ -50,6 +50,7 @@ def test_config_store(): assert cfg.model.dropout == 0.0 assert cfg.model.act == 'relu' assert cfg.model.norm is None + assert cfg.model.norm_kwargs is None assert cfg.model.jk is None assert not cfg.model.act_first assert cfg.model.act_kwargs is None diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index dd796b05b49d..424183bfcf3e 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -17,7 +17,10 @@ ) from torch_geometric.nn.models import MLP from 
torch_geometric.nn.models.jumping_knowledge import JumpingKnowledge -from torch_geometric.nn.resolver import activation_resolver +from torch_geometric.nn.resolver import ( + activation_resolver, + normalization_resolver, +) from torch_geometric.typing import Adj @@ -34,18 +37,21 @@ class BasicGNN(torch.nn.Module): dropout (float, optional): Dropout probability. (default: :obj:`0.`) act (str or Callable, optional): The non-linear activation function to use. (default: :obj:`"relu"`) - norm (torch.nn.Module, optional): The normalization operator to use. + act_first (bool, optional): If set to :obj:`True`, activation is + applied before normalization. (default: :obj:`False`) + act_kwargs (Dict[str, Any], optional): Arguments passed to the + respective activation function defined by :obj:`act`. + (default: :obj:`None`) + norm (str or Callable, optional): The normalization function to + use. (default: :obj:`None`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) jk (str, optional): The Jumping Knowledge mode. If specified, the model will additionally apply a final linear transformation to transform node embeddings to the expected output feature dimensionality. (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, :obj:`"lstm"`). (default: :obj:`None`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) **kwargs (optional): Additional arguments of the underlying :class:`torch_geometric.nn.conv.MessagePassing` layers. """ @@ -57,10 +63,11 @@ def __init__( out_channels: Optional[int] = None, dropout: float = 0.0, act: Union[str, Callable, None] = "relu", - norm: Optional[torch.nn.Module] = None, - jk: Optional[str] = None, act_first: bool = False, act_kwargs: Optional[Dict[str, Any]] = None, + norm: Union[str, Callable, None] = None, + norm_kwargs: Optional[Dict[str, Any]] = None, + jk: Optional[str] = None, **kwargs, ): super().__init__() @@ -98,11 +105,16 @@ def __init__( self.norms = None if norm is not None: + norm_layer = normalization_resolver( + norm, + hidden_channels, + **(norm_kwargs or {}), + ) self.norms = ModuleList() for _ in range(num_layers - 1): - self.norms.append(copy.deepcopy(norm)) + self.norms.append(copy.deepcopy(norm_layer)) if jk is not None: - self.norms.append(copy.deepcopy(norm)) + self.norms.append(copy.deepcopy(norm_layer)) if jk is not None and jk != 'last': self.jk = JumpingKnowledge(jk, hidden_channels, num_layers) @@ -170,7 +182,10 @@ class GCN(BasicGNN): dropout (float, optional): Dropout probability. (default: :obj:`0.`) act (str or Callable, optional): The non-linear activation function to use. (default: :obj:`"relu"`) - norm (torch.nn.Module, optional): The normalization operator to use. + norm (str or Callable, optional): The normalization function to + use. (default: :obj:`None`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) jk (str, optional): The Jumping Knowledge mode. If specified, the model will additionally apply a final linear transformation to transform @@ -205,7 +220,10 @@ class GraphSAGE(BasicGNN): dropout (float, optional): Dropout probability. 
(default: :obj:`0.`) act (str or Callable, optional): The non-linear activation function to use. (default: :obj:`"relu"`) - norm (torch.nn.Module, optional): The normalization operator to use. + norm (str or Callable, optional): The normalization function to + use. (default: :obj:`None`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) jk (str, optional): The Jumping Knowledge mode. If specified, the model will additionally apply a final linear transformation to transform @@ -240,7 +258,10 @@ class GIN(BasicGNN): dropout (float, optional): Dropout probability. (default: :obj:`0.`) act (Callable, optional): The non-linear activation function to use. (default: :obj:`torch.nn.ReLU(inplace=True)`) - norm (torch.nn.Module, optional): The normalization operator to use. + norm (str or Callable, optional): The normalization function to + use. (default: :obj:`None`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) jk (str, optional): The Jumping Knowledge mode. If specified, the model will additionally apply a final linear transformation to transform @@ -257,7 +278,7 @@ class GIN(BasicGNN): """ def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: - mlp = MLP([in_channels, out_channels, out_channels], batch_norm=True) + mlp = MLP([in_channels, out_channels, out_channels], norm="batch_norm") return GINConv(mlp, **kwargs) @@ -282,7 +303,10 @@ class GAT(BasicGNN): dropout (float, optional): Dropout probability. (default: :obj:`0.`) act (str or Callable, optional): The non-linear activation function to use. (default: :obj:`"relu"`) - norm (torch.nn.Module, optional): The normalization operator to use. + norm (str or Callable, optional): The normalization function to + use. (default: :obj:`None`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) jk (str, optional): The Jumping Knowledge mode. If specified, the model will additionally apply a final linear transformation to transform @@ -338,7 +362,10 @@ class PNA(BasicGNN): dropout (float, optional): Dropout probability. (default: :obj:`0.`) act (str or Callable, optional): The non-linear activation function to use. (default: :obj:`"relu"`) - norm (torch.nn.Module, optional): The normalization operator to use. + norm (str or Callable, optional): The normalization function to + use. (default: :obj:`None`) + norm_kwargs (Dict[str, Any], optional): Arguments passed to the + respective normalization function defined by :obj:`norm`. (default: :obj:`None`) jk (str, optional): The Jumping Knowledge mode. 
If specified, the model will additionally apply a final linear transformation to transform diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 5fcc5083bf0f..4cdfbf88e88a 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -1,5 +1,5 @@ import warnings -from typing import Any, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.nn.functional as F @@ -78,10 +78,10 @@ def __init__( out_channels: Optional[int] = None, num_layers: Optional[int] = None, dropout: float = 0., - act: str = "relu", + act: Union[str, Callable, None] = "relu", act_first: bool = False, act_kwargs: Optional[Dict[str, Any]] = None, - norm: Optional[str] = 'batch_norm', + norm: Union[str, Callable, None] = "batch_norm", norm_kwargs: Optional[Dict[str, Any]] = None, plain_last: bool = True, bias: bool = True, From a8601aafd7fc52b87b3f85e86013e64cb7af3e2d Mon Sep 17 00:00:00 2001 From: Guohao Li Date: Tue, 12 Jul 2022 09:55:37 +0300 Subject: [PATCH 0157/2432] Fix `norm` in `examples` and `dmon_pool` (#4959) * Fix norm in examples and dmon_pool * changelog * update Co-authored-by: Guohao Li Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- examples/correct_and_smooth.py | 2 +- examples/dgcnn_classification.py | 3 +-- examples/dgcnn_segmentation.py | 2 +- examples/glnn.py | 2 +- examples/mutag_gin.py | 2 +- examples/point_transformer_classification.py | 26 ++++++-------------- examples/point_transformer_segmentation.py | 23 ++++++----------- examples/pointnet2_classification.py | 2 +- examples/pointnet2_segmentation.py | 3 +-- examples/pytorch_ignite/gin.py | 2 +- examples/pytorch_lightning/gin.py | 2 +- examples/seal_link_pred.py | 2 +- torch_geometric/nn/dense/dmon_pool.py | 2 +- 14 files changed, 27 insertions(+), 48 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0c208204cae..45986bf69249 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
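A minimal usage sketch of the string-based `norm` argument documented in the diff above and exercised by the example updates that follow, assuming a build that already includes these changes; the model sizes and the tiny graph are made up for illustration:

    import torch
    from torch_geometric.nn import MLP
    from torch_geometric.nn.models import GCN

    # The string is resolved internally via `normalization_resolver`:
    gnn = GCN(in_channels=8, hidden_channels=16, num_layers=3,
              norm='batch_norm')

    # Replaces the former `batch_norm=False` spelling:
    mlp = MLP([16, 32, 4], norm=None, dropout=0.5)

    x = torch.randn(5, 8)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
    out = mlp(gnn(x, edge_index))  # shape [5, 4]

Per the diff, `norm_kwargs` is forwarded to the resolved normalization layer in the same way, via `**(norm_kwargs or {})`.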
### Added - Added `unbatch_edge_index` functionality for splitting an `edge_index` tensor according to a `batch` vector ([#4903](https://github.com/pyg-team/pytorch_geometric/pull/4903)) - Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) -- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951), [#4958](https://github.com/pyg-team/pytorch_geometric/pull/4958)) +- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951), [#4958](https://github.com/pyg-team/pytorch_geometric/pull/4958), [#4959](https://github.com/pyg-team/pytorch_geometric/pull/4959)) - Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) - Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) - Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) diff --git a/examples/correct_and_smooth.py b/examples/correct_and_smooth.py index 7e71e91e16e1..e550ccc645f3 100644 --- a/examples/correct_and_smooth.py +++ b/examples/correct_and_smooth.py @@ -15,7 +15,7 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = MLP([dataset.num_features, 200, 200, dataset.num_classes], dropout=0.5, - batch_norm=True, act_first=True).to(device) + norm="batch_norm", act_first=True).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.01) criterion = torch.nn.CrossEntropyLoss() diff --git a/examples/dgcnn_classification.py b/examples/dgcnn_classification.py index 9e3f8e7bf0c9..ba55b75d79df 100644 --- a/examples/dgcnn_classification.py +++ b/examples/dgcnn_classification.py @@ -27,8 +27,7 @@ def __init__(self, out_channels, k=20, aggr='max'): self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr) self.lin1 = Linear(128 + 64, 1024) - self.mlp = MLP([1024, 512, 256, out_channels], dropout=0.5, - batch_norm=False) + self.mlp = MLP([1024, 512, 256, out_channels], dropout=0.5, norm=None) def forward(self, data): pos, batch = data.pos, data.batch diff --git a/examples/dgcnn_segmentation.py b/examples/dgcnn_segmentation.py index c040fa08189d..7714196c6dd3 100644 --- a/examples/dgcnn_segmentation.py +++ b/examples/dgcnn_segmentation.py @@ -38,7 +38,7 @@ def __init__(self, out_channels, k=30, aggr='max'): self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr) self.mlp = MLP([3 * 64, 1024, 256, 128, out_channels], dropout=0.5, - batch_norm=False) + norm=None) def forward(self, data): x, pos, batch = data.x, data.pos, data.batch diff --git a/examples/glnn.py b/examples/glnn.py index dd72b227f5ba..38412d9ff272 100644 --- a/examples/glnn.py +++ b/examples/glnn.py @@ -25,7 +25,7 @@ gnn = GCN(dataset.num_node_features, hidden_channels=16, out_channels=dataset.num_classes, num_layers=2).to(device) mlp = MLP([dataset.num_node_features, 64, dataset.num_classes], dropout=0.5, - batch_norm=False).to(device) + norm=None).to(device) gnn_optimizer = torch.optim.Adam(gnn.parameters(), lr=0.01, weight_decay=5e-4) mlp_optimizer = torch.optim.Adam(mlp.parameters(), lr=0.01, weight_decay=5e-4) diff --git a/examples/mutag_gin.py b/examples/mutag_gin.py index d5401e8f2bd8..4f0d1f1805bc 
100644 --- a/examples/mutag_gin.py +++ b/examples/mutag_gin.py @@ -45,7 +45,7 @@ def __init__(self, in_channels, hidden_channels, out_channels, num_layers): in_channels = hidden_channels self.mlp = MLP([hidden_channels, hidden_channels, out_channels], - batch_norm=False, dropout=0.5) + norm=None, dropout=0.5) def forward(self, x, edge_index, batch): for conv in self.convs: diff --git a/examples/point_transformer_classification.py b/examples/point_transformer_classification.py index ab2216eaba9f..33e106d68a8d 100644 --- a/examples/point_transformer_classification.py +++ b/examples/point_transformer_classification.py @@ -2,18 +2,14 @@ import torch import torch.nn.functional as F -from torch.nn import BatchNorm1d as BN -from torch.nn import Identity from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq from torch_cluster import fps, knn_graph from torch_scatter import scatter_max import torch_geometric.transforms as T from torch_geometric.datasets import ModelNet from torch_geometric.loader import DataLoader -from torch_geometric.nn import global_mean_pool +from torch_geometric.nn import MLP, global_mean_pool from torch_geometric.nn.conv import PointTransformerConv from torch_geometric.nn.pool import knn @@ -31,9 +27,10 @@ def __init__(self, in_channels, out_channels): self.lin_in = Lin(in_channels, in_channels) self.lin_out = Lin(out_channels, out_channels) - self.pos_nn = MLP([3, 64, out_channels], batch_norm=False) + self.pos_nn = MLP([3, 64, out_channels], norm=None, plain_last=False) - self.attn_nn = MLP([out_channels, 64, out_channels], batch_norm=False) + self.attn_nn = MLP([out_channels, 64, out_channels], norm=None, + plain_last=False) self.transformer = PointTransformerConv(in_channels, out_channels, pos_nn=self.pos_nn, @@ -55,7 +52,7 @@ def __init__(self, in_channels, out_channels, ratio=0.25, k=16): super().__init__() self.k = k self.ratio = ratio - self.mlp = MLP([in_channels, out_channels]) + self.mlp = MLP([in_channels, out_channels], plain_last=False) def forward(self, x, pos, batch): # FPS sampling @@ -80,14 +77,6 @@ def forward(self, x, pos, batch): return out, sub_pos, sub_batch -def MLP(channels, batch_norm=True): - return Seq(*[ - Seq(Lin(channels[i - 1], channels[i]), - BN(channels[i]) if batch_norm else Identity(), ReLU()) - for i in range(1, len(channels)) - ]) - - class Net(torch.nn.Module): def __init__(self, in_channels, out_channels, dim_model, k=16): super().__init__() @@ -97,7 +86,7 @@ def __init__(self, in_channels, out_channels, dim_model, k=16): in_channels = max(in_channels, 1) # first block - self.mlp_input = MLP([in_channels, dim_model[0]]) + self.mlp_input = MLP([in_channels, dim_model[0]], plain_last=False) self.transformer_input = TransformerBlock(in_channels=dim_model[0], out_channels=dim_model[0]) @@ -116,8 +105,7 @@ def __init__(self, in_channels, out_channels, dim_model, k=16): out_channels=dim_model[i + 1])) # class score computation - self.mlp_output = Seq(Lin(dim_model[-1], 64), ReLU(), Lin(64, 64), - ReLU(), Lin(64, out_channels)) + self.mlp_output = MLP([dim_model[-1], 64, out_channels], norm=None) def forward(self, x, pos, batch=None): diff --git a/examples/point_transformer_segmentation.py b/examples/point_transformer_segmentation.py index 86f920283bea..92b2d5d8153b 100644 --- a/examples/point_transformer_segmentation.py +++ b/examples/point_transformer_segmentation.py @@ -2,14 +2,7 @@ import torch import torch.nn.functional as F -from point_transformer_classification import ( - MLP, - 
TransformerBlock, - TransitionDown, -) -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq +from point_transformer_classification import TransformerBlock, TransitionDown from torch_cluster import knn_graph from torch_scatter import scatter from torchmetrics.functional import jaccard_index @@ -17,7 +10,7 @@ import torch_geometric.transforms as T from torch_geometric.datasets import ShapeNet from torch_geometric.loader import DataLoader -from torch_geometric.nn.unpool import knn_interpolate +from torch_geometric.nn import MLP, knn_interpolate category = 'Airplane' # Pass in `None` to train on all categories. path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') @@ -43,8 +36,8 @@ class TransitionUp(torch.nn.Module): ''' def __init__(self, in_channels, out_channels): super().__init__() - self.mlp_sub = MLP([in_channels, out_channels]) - self.mlp = MLP([out_channels, out_channels]) + self.mlp_sub = MLP([in_channels, out_channels], plain_last=False) + self.mlp = MLP([out_channels, out_channels], plain_last=False) def forward(self, x, x_sub, pos, pos_sub, batch=None, batch_sub=None): # transform low-res features and reduce the number of features @@ -68,7 +61,7 @@ def __init__(self, in_channels, out_channels, dim_model, k=16): in_channels = max(in_channels, 1) # first block - self.mlp_input = MLP([in_channels, dim_model[0]]) + self.mlp_input = MLP([in_channels, dim_model[0]], plain_last=False) self.transformer_input = TransformerBlock( in_channels=dim_model[0], @@ -102,7 +95,8 @@ def __init__(self, in_channels, out_channels, dim_model, k=16): out_channels=dim_model[i])) # summit layers - self.mlp_summit = MLP([dim_model[-1], dim_model[-1]], batch_norm=False) + self.mlp_summit = MLP([dim_model[-1], dim_model[-1]], norm=None, + plain_last=False) self.transformer_summit = TransformerBlock( in_channels=dim_model[-1], @@ -110,8 +104,7 @@ def __init__(self, in_channels, out_channels, dim_model, k=16): ) # class score computation - self.mlp_output = Seq(Lin(dim_model[0], 64), ReLU(), Lin(64, 64), - ReLU(), Lin(64, out_channels)) + self.mlp_output = MLP([dim_model[0], 64, out_channels], norm=None) def forward(self, x, pos, batch=None): diff --git a/examples/pointnet2_classification.py b/examples/pointnet2_classification.py index 7f94cf724051..4abfb13577e2 100644 --- a/examples/pointnet2_classification.py +++ b/examples/pointnet2_classification.py @@ -49,7 +49,7 @@ def __init__(self): self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256])) self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024])) - self.mlp = MLP([1024, 512, 256, 10], dropout=0.5, batch_norm=False) + self.mlp = MLP([1024, 512, 256, 10], dropout=0.5, norm=None) def forward(self, data): sa0_out = (data.x, data.pos, data.batch) diff --git a/examples/pointnet2_segmentation.py b/examples/pointnet2_segmentation.py index f316450671cb..ae3259b678dd 100644 --- a/examples/pointnet2_segmentation.py +++ b/examples/pointnet2_segmentation.py @@ -57,8 +57,7 @@ def __init__(self, num_classes): self.fp2_module = FPModule(3, MLP([256 + 128, 256, 128])) self.fp1_module = FPModule(3, MLP([128 + 3, 128, 128, 128])) - self.mlp = MLP([128, 128, 128, num_classes], dropout=0.5, - batch_norm=False) + self.mlp = MLP([128, 128, 128, num_classes], dropout=0.5, norm=None) self.lin1 = torch.nn.Linear(128, 128) self.lin2 = torch.nn.Linear(128, 128) diff --git a/examples/pytorch_ignite/gin.py b/examples/pytorch_ignite/gin.py index 6317a8a3b2da..1a631eeaaf1f 100644 --- 
a/examples/pytorch_ignite/gin.py +++ b/examples/pytorch_ignite/gin.py @@ -23,7 +23,7 @@ def __init__(self, in_channels: int, out_channels: int, dropout=dropout, jk='cat') self.classifier = MLP([hidden_channels, hidden_channels, out_channels], - batch_norm=True, dropout=dropout) + norm="batch_norm", dropout=dropout) def forward(self, data): x = self.gnn(data.x, data.edge_index) diff --git a/examples/pytorch_lightning/gin.py b/examples/pytorch_lightning/gin.py index 457d16791df2..1b0955fb8b62 100644 --- a/examples/pytorch_lightning/gin.py +++ b/examples/pytorch_lightning/gin.py @@ -21,7 +21,7 @@ def __init__(self, in_channels: int, out_channels: int, dropout=dropout, jk='cat') self.classifier = MLP([hidden_channels, hidden_channels, out_channels], - batch_norm=True, dropout=dropout) + norm="batch_norm", dropout=dropout) self.train_acc = Accuracy() self.val_acc = Accuracy() diff --git a/examples/seal_link_pred.py b/examples/seal_link_pred.py index 9189ed1b0bbe..c68c539c2d9d 100644 --- a/examples/seal_link_pred.py +++ b/examples/seal_link_pred.py @@ -161,7 +161,7 @@ def __init__(self, hidden_channels, num_layers, GNN=GCNConv, k=0.6): conv1d_kws[1], 1) dense_dim = int((self.k - 2) / 2 + 1) dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1] - self.mlp = MLP([dense_dim, 128, 1], dropout=0.5, batch_norm=False) + self.mlp = MLP([dense_dim, 128, 1], dropout=0.5, norm=None) def forward(self, x, edge_index, batch): xs = [x] diff --git a/torch_geometric/nn/dense/dmon_pool.py b/torch_geometric/nn/dense/dmon_pool.py index d455ca566635..51b1c5b4e919 100644 --- a/torch_geometric/nn/dense/dmon_pool.py +++ b/torch_geometric/nn/dense/dmon_pool.py @@ -65,7 +65,7 @@ def __init__(self, channels: Union[int, List[int]], k: int, channels = [channels] from torch_geometric.nn.models.mlp import MLP - self.mlp = MLP(channels + [k], act='selu', batch_norm=False) + self.mlp = MLP(channels + [k], act='selu', norm=None) self.dropout = dropout From 424408991bde86aca47940e6cbc83ce5606ad585 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 12 Jul 2022 15:37:09 +0200 Subject: [PATCH 0158/2432] [Benchmark] Fix `bool` arguments in `argparse` (#4967) * fix argparse * changelog --- CHANGELOG.md | 3 ++- benchmark/citation/appnp.py | 6 +++--- benchmark/citation/arma.py | 8 ++++---- benchmark/citation/cheb.py | 6 +++--- benchmark/citation/gat.py | 6 +++--- benchmark/citation/gcn.py | 6 +++--- benchmark/citation/run.sh | 40 ++++++++++++++++++------------------- benchmark/citation/sgc.py | 6 +++--- 8 files changed, 41 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 45986bf69249..42dc7fbacd4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,7 +52,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed -- Fixed `BasicGNN` for `num_layers=1`, which now respects a desired number of `out_channels`([#4943](https://github.com/pyg-team/pytorch_geometric/pull/4943)) +- Fixed `bool` arugments in `argparse` in `benchmark/` ([#4967](https://github.com/pyg-team/pytorch_geometric/pull/4967)) +- Fixed `BasicGNN` for `num_layers=1`, which now respects a desired number of `out_channels` ([#4943](https://github.com/pyg-team/pytorch_geometric/pull/4943)) - `len(batch)` will now return the number of graphs inside the batch, not the number of attributes ([#4931](https://github.com/pyg-team/pytorch_geometric/pull/4931)) - Fixed `data.subgraph` generation for 0-dim tensors ([#4932](https://github.com/pyg-team/pytorch_geometric/pull/4932)) - Removed unnecssary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) diff --git a/benchmark/citation/appnp.py b/benchmark/citation/appnp.py index 10f805a0c06f..a0bffc4075ad 100644 --- a/benchmark/citation/appnp.py +++ b/benchmark/citation/appnp.py @@ -9,7 +9,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', type=bool, default=False) +parser.add_argument('--random_splits', action='http://github.com/store_true') parser.add_argument('--runs', type=int, default=100) parser.add_argument('--epochs', type=int, default=200) parser.add_argument('--lr', type=float, default=0.01) @@ -17,7 +17,7 @@ parser.add_argument('--early_stopping', type=int, default=10) parser.add_argument('--hidden', type=int, default=64) parser.add_argument('--dropout', type=float, default=0.5) -parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--no_normalize_features', action='http://github.com/store_true') parser.add_argument('--K', type=int, default=10) parser.add_argument('--alpha', type=float, default=0.1) args = parser.parse_args() @@ -44,7 +44,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.normalize_features) +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) diff --git a/benchmark/citation/arma.py b/benchmark/citation/arma.py index 0d0405e7a548..a87c487a04c2 100644 --- a/benchmark/citation/arma.py +++ b/benchmark/citation/arma.py @@ -8,7 +8,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', type=bool, default=False) +parser.add_argument('--random_splits', action='http://github.com/store_true') parser.add_argument('--runs', type=int, default=100) parser.add_argument('--epochs', type=int, default=1000) parser.add_argument('--lr', type=float, default=0.01) @@ -16,10 +16,10 @@ parser.add_argument('--early_stopping', type=int, default=100) parser.add_argument('--hidden', type=int, default=16) parser.add_argument('--dropout', type=float, default=0.5) -parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--no_normalize_features', 
action='http://github.com/store_true') parser.add_argument('--num_stacks', type=int, default=1) parser.add_argument('--num_layers', type=int, default=1) -parser.add_argument('--shared_weights', type=bool, default=False) +parser.add_argument('--shared_weights', action='http://github.com/store_true') parser.add_argument('--skip_dropout', type=float, default=0.75) args = parser.parse_args() @@ -46,7 +46,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.normalize_features) +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) diff --git a/benchmark/citation/cheb.py b/benchmark/citation/cheb.py index 18e6b4b23934..b39afa7dfb48 100644 --- a/benchmark/citation/cheb.py +++ b/benchmark/citation/cheb.py @@ -8,7 +8,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', type=bool, default=False) +parser.add_argument('--random_splits', action='http://github.com/store_true') parser.add_argument('--runs', type=int, default=100) parser.add_argument('--epochs', type=int, default=200) parser.add_argument('--lr', type=float, default=0.01) @@ -16,7 +16,7 @@ parser.add_argument('--early_stopping', type=int, default=10) parser.add_argument('--hidden', type=int, default=16) parser.add_argument('--dropout', type=float, default=0.5) -parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--no_normalize_features', action='http://github.com/store_true') parser.add_argument('--num_hops', type=int, default=3) args = parser.parse_args() @@ -39,7 +39,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.normalize_features) +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) diff --git a/benchmark/citation/gat.py b/benchmark/citation/gat.py index 0f85e5144a73..230482a62359 100644 --- a/benchmark/citation/gat.py +++ b/benchmark/citation/gat.py @@ -8,7 +8,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', type=bool, default=False) +parser.add_argument('--random_splits', action='http://github.com/store_true') parser.add_argument('--runs', type=int, default=100) parser.add_argument('--epochs', type=int, default=1000) parser.add_argument('--lr', type=float, default=0.005) @@ -16,7 +16,7 @@ parser.add_argument('--early_stopping', type=int, default=100) parser.add_argument('--hidden', type=int, default=8) parser.add_argument('--dropout', type=float, default=0.6) -parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--no_normalize_features', action='http://github.com/store_true') parser.add_argument('--heads', type=int, default=8) parser.add_argument('--output_heads', type=int, default=1) args = parser.parse_args() @@ -44,7 +44,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.normalize_features) +dataset = 
get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) diff --git a/benchmark/citation/gcn.py b/benchmark/citation/gcn.py index b8c220f519b8..09a9b7981943 100644 --- a/benchmark/citation/gcn.py +++ b/benchmark/citation/gcn.py @@ -8,7 +8,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', type=bool, default=False) +parser.add_argument('--random_splits', action='http://github.com/store_true') parser.add_argument('--runs', type=int, default=100) parser.add_argument('--epochs', type=int, default=200) parser.add_argument('--lr', type=float, default=0.01) @@ -16,7 +16,7 @@ parser.add_argument('--early_stopping', type=int, default=10) parser.add_argument('--hidden', type=int, default=16) parser.add_argument('--dropout', type=float, default=0.5) -parser.add_argument('--normalize_features', type=bool, default=True) +parser.add_argument('--no_normalize_features', action='http://github.com/store_true') args = parser.parse_args() @@ -38,7 +38,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.normalize_features) +dataset = get_planetoid_dataset(args.dataset, args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) diff --git a/benchmark/citation/run.sh b/benchmark/citation/run.sh index dc584555fe7a..4765326d64fc 100755 --- a/benchmark/citation/run.sh +++ b/benchmark/citation/run.sh @@ -5,78 +5,78 @@ echo "====" echo "GCN" python gcn.py --dataset=Cora -python gcn.py --dataset=Cora --random_splits=True +python gcn.py --dataset=Cora --random_splits echo "GAT" python gat.py --dataset=Cora -python gat.py --dataset=Cora --random_splits=True +python gat.py --dataset=Cora --random_splits echo "Cheby" python cheb.py --dataset=Cora --num_hops=3 -python cheb.py --dataset=Cora --num_hops=3 --random_splits=True +python cheb.py --dataset=Cora --num_hops=3 --random_splits echo "SGC" python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 -python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 --random_splits=True +python sgc.py --dataset=Cora --K=3 --weight_decay=0.0005 --random_splits echo "ARMA" -python arma.py --dataset=Cora --num_stacks=2 --num_layers=1 --shared_weights=True -python arma.py --dataset=Cora --num_stacks=3 --num_layers=1 --shared_weights=True --random_splits=True +python arma.py --dataset=Cora --num_stacks=2 --num_layers=1 --shared_weights +python arma.py --dataset=Cora --num_stacks=3 --num_layers=1 --shared_weights --random_splits echo "APPNP" python appnp.py --dataset=Cora --alpha=0.1 -python appnp.py --dataset=Cora --alpha=0.1 --random_splits=True +python appnp.py --dataset=Cora --alpha=0.1 --random_splits echo "CiteSeer" echo "========" echo "GCN" python gcn.py --dataset=CiteSeer -python gcn.py --dataset=CiteSeer --random_splits=True +python gcn.py --dataset=CiteSeer --random_splits echo "GAT" python gat.py --dataset=CiteSeer -python gat.py --dataset=CiteSeer --random_splits=True +python gat.py --dataset=CiteSeer --random_splits echo "Cheby" python cheb.py --dataset=CiteSeer --num_hops=2 -python cheb.py --dataset=CiteSeer --num_hops=3 --random_splits=True +python cheb.py 
--dataset=CiteSeer --num_hops=3 --random_splits echo "SGC" python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 -python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 --random_splits=True +python sgc.py --dataset=CiteSeer --K=2 --weight_decay=0.005 --random_splits echo "ARMA" -python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights=True -python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights=True --random_splits=True +python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights +python arma.py --dataset=CiteSeer --num_stacks=3 --num_layers=1 --shared_weights --random_splits echo "APPNP" python appnp.py --dataset=CiteSeer --alpha=0.1 -python appnp.py --dataset=CiteSeer --alpha=0.1 --random_splits=True +python appnp.py --dataset=CiteSeer --alpha=0.1 --random_splits echo "PubMed" echo "======" echo "GCN" python gcn.py --dataset=PubMed -python gcn.py --dataset=PubMed --random_splits=True +python gcn.py --dataset=PubMed --random_splits echo "GAT" python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 -python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 --random_splits=True +python gat.py --dataset=PubMed --lr=0.01 --weight_decay=0.001 --output_heads=8 --random_splits echo "Cheby" python cheb.py --dataset=PubMed --num_hops=2 -python cheb.py --dataset=PubMed --num_hops=2 --random_splits=True +python cheb.py --dataset=PubMed --num_hops=2 --random_splits echo "SGC" python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 -python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 --random_splits=True +python sgc.py --dataset=PubMed --K=2 --weight_decay=0.0005 --random_splits echo "ARMA" python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0 -python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0.5 --random_splits=True +python arma.py --dataset=PubMed --num_stacks=2 --num_layers=1 --skip_dropout=0.5 --random_splits echo "APPNP" python appnp.py --dataset=PubMed --alpha=0.1 -python appnp.py --dataset=PubMed --alpha=0.1 --random_splits=True +python appnp.py --dataset=PubMed --alpha=0.1 --random_splits diff --git a/benchmark/citation/sgc.py b/benchmark/citation/sgc.py index b21a37e07a5c..9153510fe804 100644 --- a/benchmark/citation/sgc.py +++ b/benchmark/citation/sgc.py @@ -8,13 +8,13 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', type=bool, default=False) +parser.add_argument('--random_splits', action='http://github.com/store_true') parser.add_argument('--runs', type=int, default=100) parser.add_argument('--epochs', type=int, default=200) parser.add_argument('--lr', type=float, default=0.1) parser.add_argument('--weight_decay', type=float, default=0.0005) parser.add_argument('--early_stopping', type=int, default=10) -parser.add_argument('--normalize_features', type=bool, default=False) +parser.add_argument('--no_normalize_features', action='http://github.com/store_true') parser.add_argument('--K', type=int, default=2) args = parser.parse_args() @@ -34,7 +34,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.normalize_features) +dataset = get_planetoid_dataset(args.dataset, args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, 
args.early_stopping, permute_masks) From 845e5d84923a887b795045ef2b78fe0ac85aa8b2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 12 Jul 2022 15:42:51 +0200 Subject: [PATCH 0159/2432] Fix `GraphStore` with empty edge indices (#4968) * fix graphstore * changelog: --- CHANGELOG.md | 2 +- torch_geometric/data/graph_store.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42dc7fbacd4a..63521df17363 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) - Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) - Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815), [#4862](https://github.com/pyg-team/pytorch_geometric/pull/4862/files)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922), [#4962](https://github.com/pyg-team/pytorch_geometric/pull/4962)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922), [#4962](https://github.com/pyg-team/pytorch_geometric/pull/4962), [#4968](https://github.com/pyg-team/pytorch_geometric/pull/4968)) - Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) - Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) - Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py index f8ba2fed5099..908cd5496d34 100644 --- a/torch_geometric/data/graph_store.py +++ b/torch_geometric/data/graph_store.py @@ -297,7 +297,7 @@ def edge_tensor_type_to_adj_type( assert src.dim() == 1 and dst.dim() == 1 and src.numel() == dst.numel() if src.numel() == 0: - return torch.stack(tensor_tuple, dim=0) + return torch.empty((2, 0), dtype=torch.long, device=src.device) if 
(src[0].storage().data_ptr() == dst[1].storage().data_ptr() and src.storage_offset() < dst.storage_offset()): @@ -308,7 +308,7 @@ def edge_tensor_type_to_adj_type( size=(src.size()[0] + dst.size()[0], )) return out.view(2, -1) - return torch.stack(tensor_tuple, dim=0) + return torch.stack([src, dst], dim=0) elif attr.layout == EdgeLayout.CSR: # CSR: (rowptr, col) return SparseTensor(rowptr=src, col=dst, is_sorted=True, From 38befa3a1714c9d7101357fbc113bfec83feaed9 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Tue, 12 Jul 2022 14:07:27 +0000 Subject: [PATCH 0160/2432] fix #4967 --- benchmark/citation/gcn.py | 2 +- benchmark/citation/sgc.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/citation/gcn.py b/benchmark/citation/gcn.py index 09a9b7981943..52d5f8f09c31 100644 --- a/benchmark/citation/gcn.py +++ b/benchmark/citation/gcn.py @@ -38,7 +38,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.no_normalize_features) +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) diff --git a/benchmark/citation/sgc.py b/benchmark/citation/sgc.py index 9153510fe804..bfa9e45ad12e 100644 --- a/benchmark/citation/sgc.py +++ b/benchmark/citation/sgc.py @@ -34,7 +34,7 @@ def forward(self, data): return F.log_softmax(x, dim=1) -dataset = get_planetoid_dataset(args.dataset, args.no_normalize_features) +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, args.early_stopping, permute_masks) From da7be5d7c617c45c93cf76243009da786c0ae873 Mon Sep 17 00:00:00 2001 From: Kyle Whitecross <35904712+kpstesla@users.noreply.github.com> Date: Tue, 12 Jul 2022 10:01:26 -0700 Subject: [PATCH 0161/2432] Stronger check for label dimensions in CorrectAndSmooth and LabelPropagation (#4970) * check y.numel() to avoid calling F.one_hot on one-hot matrices * changelog --- CHANGELOG.md | 1 + torch_geometric/nn/models/correct_and_smooth.py | 4 ++-- torch_geometric/nn/models/label_prop.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 63521df17363..e5c350e81fb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
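The dimension check mentioned in the commit message above (and applied in the hunks that follow) can be verified in isolation: `y.size(0) == y.numel()` holds for a 1-D vector of class indices but not for a label matrix that is already one-hot encoded. A standalone sketch, independent of the patch:

    import torch
    import torch.nn.functional as F

    y_idx = torch.tensor([0, 2, 1])          # class indices
    y_hot = F.one_hot(y_idx, num_classes=3)  # already one-hot, still dtype long

    print(y_idx.size(0) == y_idx.numel())    # True  -> safe to one-hot encode
    print(y_hot.size(0) == y_hot.numel())    # False -> leave unchanged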
- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) - Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) ### Changed +- Fixed issue where one-hot tensors were passed to `F.one_hot` ([4970](https://github.com/pyg-team/pytorch_geometric/pull/4970)) - Fixed `bool` arugments in `argparse` in `benchmark/` ([#4967](https://github.com/pyg-team/pytorch_geometric/pull/4967)) - Fixed `BasicGNN` for `num_layers=1`, which now respects a desired number of `out_channels` ([#4943](https://github.com/pyg-team/pytorch_geometric/pull/4943)) - `len(batch)` will now return the number of graphs inside the batch, not the number of attributes ([#4931](https://github.com/pyg-team/pytorch_geometric/pull/4931)) diff --git a/torch_geometric/nn/models/correct_and_smooth.py b/torch_geometric/nn/models/correct_and_smooth.py index 66e9568e8557..a78c5c83fce3 100644 --- a/torch_geometric/nn/models/correct_and_smooth.py +++ b/torch_geometric/nn/models/correct_and_smooth.py @@ -92,7 +92,7 @@ def correct(self, y_soft: Tensor, y_true: Tensor, mask: Tensor, numel = int(mask.sum()) if mask.dtype == torch.bool else mask.size(0) assert y_true.size(0) == numel - if y_true.dtype == torch.long: + if y_true.dtype == torch.long and y_true.size(0) == y_true.numel(): y_true = F.one_hot(y_true.view(-1), y_soft.size(-1)) y_true = y_true.to(y_soft.dtype) @@ -125,7 +125,7 @@ def smooth(self, y_soft: Tensor, y_true: Tensor, mask: Tensor, numel = int(mask.sum()) if mask.dtype == torch.bool else mask.size(0) assert y_true.size(0) == numel - if y_true.dtype == torch.long: + if y_true.dtype == torch.long and y_true.size(0) == y_true.numel(): y_true = F.one_hot(y_true.view(-1), y_soft.size(-1)) y_true = y_true.to(y_soft.dtype) diff --git a/torch_geometric/nn/models/label_prop.py b/torch_geometric/nn/models/label_prop.py index c5c9f43456da..fa8b732e5390 100644 --- a/torch_geometric/nn/models/label_prop.py +++ b/torch_geometric/nn/models/label_prop.py @@ -38,7 +38,7 @@ def forward( ) -> Tensor: """""" - if y.dtype == torch.long: + if y.dtype == torch.long and y.size(0) == y.numel(): y = F.one_hot(y.view(-1)).to(torch.float) out = y From 65226936edfa7bc87abacbe56335c5706216f4a1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 13 Jul 2022 12:36:29 +0200 Subject: [PATCH 0162/2432] PyTorch 1.12 support (#4975) * Fix norm in examples and dmon_pool * changelog * update * PyTorch 1.12 support * changelog * updaet * reset * update * update Co-authored-by: Guohao Li --- .github/workflows/building_pyg_conda.yml | 24 ++++++------- .github/workflows/building_rusty1s_conda.yml | 24 ++++++------- .github/workflows/documentation.yml | 2 +- .github/workflows/examples.yml | 6 ++-- .github/workflows/full_testing.yml | 8 ++--- .github/workflows/install.yml | 2 +- .github/workflows/testing.yml | 8 ++--- CHANGELOG.md | 1 + README.md | 38 ++++++++++---------- docs/source/notes/installation.rst | 22 ++++++------ docs/source/notes/quick-start.html | 19 ++++++++-- test/graphgym/test_config_store.py | 2 -- 12 files changed, 84 insertions(+), 72 deletions(-) diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml index 0b366543ec21..4e58081e3577 100644 --- a/.github/workflows/building_pyg_conda.yml +++ b/.github/workflows/building_pyg_conda.yml @@ -12,27 +12,25 @@ jobs: matrix: os: [ubuntu-18.04, macos-10.15, windows-2019] python-version: ['3.7', '3.8', '3.9', '3.10'] - torch-version: [1.10.0, 1.11.0] - cuda-version: 
['cpu', 'cu102', 'cu113', 'cu115'] + torch-version: [1.11.0, 1.12.0] + cuda-version: ['cpu', 'cu102', 'cu113', 'cu115', 'cu116'] exclude: - - torch-version: 1.10.0 + - torch-version: 1.11.0 + cuda-version: 'cu116' + - torch-version: 1.12.0 cuda-version: 'cu115' - - torch-version: 1.10.0 - python-version: '3.10' - - os: windows-2019 - torch-version: 1.11.0 - cuda-version: 'cu102' - os: macos-10.15 cuda-version: 'cu102' - os: macos-10.15 cuda-version: 'cu113' - os: macos-10.15 cuda-version: 'cu115' - # There is a weird `glibc=2.27` bug going on for this combination: - - os: ubuntu-18.04 - python-version: '3.10' - - os: ubuntu-18.04 - cuda-version: 'cu115' + - os: macos-10.15 + cuda-version: 'cu116' + - os: windows-2019 + cuda-version: 'cu102' + - os: windows-2019 # Complains about CUDA mismatch. + python-version: '3.7' steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml index 87c9d22cb0f5..36b6ade834c0 100644 --- a/.github/workflows/building_rusty1s_conda.yml +++ b/.github/workflows/building_rusty1s_conda.yml @@ -12,27 +12,25 @@ jobs: matrix: os: [ubuntu-18.04, macos-10.15, windows-2019] python-version: ['3.7', '3.8', '3.9', '3.10'] - torch-version: [1.10.0, 1.11.0] - cuda-version: ['cpu', 'cu102', 'cu113', 'cu115'] + torch-version: [1.11.0, 1.12.0] + cuda-version: ['cpu', 'cu102', 'cu113', 'cu115', 'cu116'] exclude: - - torch-version: 1.10.0 + - torch-version: 1.11.0 + cuda-version: 'cu116' + - torch-version: 1.12.0 cuda-version: 'cu115' - - torch-version: 1.10.0 - python-version: '3.10' - - os: windows-2019 - torch-version: 1.11.0 - cuda-version: 'cu102' - os: macos-10.15 cuda-version: 'cu102' - os: macos-10.15 cuda-version: 'cu113' - os: macos-10.15 cuda-version: 'cu115' - # There is a weird `glibc=2.27` bug going on for this combination: - - os: ubuntu-18.04 - python-version: '3.10' - - os: ubuntu-18.04 - cuda-version: 'cu115' + - os: macos-10.15 + cuda-version: 'cu116' + - os: windows-2019 + cuda-version: 'cu102' + - os: windows-2019 # Complains about CUDA mismatch. 
+ python-version: '3.7' steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 487eb2b051a4..80be03f3b7cd 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -15,7 +15,7 @@ jobs: matrix: os: [ubuntu-latest] python-version: [3.8] - torch-version: [1.11.0] + torch-version: [1.12.0] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index fe4a2733322f..891fbe5e80fb 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -16,10 +16,10 @@ jobs: matrix: os: [ubuntu-latest] python-version: [3.9] - torch-version: [1.11.0] + torch-version: [1.12.0] include: - - torch-version: 1.11.0 - torchvision-version: 0.12.0 + - torch-version: 1.12.0 + torchvision-version: 0.13.0 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 074d3463cfdd..d59c8644f4cd 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -15,11 +15,11 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest] - python-version: ['3.7', '3.8', '3.9'] - torch-version: [1.10.0] + python-version: ['3.7', '3.8', '3.9', '3.10'] + torch-version: [1.12.0] include: - - torch-version: 1.10.0 - torchvision-version: 0.11.1 + - torch-version: 1.12.0 + torchvision-version: 0.13.0 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml index 5a0a00bbd843..58e8d27434ea 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/install.yml @@ -15,7 +15,7 @@ jobs: matrix: os: [ubuntu-latest] python-version: [3.9] - torch-version: [1.11.0] + torch-version: [1.12.0] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 02d5c2701436..3ff30e075068 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -16,12 +16,12 @@ jobs: matrix: os: [ubuntu-latest] python-version: [3.9] - torch-version: [1.10.0, 1.11.0] + torch-version: [1.11.0, 1.12.0] include: - - torch-version: 1.10.0 - torchvision-version: 0.11.1 - torch-version: 1.11.0 torchvision-version: 0.12.0 + - torch-version: 1.12.0 + torchvision-version: 0.13.0 steps: - uses: actions/checkout@v3 @@ -37,7 +37,7 @@ jobs: - name: Install internal dependencies run: | pip install torch-scatter -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html - pip install torch-sparse==0.6.13 -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html + pip install torch-sparse -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html pip install torch-cluster -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html pip install torch-spline-conv -f https://data.pyg.org/whl/torch-${{ matrix.torch-version }}+cpu.html diff --git a/CHANGELOG.md b/CHANGELOG.md index e5c350e81fb8..bf3822e817fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.0.5] - 2022-MM-DD ### Added +- Added PyTorch 1.12 support ([#4975](https://github.com/pyg-team/pytorch_geometric/pull/4975)) - Added `unbatch_edge_index` functionality for splitting an `edge_index` tensor according to a `batch` vector ([#4903](https://github.com/pyg-team/pytorch_geometric/pull/4903)) - Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) - Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951), [#4958](https://github.com/pyg-team/pytorch_geometric/pull/4958), [#4959](https://github.com/pyg-team/pytorch_geometric/pull/4959)) diff --git a/README.md b/README.md index ae0ba8f16e60..9fbf61d7d352 100644 --- a/README.md +++ b/README.md @@ -357,23 +357,25 @@ Given that you have [PyTorch >= 1.8.0 installed](https://pytorch.org/get-started conda install pyg -c pyg ``` +**Note:** Conda packages are not published for PyTorch 1.12 yet. + ### Pip Wheels We alternatively provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl). -#### PyTorch 1.11 +#### PyTorch 1.12 -To install the binaries for PyTorch 1.11.0, simply run +To install the binaries for PyTorch 1.12.0, simply run ``` -pip install torch-scatter -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html -pip install torch-sparse -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html +pip install torch-scatter -f https://data.pyg.org/whl/torch-1.12.0+${CUDA}.html +pip install torch-sparse -f https://data.pyg.org/whl/torch-1.12.0+${CUDA}.html pip install torch-geometric ``` -where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu113`, or `cu115` depending on your PyTorch installation (`torch.version.cuda`). +where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu113`, or `cu116` depending on your PyTorch installation. -| | `cpu` | `cu102` | `cu113` | `cu115` | +| | `cpu` | `cu102` | `cu113` | `cu116` | |-------------|-------|---------|---------|---------| | **Linux** | ✅ | ✅ | ✅ | ✅ | | **Windows** | ✅ | | ✅ | ✅ | @@ -382,36 +384,36 @@ where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu113`, or `cu115` For additional but optional functionality, run ``` -pip install torch-cluster -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html -pip install torch-spline-conv -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html +pip install torch-cluster -f https://data.pyg.org/whl/torch-1.12.0+${CUDA}.html +pip install torch-spline-conv -f https://data.pyg.org/whl/torch-1.12.0+${CUDA}.html ``` -#### PyTorch 1.10 +#### PyTorch 1.11 -To install the binaries for PyTorch 1.10.0, PyTorch 1.10.1 and 1.10.2, simply run +To install the binaries for PyTorch 1.11.0, simply run ``` -pip install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+${CUDA}.html -pip install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+${CUDA}.html +pip install torch-scatter -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html +pip install torch-sparse -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html pip install torch-geometric ``` -where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu111` or `cu113` depending on your PyTorch installation (`torch.version.cuda`). +where `${CUDA}` should be replaced by either `cpu`, `cu102`, `cu113`, or `cu115` depending on your PyTorch installation (`torch.version.cuda`). 
-| | `cpu` | `cu102` | `cu111` | `cu113` | +| | `cpu` | `cu102` | `cu113` | `cu115` | |-------------|-------|---------|---------|---------| | **Linux** | ✅ | ✅ | ✅ | ✅ | -| **Windows** | ✅ | ✅ | ✅ | ✅ | +| **Windows** | ✅ | | ✅ | ✅ | | **macOS** | ✅ | | | | For additional but optional functionality, run ``` -pip install torch-cluster -f https://data.pyg.org/whl/torch-1.10.0+${CUDA}.html -pip install torch-spline-conv -f https://data.pyg.org/whl/torch-1.10.0+${CUDA}.html +pip install torch-cluster -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html +pip install torch-spline-conv -f https://data.pyg.org/whl/torch-1.11.0+${CUDA}.html ``` -**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1 and PyTorch 1.9.0 (following the same procedure). +**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, and PyTorch 1.10.0/1.10.1/1.10.2 (following the same procedure). For older versions, you might need to explicitly specify the latest supported version number in order to prevent a manual installation from source. You can look up the latest supported version number [here](https://data.pyg.org/whl). diff --git a/docs/source/notes/installation.rst b/docs/source/notes/installation.rst index f8867d46941d..77b7e95ecda8 100644 --- a/docs/source/notes/installation.rst +++ b/docs/source/notes/installation.rst @@ -23,6 +23,8 @@ Given that you have `PyTorch >= 1.8.0 installed `_. We provide pip wheels for these packages for all major OS/PyTorch/CUDA combinations, see `here `__: -#. Ensure that at least PyTorch 1.10.0 is installed: +#. Ensure that at least PyTorch 1.11.0 is installed: .. code-block:: none python -c "import torch; print(torch.__version__)" - >>> 1.11.0 + >>> 1.12.0 #. Find the CUDA version PyTorch was installed with: @@ -52,21 +54,21 @@ We provide pip wheels for these packages for all major OS/PyTorch/CUDA combinati pip install torch-sparse -f https://data.pyg.org/whl/torch-${TORCH}+${CUDA}.html pip install torch-geometric - where :obj:`${CUDA}` and :obj:`${TORCH}` should be replaced by the specific CUDA version (:obj:`cpu`, :obj:`cu102`, :obj:`cu113`, :obj:`cu115`) and PyTorch version (:obj:`1.10.0`, :obj:`1.11.0`), respectively. - For example, for PyTorch 1.11.* and CUDA 11.3, type: + where :obj:`${CUDA}` and :obj:`${TORCH}` should be replaced by the specific CUDA version (:obj:`cpu`, :obj:`cu102`, :obj:`cu113`, :obj:`cu115`) and PyTorch version (:obj:`1.11.0`, :obj:`1.12.0`), respectively. + For example, for PyTorch 1.12.* and CUDA 11.6, type: .. code-block:: none - pip install torch-scatter -f https://data.pyg.org/whl/torch-1.11.0+cu113.html - pip install torch-sparse -f https://data.pyg.org/whl/torch-1.11.0+cu113.html + pip install torch-scatter -f https://data.pyg.org/whl/torch-1.12.0+cu116.html + pip install torch-sparse -f https://data.pyg.org/whl/torch-1.12.0+cu116.html pip install torch-geometric - For PyTorch 1.10.* and CUDA 10.2, type: + For PyTorch 1.11.* and CUDA 11.3, type: .. code-block:: none - pip install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+cu102.html - pip install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+cu102.html + pip install torch-scatter -f https://data.pyg.org/whl/torch-1.11.0+cu113.html + pip install torch-sparse -f https://data.pyg.org/whl/torch-1.11.0+cu113.html pip install torch-geometric #. 
Install additional packages *(optional)*: @@ -80,7 +82,7 @@ We provide pip wheels for these packages for all major OS/PyTorch/CUDA combinati following the same procedure as mentioned above. -Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1 and PyTorch 1.9.0 (following the same procedure). +**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, and PyTorch 1.10.0/1.10.1/1.10.2 (following the same procedure). **For older versions, you need to explicitly specify the latest supported version number** in order to prevent a manual installation from source. You can look up the latest supported version number `here `__. diff --git a/docs/source/notes/quick-start.html b/docs/source/notes/quick-start.html index 9a82a2f7375a..44ffd5bac302 100644 --- a/docs/source/notes/quick-start.html +++ b/docs/source/notes/quick-start.html @@ -75,8 +75,8 @@ diff --git a/docs/source/modules/nn.rst b/docs/source/modules/nn.rst index 09c195bd466a..29af7abc1f52 100644 --- a/docs/source/modules/nn.rst +++ b/docs/source/modules/nn.rst @@ -135,8 +135,11 @@ For combining via attention, we need to additionally specify the :obj:`in_channe .. code-block:: python - multi_aggr = aggr.MultiAggregation(['mean', 'std'], in_channels=64, - out_channels=64, num_heads=4)) + multi_aggr = aggr.MultiAggregation( + aggrs=['mean', 'std'], + mode='attn', + mode_kwargs=dict(in_channels=64, out_channels=64, num_heads=4), + ) If aggregations are given as a list, they will be automatically resolved to a :class:`~torch_geometric.nn.aggr.MultiAggregation`, *e.g.*, :obj:`aggr=['mean', 'std', 'median']`. From f221ed81b7bd2c019b0c421e620ac29e95dce52a Mon Sep 17 00:00:00 2001 From: Jakub Pietrak <97102979+JakubPietrakIntel@users.noreply.github.com> Date: Wed, 22 Mar 2023 09:47:28 +0100 Subject: [PATCH 1036/2432] Fix typo in CPU affinity tutorial (#7001) Fix typo in manual training time **decreased** (was increased) --- docs/source/advanced/cpu_affinity.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/advanced/cpu_affinity.rst b/docs/source/advanced/cpu_affinity.rst index f8c484832b07..c128aea4ecec 100644 --- a/docs/source/advanced/cpu_affinity.rst +++ b/docs/source/advanced/cpu_affinity.rst @@ -178,7 +178,7 @@ Training times for each model/dataset combination were obtained by taking a mean Then, the affinity means were normalized with respect to the mean baseline measurement. This value is denoted on the :math:`y`-axis. The labels above each result indicate the end-to-end performance gain from using the discussed configuration. -Over all model/dataset samples, the average training time is increased by **1.53x** for plain affinity and **1.85x** for the affinity with socket separation. +Over all model/dataset samples, the average training time is decreased by **1.53x** for plain affinity and **1.85x** for the affinity with socket separation. .. 
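As a quick aside on the wheel instructions above: the `${TORCH}` and `${CUDA}` placeholders can be read off the local environment rather than typed by hand. The following is only an illustrative sketch (not part of any patch in this series); it assumes PyTorch is already installed, reuses the `torch.__version__` / `torch.version.cuda` checks the instructions already rely on, and maps patch releases such as 1.12.1 onto the 1.12.0 wheel index as described above.

    import torch

    # Normalize the local PyTorch version to the x.y.0 index the wheels are hosted under.
    version = torch.__version__.split('+')[0]              # e.g. '1.12.1'
    base = '.'.join(version.split('.')[:2]) + '.0'         # e.g. '1.12.0'

    # CUDA suffix of the install, or 'cpu' for CPU-only builds.
    cuda = torch.version.cuda                              # e.g. '11.6' or None
    suffix = 'cpu' if cuda is None else 'cu' + cuda.replace('.', '')

    print(f'https://data.pyg.org/whl/torch-%7Bbase%7D+%7Bsuffix%7D.html')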
figure:: ../_figures/training_affinity.png :width: 100% From 86af56cc9700005da2560366394a8e6b0bc85c41 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 22 Mar 2023 10:28:17 +0100 Subject: [PATCH 1037/2432] `torch.compile(model, dynamic=True)` test (#7002) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- docs/source/tutorial/compile.rst | 8 +++-- examples/compile/gin.py | 3 ++ test/nn/test_compile_basic.py | 22 -------------- test/nn/test_compile_dynamic.py | 51 ++++++++++++++++++++++++++++++++ torch_geometric/testing/data.py | 7 +++-- 6 files changed, 65 insertions(+), 28 deletions(-) create mode 100644 test/nn/test_compile_dynamic.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f3cc1ccc475c..c56c0c3ce32d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,7 +37,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `Pad` transform ([#5940](https://github.com/pyg-team/pytorch_geometric/pull/5940), [#6697](https://github.com/pyg-team/pytorch_geometric/pull/6697), [#6731](https://github.com/pyg-team/pytorch_geometric/pull/6731), [#6758](https://github.com/pyg-team/pytorch_geometric/pull/6758)) - Added full batch mode to the inference benchmark ([#6631](https://github.com/pyg-team/pytorch_geometric/pull/6631)) - Added `cat` aggregation type to the `HeteroConv` class so that features can be concatenated during grouping ([#6634](https://github.com/pyg-team/pytorch_geometric/pull/6634)) -- Added `torch.compile` support and benchmark study ([#6610](https://github.com/pyg-team/pytorch_geometric/pull/6610), [#6952](https://github.com/pyg-team/pytorch_geometric/pull/6952), [#6953](https://github.com/pyg-team/pytorch_geometric/pull/6953), [#6980](https://github.com/pyg-team/pytorch_geometric/pull/6980), [#6983](https://github.com/pyg-team/pytorch_geometric/pull/6983), [#6984](https://github.com/pyg-team/pytorch_geometric/pull/6984), [#6985](https://github.com/pyg-team/pytorch_geometric/pull/6985), [#6986](https://github.com/pyg-team/pytorch_geometric/pull/6986), [#6989](https://github.com/pyg-team/pytorch_geometric/pull/6989)) +- Added `torch.compile` support and benchmark study ([#6610](https://github.com/pyg-team/pytorch_geometric/pull/6610), [#6952](https://github.com/pyg-team/pytorch_geometric/pull/6952), [#6953](https://github.com/pyg-team/pytorch_geometric/pull/6953), [#6980](https://github.com/pyg-team/pytorch_geometric/pull/6980), [#6983](https://github.com/pyg-team/pytorch_geometric/pull/6983), [#6984](https://github.com/pyg-team/pytorch_geometric/pull/6984), [#6985](https://github.com/pyg-team/pytorch_geometric/pull/6985), [#6986](https://github.com/pyg-team/pytorch_geometric/pull/6986), [#6989](https://github.com/pyg-team/pytorch_geometric/pull/6989), [#7002](https://github.com/pyg-team/pytorch_geometric/pull/7002)) - Added the `AntiSymmetricConv` layer ([#6577](https://github.com/pyg-team/pytorch_geometric/pull/6577)) - Added a mixin for Huggingface model hub integration ([#5930](https://github.com/pyg-team/pytorch_geometric/pull/5930), [#6591](https://github.com/pyg-team/pytorch_geometric/pull/6591)) - Added support for accelerated GNN layers in `nn.conv.cugraph` via `cugraph-ops` ([#6278](https://github.com/pyg-team/pytorch_geometric/pull/6278), [#6388](https://github.com/pyg-team/pytorch_geometric/pull/6388), [#6412](https://github.com/pyg-team/pytorch_geometric/pull/6412)) diff --git a/docs/source/tutorial/compile.rst b/docs/source/tutorial/compile.rst index 
e134a0082722..63b9fd5d46b0 100644 --- a/docs/source/tutorial/compile.rst +++ b/docs/source/tutorial/compile.rst @@ -60,9 +60,11 @@ We have incorporated multiple examples in :obj:`examples/compile` that further s #. `Node Classification `__ via :class:`~torch_geometric.nn.models.GCN` #. `Graph Classification `__ via :class:`~torch_geometric.nn.models.GIN` -Note that :meth:`torch.compile` will currently re-compile the model everytime it sees an input with a different shape. -That currently does not play that nicely with the way :pyg:`PyG` performs mini-batching. -While we are working with the :pytorch:`PyTorch` team to fix this limitation, a temporary workaround is to utilize the :class:`~torch_geometric.transforms.Pad` transformation to ensure that all inputs are of equal shape. +Note that :meth:`torch.compile(model, dynamic=True)` does sadly not yet work for :pyg:`PyG` models on :pytorch:`PyTorch 2.0`. +While static compilation via :meth:`torch.compile(model, dynamic=False)` works fine, it will re-compile the model everytime it sees an input with a different shape. +That currently does not play that nicely with the way :pyg:`PyG` performs mini-batching, and will hence lead to major slow-downs. +We are working with the :pytorch:`PyTorch` team to fix this limitation (see `this `_ :github:`GitHub` issue). +A temporary workaround is to utilize the :class:`torch_geometric.transforms.Pad` transformation to ensure that all inputs are of equal shape. If you notice that :meth:`~torch_geometric.compile` fails for a certain :pyg:`PyG` model, do not hesitate to reach out either on :github:`null` `GitHub `_ or :slack:`null` `Slack `_. We are very eager to improve :meth:`~torch_geometric.compile` support across the whole :pyg:`PyG` code base. diff --git a/examples/compile/gin.py b/examples/compile/gin.py index 8fb7e2ae08b4..46d9b6795345 100644 --- a/examples/compile/gin.py +++ b/examples/compile/gin.py @@ -45,6 +45,9 @@ def forward(self, x, edge_index, batch): model = GIN(dataset.num_features, dataset.num_classes).to(device) # Compile the model into an optimized version: +# Note that `compile(model, dynamic=True)` does not work yet in PyTorch 2.0, so +# we use `transforms.Pad` and static compilation as a current workaround. 
+# See: https://github.com/pytorch/pytorch/issues/94640 model = torch_geometric.compile(model) optimizer = torch.optim.Adam(model.parameters(), lr=0.01) diff --git a/test/nn/test_compile_basic.py b/test/nn/test_compile_basic.py index 8d836506731b..f89a7e00005b 100644 --- a/test/nn/test_compile_basic.py +++ b/test/nn/test_compile_basic.py @@ -79,28 +79,6 @@ def test_torch_compile(device): assert torch.allclose(out, expected, atol=1e-6) -@withCUDA -@onlyLinux -@disableExtensions -@withPackage('torch>=2.0.0') -def test_dynamic_torch_compile(device): - compiled_gather_scatter = torch_geometric.compile(gather_scatter) - - x = torch.randn(10, 16, device=device) - edge_index = torch.randint(0, x.size(0), (2, 40), device=device) - - expected = gather_scatter(x, edge_index) - out = compiled_gather_scatter(x, edge_index) - assert torch.allclose(out, expected, atol=1e-6) - - x = torch.randn(20, 16, device=device) - edge_index = torch.randint(0, x.size(0), (2, 80), device=device) - - expected = gather_scatter(x, edge_index) - out = compiled_gather_scatter(x, edge_index) - assert torch.allclose(out, expected, atol=1e-6) - - if __name__ == '__main__': import argparse diff --git a/test/nn/test_compile_dynamic.py b/test/nn/test_compile_dynamic.py new file mode 100644 index 000000000000..378eb56156a4 --- /dev/null +++ b/test/nn/test_compile_dynamic.py @@ -0,0 +1,51 @@ +import random + +import pytest +import torch +from torch import Tensor + +import torch_geometric +from torch_geometric.testing import ( + disableExtensions, + get_random_edge_index, + onlyLinux, + withCUDA, + withPackage, +) +from torch_geometric.utils import scatter + + +class MySAGEConv(torch.nn.Module): + def __init__(self, in_channels: int, out_channels: int): + super().__init__() + self.lin_src = torch.nn.Linear(in_channels, out_channels) + self.lin_dst = torch.nn.Linear(in_channels, out_channels) + + def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: + x_j = x[edge_index[0]] + out = scatter(x_j, edge_index[1], dim_size=x.size(0), reduce='mean') + return self.lin_src(out) + self.lin_dst(x) + + +@withCUDA +@onlyLinux +@disableExtensions +@withPackage('torch>=2.0.0') +def test_dynamic_torch_compile(device): + conv = MySAGEConv(64, 64).to(device) + conv = torch_geometric.compile(conv, dynamic=True) + + optimizer = torch.optim.Adam(conv.parameters(), lr=0.01) + + with pytest.raises(RuntimeError): + for _ in range(10): + N = random.randrange(100, 500) + E = random.randrange(200, 1000) + + x = torch.randn(N, 64, device=device) + edge_index = get_random_edge_index(N, N, E, device=device) + + optimizer.zero_grad() + expected = conv(x, edge_index) + expected.mean().backward() + optimizer.step() diff --git a/torch_geometric/testing/data.py b/torch_geometric/testing/data.py index 4044b9f799d8..9e8c776d575b 100644 --- a/torch_geometric/testing/data.py +++ b/torch_geometric/testing/data.py @@ -11,9 +11,12 @@ def get_random_edge_index( num_dst_nodes: int, num_edges: int, dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, ) -> Tensor: - row = torch.randint(num_src_nodes, (num_edges, ), dtype=dtype) - col = torch.randint(num_dst_nodes, (num_edges, ), dtype=dtype) + row = torch.randint(num_src_nodes, (num_edges, ), dtype=dtype, + device=device) + col = torch.randint(num_dst_nodes, (num_edges, ), dtype=dtype, + device=device) return torch.stack([row, col], dim=0) From 5f4a21c96e91bb4d974b7ab5f65ebabce9effd38 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 22 Mar 2023 12:14:59 +0100 Subject: [PATCH 1038/2432] 
Memory-efficient `one_hot` implementation (#7005) --- CHANGELOG.md | 1 + test/nn/conv/test_wl_conv.py | 4 +- test/utils/test_one_hot.py | 17 +++++++++ torch_geometric/datasets/ged_dataset.py | 6 +-- torch_geometric/datasets/linkx_dataset.py | 4 +- torch_geometric/datasets/qm9.py | 10 ++--- torch_geometric/io/sdf.py | 5 +-- torch_geometric/io/tu.py | 17 ++++++--- torch_geometric/nn/conv/rgcn_conv.py | 5 +-- .../nn/models/correct_and_smooth.py | 10 ++--- torch_geometric/nn/models/label_prop.py | 5 +-- torch_geometric/transforms/grid_sampling.py | 6 +-- torch_geometric/transforms/one_hot_degree.py | 5 +-- torch_geometric/utils/__init__.py | 2 + torch_geometric/utils/one_hot.py | 38 +++++++++++++++++++ 15 files changed, 94 insertions(+), 41 deletions(-) create mode 100644 test/utils/test_one_hot.py create mode 100644 torch_geometric/utils/one_hot.py diff --git a/CHANGELOG.md b/CHANGELOG.md index c56c0c3ce32d..51c67378e921 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added a memory-efficient `utils.one_hot` implementation ([#7005](https://github.com/pyg-team/pytorch_geometric/pull/7005)) - Added `HeteroDictLinear` and an optimized `FastHGTConv` module ([#6178](https://github.com/pyg-team/pytorch_geometric/pull/6178), [#6998](https://github.com/pyg-team/pytorch_geometric/pull/6998)) - Added the `DenseGATConv` module ([#6928](https://github.com/pyg-team/pytorch_geometric/pull/6928)) - Added `trim_to_layer` utility function for more efficient `NeighborLoader` use-cases ([#6661](https://github.com/pyg-team/pytorch_geometric/pull/6661)) diff --git a/test/nn/conv/test_wl_conv.py b/test/nn/conv/test_wl_conv.py index 5378263616ce..889e578da9d9 100644 --- a/test/nn/conv/test_wl_conv.py +++ b/test/nn/conv/test_wl_conv.py @@ -1,13 +1,13 @@ import torch -import torch.nn.functional as F from torch_sparse import SparseTensor from torch_geometric.nn import WLConv +from torch_geometric.utils import one_hot def test_wl_conv(): x1 = torch.tensor([1, 0, 0, 1]) - x2 = F.one_hot(x1).to(torch.float) + x2 = one_hot(x1) edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) adj1 = SparseTensor.from_edge_index(edge_index) adj2 = adj1.to_torch_sparse_csc_tensor() diff --git a/test/utils/test_one_hot.py b/test/utils/test_one_hot.py new file mode 100644 index 000000000000..3a978bd41380 --- /dev/null +++ b/test/utils/test_one_hot.py @@ -0,0 +1,17 @@ +import torch + +from torch_geometric.utils import one_hot + + +def test_one_hot(): + index = torch.tensor([0, 1, 2]) + + out = one_hot(index) + assert out.size() == (3, 3) + assert out.dtype == torch.float + assert out.tolist() == [[1, 0, 0], [0, 1, 0], [0, 0, 1]] + + out = one_hot(index, num_classes=4, dtype=torch.long) + assert out.size() == (3, 4) + assert out.dtype == torch.long + assert out.tolist() == [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]] diff --git a/torch_geometric/datasets/ged_dataset.py b/torch_geometric/datasets/ged_dataset.py index 5501d429ecce..acac04fa58a8 100644 --- a/torch_geometric/datasets/ged_dataset.py +++ b/torch_geometric/datasets/ged_dataset.py @@ -5,7 +5,6 @@ from typing import Callable, List, Optional import torch -import torch.nn.functional as F from torch_geometric.data import ( Data, @@ -14,7 +13,7 @@ extract_tar, extract_zip, ) -from torch_geometric.utils import to_undirected +from torch_geometric.utils import one_hot, to_undirected class GEDDataset(InMemoryDataset): @@ -201,8 +200,7 @@ def process(self): x = 
torch.zeros(data.num_nodes, dtype=torch.long) for node, info in G.nodes(data=True): x[int(node)] = self.types.index(info['type']) - data.x = F.one_hot(x, num_classes=len(self.types)).to( - torch.float) + data.x = one_hot(x, num_classes=len(self.types)) if self.pre_filter is not None and not self.pre_filter(data): continue diff --git a/torch_geometric/datasets/linkx_dataset.py b/torch_geometric/datasets/linkx_dataset.py index bdee351d27b5..0e2344090a53 100644 --- a/torch_geometric/datasets/linkx_dataset.py +++ b/torch_geometric/datasets/linkx_dataset.py @@ -3,9 +3,9 @@ import numpy as np import torch -import torch.nn.functional as F from torch_geometric.data import Data, InMemoryDataset, download_url +from torch_geometric.utils import one_hot class LINKXDataset(InMemoryDataset): @@ -132,7 +132,7 @@ def _process_facebook(self): x = torch.cat([metadata[:, :1], metadata[:, 2:]], dim=-1) for i in range(x.size(1)): _, out = x[:, i].unique(return_inverse=True) - xs.append(F.one_hot(out).to(torch.float)) + xs.append(one_hot(out)) x = torch.cat(xs, dim=-1) data = Data(x=x, edge_index=edge_index, y=y) diff --git a/torch_geometric/datasets/qm9.py b/torch_geometric/datasets/qm9.py index 7e471249025d..c719b0d57ec7 100644 --- a/torch_geometric/datasets/qm9.py +++ b/torch_geometric/datasets/qm9.py @@ -4,7 +4,6 @@ from typing import Callable, List, Optional import torch -import torch.nn.functional as F from tqdm import tqdm from torch_geometric.data import ( @@ -13,7 +12,7 @@ download_url, extract_zip, ) -from torch_geometric.utils import scatter +from torch_geometric.utils import one_hot, scatter HAR2EV = 27.211386246 KCALMOL2EV = 0.04336414 @@ -271,8 +270,7 @@ def process(self): edge_index = torch.tensor([row, col], dtype=torch.long) edge_type = torch.tensor(edge_type, dtype=torch.long) - edge_attr = F.one_hot(edge_type, - num_classes=len(bonds)).to(torch.float) + edge_attr = one_hot(edge_type, num_classes=len(bonds)) perm = (edge_index[0] * N + edge_index[1]).argsort() edge_index = edge_index[:, perm] @@ -283,10 +281,10 @@ def process(self): hs = (z == 1).to(torch.float) num_hs = scatter(hs[row], col, dim_size=N, reduce='sum').tolist() - x1 = F.one_hot(torch.tensor(type_idx), num_classes=len(types)) + x1 = one_hot(torch.tensor(type_idx), num_classes=len(types)) x2 = torch.tensor([atomic_number, aromatic, sp, sp2, sp3, num_hs], dtype=torch.float).t().contiguous() - x = torch.cat([x1.to(torch.float), x2], dim=-1) + x = torch.cat([x1, x2], dim=-1) y = target[i].unsqueeze(0) name = mol.GetProp('_Name') diff --git a/torch_geometric/io/sdf.py b/torch_geometric/io/sdf.py index 7aa1a493d66d..11756d275689 100644 --- a/torch_geometric/io/sdf.py +++ b/torch_geometric/io/sdf.py @@ -1,9 +1,8 @@ import torch -import torch.nn.functional as F from torch_geometric.data import Data from torch_geometric.io import parse_txt_array -from torch_geometric.utils import coalesce +from torch_geometric.utils import coalesce, one_hot elems = {'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4} @@ -15,7 +14,7 @@ def parse_sdf(src): atom_block = src[1:num_atoms + 1] pos = parse_txt_array(atom_block, end=3) x = torch.tensor([elems[item.split()[3]] for item in atom_block]) - x = F.one_hot(x, num_classes=len(elems)) + x = one_hot(x, num_classes=len(elems)) bond_block = src[1 + num_atoms:1 + num_atoms + num_bonds] row, col = parse_txt_array(bond_block, end=2, dtype=torch.long).t() - 1 diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py index 01b36025ec7b..0083a380620e 100644 --- a/torch_geometric/io/tu.py +++ b/torch_geometric/io/tu.py 
@@ -4,11 +4,10 @@ import numpy as np import torch -import torch.nn.functional as F from torch_geometric.data import Data from torch_geometric.io import read_txt_array -from torch_geometric.utils import coalesce, remove_self_loops +from torch_geometric.utils import coalesce, one_hot, remove_self_loops names = [ 'A', 'graph_indicator', 'node_labels', 'node_attributes' @@ -36,8 +35,11 @@ def read_tu_data(folder, prefix): node_labels = node_labels.unsqueeze(-1) node_labels = node_labels - node_labels.min(dim=0)[0] node_labels = node_labels.unbind(dim=-1) - node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels] - node_labels = torch.cat(node_labels, dim=-1).to(torch.float) + node_labels = [one_hot(x) for x in node_labels] + if len(node_labels) == 1: + node_labels = node_labels[0] + else: + node_labels = torch.cat(node_labels, dim=-1) edge_attributes = torch.empty((edge_index.size(1), 0)) if 'edge_attributes' in names: @@ -52,8 +54,11 @@ def read_tu_data(folder, prefix): edge_labels = edge_labels.unsqueeze(-1) edge_labels = edge_labels - edge_labels.min(dim=0)[0] edge_labels = edge_labels.unbind(dim=-1) - edge_labels = [F.one_hot(e, num_classes=-1) for e in edge_labels] - edge_labels = torch.cat(edge_labels, dim=-1).to(torch.float) + edge_labels = [one_hot(e) for e in edge_labels] + if len(edge_labels) == 1: + edge_labels = edge_labels[0] + else: + edge_labels = torch.cat(edge_labels, dim=-1) x = cat([node_attributes, node_labels]) edge_attr = cat([edge_attributes, edge_labels]) diff --git a/torch_geometric/nn/conv/rgcn_conv.py b/torch_geometric/nn/conv/rgcn_conv.py index 930311b6ca36..938f687590bc 100644 --- a/torch_geometric/nn/conv/rgcn_conv.py +++ b/torch_geometric/nn/conv/rgcn_conv.py @@ -1,7 +1,6 @@ from typing import Optional, Tuple, Union import torch -import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from torch.nn import Parameter as Param @@ -16,7 +15,7 @@ pyg_lib, torch_sparse, ) -from torch_geometric.utils import index_sort, scatter, spmm +from torch_geometric.utils import index_sort, one_hot, scatter, spmm from torch_geometric.utils.sparse import index2ptr @@ -351,7 +350,7 @@ def aggregate(self, inputs: Tensor, edge_type: Tensor, index: Tensor, # Compute normalization in separation for each `edge_type`. if self.aggr == 'mean': - norm = F.one_hot(edge_type, self.num_relations).to(torch.float) + norm = one_hot(edge_type, self.num_relations, dtype=inputs.dtype) norm = scatter(norm, index, dim=0, dim_size=dim_size)[index] norm = torch.gather(norm, 1, edge_type.view(-1, 1)) norm = 1. / norm.clamp_(1.) 
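The change to `RGCNConv`'s mean normalization above is representative of this whole patch: each call site swaps `F.one_hot(...).to(dtype)` for the `torch_geometric.utils.one_hot` helper added later in the patch, which fills a tensor of the requested dtype directly instead of first materializing an `int64` one-hot matrix and then casting it. A minimal sketch of the equivalence, purely illustrative and not part of the patch (the `edge_type` values are made up):

    import torch
    import torch.nn.functional as F

    edge_type = torch.tensor([0, 2, 1, 2])  # hypothetical relation indices
    num_relations = 3

    # Old path: build an int64 one-hot matrix, then cast (two allocations).
    old = F.one_hot(edge_type, num_relations).to(torch.float)

    # New path: allocate in the target dtype and scatter ones in place.
    new = torch.zeros((edge_type.size(0), num_relations), dtype=torch.float)
    new.scatter_(1, edge_type.unsqueeze(1), 1)

    assert torch.equal(old, new)

The per-relation normalization then proceeds as before, by scattering these one-hot rows over the destination index and gathering the count for each edge's own relation.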
diff --git a/torch_geometric/nn/models/correct_and_smooth.py b/torch_geometric/nn/models/correct_and_smooth.py index 8b7de77f50dc..c27f39d062e9 100644 --- a/torch_geometric/nn/models/correct_and_smooth.py +++ b/torch_geometric/nn/models/correct_and_smooth.py @@ -1,9 +1,9 @@ import torch -import torch.nn.functional as F from torch import Tensor from torch_geometric.nn.models import LabelPropagation from torch_geometric.typing import Adj, OptTensor +from torch_geometric.utils import one_hot class CorrectAndSmooth(torch.nn.Module): @@ -97,8 +97,8 @@ def correct(self, y_soft: Tensor, y_true: Tensor, mask: Tensor, assert y_true.size(0) == numel if y_true.dtype == torch.long and y_true.size(0) == y_true.numel(): - y_true = F.one_hot(y_true.view(-1), y_soft.size(-1)) - y_true = y_true.to(y_soft.dtype) + y_true = one_hot(y_true.view(-1), num_classes=y_soft.size(-1), + dtype=y_soft.dtype) error = torch.zeros_like(y_soft) error[mask] = y_true - y_soft[mask] @@ -141,8 +141,8 @@ def smooth(self, y_soft: Tensor, y_true: Tensor, mask: Tensor, assert y_true.size(0) == numel if y_true.dtype == torch.long and y_true.size(0) == y_true.numel(): - y_true = F.one_hot(y_true.view(-1), y_soft.size(-1)) - y_true = y_true.to(y_soft.dtype) + y_true = one_hot(y_true.view(-1), num_classes=y_soft.size(-1), + dtype=y_soft.dtype) y_soft = y_soft.clone() y_soft[mask] = y_true diff --git a/torch_geometric/nn/models/label_prop.py b/torch_geometric/nn/models/label_prop.py index 7b772ca64b27..41645db3f191 100644 --- a/torch_geometric/nn/models/label_prop.py +++ b/torch_geometric/nn/models/label_prop.py @@ -1,13 +1,12 @@ from typing import Callable, Optional import torch -import torch.nn.functional as F from torch import Tensor from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.typing import Adj, OptTensor, SparseTensor -from torch_geometric.utils import spmm +from torch_geometric.utils import one_hot, spmm class LabelPropagation(MessagePassing): @@ -62,7 +61,7 @@ def forward( (default: :obj:`None`) """ if y.dtype == torch.long and y.size(0) == y.numel(): - y = F.one_hot(y.view(-1)).to(torch.float) + y = one_hot(y.view(-1)) out = y if mask is not None: diff --git a/torch_geometric/transforms/grid_sampling.py b/torch_geometric/transforms/grid_sampling.py index cd429ecdf34e..4042abe6ae78 100644 --- a/torch_geometric/transforms/grid_sampling.py +++ b/torch_geometric/transforms/grid_sampling.py @@ -2,14 +2,13 @@ from typing import List, Optional, Union import torch -import torch.nn.functional as F from torch import Tensor import torch_geometric from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.utils import scatter +from torch_geometric.utils import one_hot, scatter @functional_transform('grid_sampling') @@ -53,8 +52,7 @@ def __call__(self, data: Data) -> Data: if torch.is_tensor(item) and item.size(0) == num_nodes: if key == 'y': - item = F.one_hot(item) - item = scatter(item, c, dim=0, reduce='sum') + item = scatter(one_hot(item), c, dim=0, reduce='sum') data[key] = item.argmax(dim=-1) elif key == 'batch': data[key] = item[perm] diff --git a/torch_geometric/transforms/one_hot_degree.py b/torch_geometric/transforms/one_hot_degree.py index db8af4b81c52..d3cdf96661db 100644 --- a/torch_geometric/transforms/one_hot_degree.py +++ b/torch_geometric/transforms/one_hot_degree.py @@ -1,10 +1,9 @@ import torch -import torch.nn.functional as 
F from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.utils import degree +from torch_geometric.utils import degree, one_hot @functional_transform('one_hot_degree') @@ -33,7 +32,7 @@ def __init__( def __call__(self, data: Data) -> Data: idx, x = data.edge_index[1 if self.in_degree else 0], data.x deg = degree(idx, data.num_nodes, dtype=torch.long) - deg = F.one_hot(deg, num_classes=self.max_degree + 1).to(torch.float) + deg = one_hot(deg, num_classes=self.max_degree + 1) if x is not None and self.cat: x = x.view(-1, 1) if x.dim() == 1 else x diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 69d04c378bdb..d8d771999fc8 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -30,6 +30,7 @@ to_torch_csc_tensor, to_edge_index) from .spmm import spmm from .unbatch import unbatch, unbatch_edge_index +from .one_hot import one_hot from .normalized_cut import normalized_cut from .grid import grid from .geodesic import geodesic_distance @@ -101,6 +102,7 @@ 'spmm', 'unbatch', 'unbatch_edge_index', + 'one_hot', 'normalized_cut', 'grid', 'geodesic_distance', diff --git a/torch_geometric/utils/one_hot.py b/torch_geometric/utils/one_hot.py new file mode 100644 index 000000000000..dbd472df827e --- /dev/null +++ b/torch_geometric/utils/one_hot.py @@ -0,0 +1,38 @@ +from typing import Optional + +import torch +from torch import Tensor + + +def one_hot( + index: Tensor, + num_classes: Optional[int] = None, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + r"""Takes a one-dimensional :obj:`index` tensor and returns a one-hot + encoded representation of it with shape :obj:`[*, num_classes]` that has + zeros everywhere except where the index of the last dimension matches the + corresponding value of the input tensor, in which case it will be :obj:`1`. + + .. note:: + This is a more memory-efficient version of + :meth:`torch.nn.functional.one_hot` as you can customize the output + :obj:`dtype`. + + Args: + index (torch.Tensor): The one-dimensional input tensor. + num_classes (int, optional): The total number of classes. If set to + :obj:`None`, the number of classes will be inferred as one greater + than the largest class value in the input tensor. + (default: :obj:`None`) + dtype (torch.dtype, optional): The :obj:`dtype` of the output tensor.
+ """ + if index.dim() != 1: + raise ValueError("'index' tensor needs to be one-dimensional") + + if num_classes is None: + num_classes = int(index.max()) + 1 + + out = torch.zeros((index.size(0), num_classes), dtype=dtype, + device=index.device) + return out.scatter_(1, index.unsqueeze(1), 1) From c78f358bb664a15650c68113f7108be89aca98da Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 23 Mar 2023 07:05:11 +0100 Subject: [PATCH 1039/2432] Fix full test (#7007) --- test/nn/conv/test_gen_conv.py | 32 ++++++++++++--------- test/nn/conv/test_graph_conv.py | 8 +++--- test/nn/dense/test_dense_gat_conv.py | 8 +++--- test/nn/dense/test_dense_gcn_conv.py | 8 +++--- test/nn/dense/test_linear.py | 2 -- test/utils/test_sparse.py | 9 +++++- torch_geometric/compile.py | 7 ++--- torch_geometric/nn/conv/cluster_gcn_conv.py | 12 ++------ torch_geometric/nn/conv/gatv2_conv.py | 1 + torch_geometric/nn/conv/gcn_conv.py | 13 +++------ torch_geometric/utils/loop.py | 23 +++++++++++---- torch_geometric/utils/sparse.py | 23 +++++++++++---- 12 files changed, 84 insertions(+), 62 deletions(-) diff --git a/test/nn/conv/test_gen_conv.py b/test/nn/conv/test_gen_conv.py index 21aa14e6b686..a66de738c79c 100644 --- a/test/nn/conv/test_gen_conv.py +++ b/test/nn/conv/test_gen_conv.py @@ -40,10 +40,12 @@ def test_gen_conv(aggr): if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out11) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11) - assert torch.allclose(jit(x1, edge_index, value), out12) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12) + assert torch.allclose(jit(x1, edge_index), out11, atol=1e-6) + assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11, + atol=1e-6) + assert torch.allclose(jit(x1, edge_index, value), out12, atol=1e-6) + assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12, + atol=1e-6) t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) @@ -71,10 +73,13 @@ def test_gen_conv(aggr): if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out21) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21) - assert torch.allclose(jit((x1, x2), edge_index, value), out22) - assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22) + assert torch.allclose(jit((x1, x2), edge_index), out21, atol=1e-6) + assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21, + atol=1e-6) + assert torch.allclose(jit((x1, x2), edge_index, value), out22, + atol=1e-6) + assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22, + atol=1e-6) t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) @@ -120,13 +125,14 @@ def test_gen_conv(aggr): if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, value), out1) + assert torch.allclose(jit((x1, x2), edge_index, value), out1, + atol=1e-6) assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1) + out1, atol=1e-6) assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2) + out2, atol=1e-6) t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert 
torch.allclose(jit((x1, x2), adj1.t()), out1) - assert torch.allclose(jit((x1, None), adj1.t()), out2) + assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj1.t()), out2, atol=1e-6) diff --git a/test/nn/conv/test_graph_conv.py b/test/nn/conv/test_graph_conv.py index dd0de6439ce9..c80bb6d4de48 100644 --- a/test/nn/conv/test_graph_conv.py +++ b/test/nn/conv/test_graph_conv.py @@ -80,7 +80,7 @@ def test_graph_conv(): t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out21) - assert torch.allclose(jit((x1, x2), adj2.t()), out22) - assert torch.allclose(jit((x1, None), adj1.t()), out23) - assert torch.allclose(jit((x1, None), adj2.t()), out24) + assert torch.allclose(jit((x1, x2), adj1.t()), out21, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out22, atol=1e-6) + assert torch.allclose(jit((x1, None), adj1.t()), out23, atol=1e-6) + assert torch.allclose(jit((x1, None), adj2.t()), out24, atol=1e-6) diff --git a/test/nn/dense/test_dense_gat_conv.py b/test/nn/dense/test_dense_gat_conv.py index 72c4ccc4b153..94a5f7fd5068 100644 --- a/test/nn/dense/test_dense_gat_conv.py +++ b/test/nn/dense/test_dense_gat_conv.py @@ -41,14 +41,14 @@ def test_dense_gat_conv(heads, concat): dense_out = dense_conv(x, adj, mask) - assert dense_out[1, 2].abs().sum() == 0 - dense_out = dense_out.view(6, dense_out.size(-1))[:-1] - assert torch.allclose(sparse_out, dense_out, atol=1e-4) - if is_full_test(): jit = torch.jit.script(dense_conv) assert torch.allclose(jit(x, adj, mask), dense_out) + assert dense_out[1, 2].abs().sum() == 0 + dense_out = dense_out.view(6, dense_out.size(-1))[:-1] + assert torch.allclose(sparse_out, dense_out, atol=1e-4) + def test_dense_gat_conv_with_broadcasting(): batch_size, num_nodes, channels = 8, 3, 16 diff --git a/test/nn/dense/test_dense_gcn_conv.py b/test/nn/dense/test_dense_gcn_conv.py index 7d43ec0fcb6d..20237b80addf 100644 --- a/test/nn/dense/test_dense_gcn_conv.py +++ b/test/nn/dense/test_dense_gcn_conv.py @@ -39,14 +39,14 @@ def test_dense_gcn_conv(): dense_out = dense_conv(x, adj, mask) assert dense_out.size() == (2, 3, channels) - assert dense_out[1, 2].abs().sum() == 0 - dense_out = dense_out.view(6, channels)[:-1] - assert torch.allclose(sparse_out, dense_out, atol=1e-4) - if is_full_test(): jit = torch.jit.script(dense_conv) assert torch.allclose(jit(x, adj, mask), dense_out) + assert dense_out[1, 2].abs().sum() == 0 + dense_out = dense_out.view(6, channels)[:-1] + assert torch.allclose(sparse_out, dense_out, atol=1e-4) + def test_dense_gcn_conv_with_broadcasting(): batch_size, num_nodes, channels = 8, 3, 16 diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index d5e9185f152e..e3b04cd46590 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -125,7 +125,6 @@ def test_lazy_hetero_linear(): out = lin(x, type_vec) assert out.size() == (3, 32) - assert str(lin) == 'HeteroLinear(16, 32, num_types=3, bias=True)' def test_hetero_dict_linear(): @@ -160,7 +159,6 @@ def test_lazy_hetero_dict_linear(): assert len(out_dict) == 2 assert out_dict['v'].size() == (3, 32) assert out_dict['w'].size() == (2, 32) - assert str(lin) == "HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=True)" @withPackage('pyg_lib') diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index fd11dcdcf68e..7814846a6b9d 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ 
-76,7 +76,14 @@ def test_to_torch_coo_tensor(): ]) edge_attr = torch.randn(edge_index.size(1), 8) - adj = to_torch_coo_tensor(edge_index) + adj = to_torch_coo_tensor(edge_index, is_coalesced=False) + assert adj.is_coalesced() + assert adj.size() == (4, 4) + assert adj.layout == torch.sparse_coo + assert torch.allclose(adj.indices(), edge_index) + + adj = to_torch_coo_tensor(edge_index, is_coalesced=True) + assert adj.is_coalesced() assert adj.size() == (4, 4) assert adj.layout == torch.sparse_coo assert torch.allclose(adj.indices(), edge_index) diff --git a/torch_geometric/compile.py b/torch_geometric/compile.py index e38f389dec61..6fcfc1966a79 100644 --- a/torch_geometric/compile.py +++ b/torch_geometric/compile.py @@ -76,7 +76,8 @@ def fn(model: Callable) -> Callable: for key in prev_state.keys(): setattr(torch_geometric.typing, key, False) - # Temporarily adjust the logging level of `torch.compile`: + # Adjust the logging level of `torch.compile`: + # TODO (matthias) Disable only temporarily prev_log_level = { 'torch._dynamo': logging.getLogger('torch._dynamo').level, 'torch._inductor': logging.getLogger('torch._inductor').level, @@ -91,8 +92,4 @@ def fn(model: Callable) -> Callable: # Finally, run `torch.compile` to create an optimized version: out = torch.compile(model, *args, **kwargs) - # Restore the previous state: - for key, value in prev_log_level.items(): - logging.getLogger(key).setLevel(value) - return out diff --git a/torch_geometric/nn/conv/cluster_gcn_conv.py b/torch_geometric/nn/conv/cluster_gcn_conv.py index 3a1571f70400..ed715c829430 100644 --- a/torch_geometric/nn/conv/cluster_gcn_conv.py +++ b/torch_geometric/nn/conv/cluster_gcn_conv.py @@ -12,7 +12,7 @@ spmm, to_edge_index, ) -from torch_geometric.utils.sparse import get_sparse_diag, set_sparse_value +from torch_geometric.utils.sparse import set_sparse_value class ClusterGCNConv(MessagePassing): @@ -71,6 +71,7 @@ def reset_parameters(self): self.lin_root.reset_parameters() def forward(self, x: Tensor, edge_index: Adj) -> Tensor: + num_nodes = x.size(self.node_dim) edge_weight: OptTensor = None if isinstance(edge_index, SparseTensor): @@ -94,13 +95,7 @@ def forward(self, x: Tensor, edge_index: Adj) -> Tensor: "supported in 'gcn_norm'") if self.add_self_loops: - diag = get_sparse_diag(edge_index.size(0), 1.0, - edge_index.layout, edge_index.dtype, - edge_index.device) - edge_index = edge_index + diag - - if edge_index.layout == torch.sparse_coo: - edge_index = edge_index.coalesce() + edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes) col_and_row, value = to_edge_index(edge_index) col, row = col_and_row[0], col_and_row[1] @@ -112,7 +107,6 @@ def forward(self, x: Tensor, edge_index: Adj) -> Tensor: edge_index = set_sparse_value(edge_index, edge_weight) else: - num_nodes = x.size(self.node_dim) if self.add_self_loops: edge_index, _ = remove_self_loops(edge_index) edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes) diff --git a/torch_geometric/nn/conv/gatv2_conv.py b/torch_geometric/nn/conv/gatv2_conv.py index bb72856b55ec..2de2d95685b6 100644 --- a/torch_geometric/nn/conv/gatv2_conv.py +++ b/torch_geometric/nn/conv/gatv2_conv.py @@ -251,6 +251,7 @@ def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj, size=None) alpha = self._alpha + assert alpha is not None self._alpha = None if self.concat: diff --git a/torch_geometric/nn/conv/gcn_conv.py b/torch_geometric/nn/conv/gcn_conv.py index 0b4d6a54ccef..cc61dfc0b05f 100644 --- a/torch_geometric/nn/conv/gcn_conv.py +++ 
b/torch_geometric/nn/conv/gcn_conv.py @@ -14,15 +14,16 @@ SparseTensor, torch_sparse, ) +from torch_geometric.utils import add_remaining_self_loops +from torch_geometric.utils import add_self_loops as add_self_loops_fn from torch_geometric.utils import ( - add_remaining_self_loops, is_torch_sparse_tensor, scatter, spmm, to_edge_index, ) from torch_geometric.utils.num_nodes import maybe_num_nodes -from torch_geometric.utils.sparse import get_sparse_diag, set_sparse_value +from torch_geometric.utils.sparse import set_sparse_value @torch.jit._overload @@ -70,14 +71,8 @@ def gcn_norm(edge_index, edge_weight=None, num_nodes=None, improved=False, "supported in 'gcn_norm'") adj_t = edge_index - if add_self_loops: - diag = get_sparse_diag(adj_t.size(0), fill_value, adj_t.layout, - adj_t.dtype, adj_t.device) - adj_t = adj_t + diag - - if adj_t.layout == torch.sparse_coo: - adj_t = adj_t.coalesce() + adj_t, _ = add_self_loops_fn(adj_t, None, fill_value, num_nodes) edge_index, value = to_edge_index(adj_t) col, row = edge_index[0], edge_index[1] diff --git a/torch_geometric/utils/loop.py b/torch_geometric/utils/loop.py index d21283d76876..bb847e2fa9e2 100644 --- a/torch_geometric/utils/loop.py +++ b/torch_geometric/utils/loop.py @@ -10,6 +10,7 @@ is_torch_sparse_tensor, to_edge_index, to_torch_coo_tensor, + to_torch_csr_tensor, ) @@ -65,21 +66,25 @@ def remove_self_loops( [3, 4]])) """ size: Optional[Tuple[int, int]] = None + layout: Optional[int] = None - is_sparse = is_torch_sparse_tensor(edge_index) - if is_sparse: + if is_torch_sparse_tensor(edge_index): assert edge_attr is None + layout = edge_index.layout size = (edge_index.size(0), edge_index.size(1)) edge_index, edge_attr = to_edge_index(edge_index) mask = edge_index[0] != edge_index[1] edge_index = edge_index[:, mask] - if is_sparse: + if layout is not None: assert edge_attr is not None edge_attr = edge_attr[mask] - adj = to_torch_coo_tensor(edge_index, edge_attr, size=size) - return adj, None + if str(layout) == 'torch.sparse_coo': # str(...) for TorchScript :( + return to_torch_coo_tensor(edge_index, edge_attr, size, True), None + elif str(layout) == 'torch.sparse_csr': + return to_torch_csr_tensor(edge_index, edge_attr, size, True), None + raise ValueError(f"Unexpected sparse tensor layout (got '{layout}')") if edge_attr is None: return edge_index, None @@ -220,10 +225,12 @@ def add_self_loops( [1, 0, 0, 0, 1]]), tensor([0.5000, 0.5000, 0.5000, 1.0000, 0.5000])) """ + layout: Optional[int] = None is_sparse = is_torch_sparse_tensor(edge_index) if is_sparse: assert edge_attr is None + layout = edge_index.layout size = (edge_index.size(0), edge_index.size(1)) edge_index, edge_attr = to_edge_index(edge_index) elif isinstance(num_nodes, (tuple, list)): @@ -261,7 +268,11 @@ def add_self_loops( edge_index = torch.cat([edge_index, loop_index], dim=1) if is_sparse: - return to_torch_coo_tensor(edge_index, edge_attr, size=size), None + if str(layout) == 'torch.sparse_coo': # str(...) 
for TorchScript :( + return to_torch_coo_tensor(edge_index, edge_attr, size), None + elif str(layout) == 'torch.sparse_csr': + return to_torch_csr_tensor(edge_index, edge_attr, size), None + raise ValueError(f"Unexpected sparse tensor layout (got '{layout}')") return edge_index, edge_attr diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py index 8de55e8a4534..e2c6034bb9c9 100644 --- a/torch_geometric/utils/sparse.py +++ b/torch_geometric/utils/sparse.py @@ -85,6 +85,7 @@ def to_torch_coo_tensor( edge_index: Tensor, edge_attr: Optional[Tensor] = None, size: Optional[Union[int, Tuple[int, int]]] = None, + is_coalesced: bool = False, ) -> Tensor: r"""Converts a sparse adjacency matrix defined by edge indices and edge attributes to a :class:`torch.sparse.Tensor` with layout @@ -99,6 +100,9 @@ def to_torch_coo_tensor( If given as an integer, will create a quadratic sparse matrix. If set to :obj:`None`, will infer a quadratic sparse matrix based on :obj:`edge_index.max() + 1`. (default: :obj:`None`) + is_coalesced (bool): If set to :obj:`True`, will assume that + :obj:`edge_index` is already coalesced and thus avoids expensive + computation. (default: :obj:`False`) :rtype: :class:`torch.sparse.Tensor` @@ -123,18 +127,20 @@ def to_torch_coo_tensor( size = tuple(size) + edge_attr.size()[1:] - return torch.sparse_coo_tensor( + adj = torch.sparse_coo_tensor( indices=edge_index, values=edge_attr, size=size, device=edge_index.device, - ).coalesce() + ) + return adj._coalesced_(True) if is_coalesced else adj.coalesce() def to_torch_csr_tensor( edge_index: Tensor, edge_attr: Optional[Tensor] = None, size: Optional[Union[int, Tuple[int, int]]] = None, + is_coalesced: bool = False, ) -> Tensor: r"""Converts a sparse adjacency matrix defined by edge indices and edge attributes to a :class:`torch.sparse.Tensor` with layout @@ -149,6 +155,9 @@ def to_torch_csr_tensor( If given as an integer, will create a quadratic sparse matrix. If set to :obj:`None`, will infer a quadratic sparse matrix based on :obj:`edge_index.max() + 1`. (default: :obj:`None`) + is_coalesced (bool): If set to :obj:`True`, will assume that + :obj:`edge_index` is already coalesced and thus avoids expensive + computation. (default: :obj:`False`) :rtype: :class:`torch.sparse.Tensor` @@ -163,7 +172,7 @@ def to_torch_csr_tensor( size=(4, 4), nnz=6, layout=torch.sparse_csr) """ - adj = to_torch_coo_tensor(edge_index, edge_attr, size) + adj = to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) return adj.to_sparse_csr() @@ -171,6 +180,7 @@ def to_torch_csc_tensor( edge_index: Tensor, edge_attr: Optional[Tensor] = None, size: Optional[Union[int, Tuple[int, int]]] = None, + is_coalesced: bool = False, ) -> Tensor: r"""Converts a sparse adjacency matrix defined by edge indices and edge attributes to a :class:`torch.sparse.Tensor` with layout @@ -185,6 +195,9 @@ def to_torch_csc_tensor( If given as an integer, will create a quadratic sparse matrix. If set to :obj:`None`, will infer a quadratic sparse matrix based on :obj:`edge_index.max() + 1`. (default: :obj:`None`) + is_coalesced (bool): If set to :obj:`True`, will assume that + :obj:`edge_index` is already coalesced and thus avoids expensive + computation. 
(default: :obj:`False`) :rtype: :class:`torch.sparse.Tensor` @@ -199,7 +212,7 @@ def to_torch_csc_tensor( size=(4, 4), nnz=6, layout=torch.sparse_csc) """ - adj = to_torch_coo_tensor(edge_index, edge_attr, size) + adj = to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) return adj.to_sparse_csc() @@ -241,7 +254,7 @@ def to_edge_index(adj: Union[Tensor, SparseTensor]) -> Tuple[Tensor, Tensor]: row = adj.row_indices().detach() return torch.stack([row, col], dim=0).long(), adj.values() - raise ValueError(f"Expected sparse tensor layout (got '{adj.layout}')") + raise ValueError(f"Unexpected sparse tensor layout (got '{adj.layout}')") # Helper functions ############################################################ From dd0610a95935a4d8b00974464251190356084118 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 23 Mar 2023 09:24:24 +0100 Subject: [PATCH 1040/2432] Conda builds (#7018) --- .github/workflows/building_pyg_conda.yml | 6 +++--- .github/workflows/building_rusty1s_conda.yml | 6 +++--- CHANGELOG.md | 10 +++++++++- conda/pyg/README.md | 2 +- conda/pyg/build_conda.sh | 3 +++ conda/pyg/meta.yaml | 10 ++-------- conda/pytorch-geometric/README.md | 2 +- conda/pytorch-geometric/build_conda.sh | 3 +++ conda/pytorch-geometric/meta.yaml | 10 ++-------- 9 files changed, 27 insertions(+), 25 deletions(-) diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml index 5df2b61e6284..4464ddf144f1 100644 --- a/.github/workflows/building_pyg_conda.yml +++ b/.github/workflows/building_pyg_conda.yml @@ -10,9 +10,9 @@ jobs: strategy: fail-fast: false matrix: - # We have trouble building for Windows - drop for now. - os: [ubuntu-18.04, macos-10.15] # windows-2019 - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + os: [ubuntu-18.04, macos-10.15, windows-2019] + # We have troube building for Python 3.11 due to version conflicts. + python-version: ['3.7', '3.8', '3.9', '3.10'] # '3.11' torch-version: [1.12.0, 1.13.0, 2.0.0] cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] exclude: diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml index 1b673e3af48b..296a402cbf60 100644 --- a/.github/workflows/building_rusty1s_conda.yml +++ b/.github/workflows/building_rusty1s_conda.yml @@ -10,9 +10,9 @@ jobs: strategy: fail-fast: false matrix: - # We have trouble building for Windows - drop for now. - os: [ubuntu-18.04, macos-10.15] # windows-2019 - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + os: [ubuntu-18.04, macos-10.15, windows-2019] + # We have troube building for Python 3.11 due to version conflicts. + python-version: ['3.7', '3.8', '3.9', '3.10'] # '3.11' torch-version: [1.12.0, 1.13.0, 2.0.0] cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] exclude: diff --git a/CHANGELOG.md b/CHANGELOG.md index 51c67378e921..073ba04f34f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,15 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
-## [2.3.0] - 2023-MM-DD +## [2.4.0] - 2023-MM-DD + +### Added + +### Changed + +### Removed + +## [2.3.0] - 2023-03-23 ### Added diff --git a/conda/pyg/README.md b/conda/pyg/README.md index 6476246b88df..6207e22ab74f 100644 --- a/conda/pyg/README.md +++ b/conda/pyg/README.md @@ -1,3 +1,3 @@ ``` -./build_conda.sh 3.9 1.13.0 cu116 # python, pytorch and cuda version +./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version ``` diff --git a/conda/pyg/build_conda.sh b/conda/pyg/build_conda.sh index d5d83213089b..33ea72db4ea9 100755 --- a/conda/pyg/build_conda.sh +++ b/conda/pyg/build_conda.sh @@ -10,6 +10,9 @@ if [ "${CUDA_VERSION}" = "cpu" ]; then export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" else case $CUDA_VERSION in + cu118) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" + ;; cu117) export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" ;; diff --git a/conda/pyg/meta.yaml b/conda/pyg/meta.yaml index 936358f5dc32..ad1f90408e17 100644 --- a/conda/pyg/meta.yaml +++ b/conda/pyg/meta.yaml @@ -1,9 +1,9 @@ package: name: pyg - version: 2.2.0 + version: 2.3.0 source: - url: https://files.pythonhosted.org/packages/de/29/dbefbb2b1349638bc9ec5a632d770e8e7fefc2455b8911d675b2ca82d9e1/torch_geometric-2.2.0.tar.gz + url: https://files.pythonhosted.org/packages/43/b5/be9795db7756e6c1fa2606c8145ec637552487e72c6428ed0b231f8bcbd3/torch_geometric-2.3.0.tar.gz requirements: host: @@ -14,9 +14,6 @@ requirements: - python {{ environ.get('PYTHON_VERSION') }} - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} - - pytorch-scatter - - pytorch-sparse - - pytorch-cluster - psutil - tqdm - jinja2 @@ -32,9 +29,6 @@ build: test: imports: - - torch_scatter - - torch_sparse - - torch_cluster - torch_geometric - torch_geometric.nn - torch_geometric.data diff --git a/conda/pytorch-geometric/README.md b/conda/pytorch-geometric/README.md index 6476246b88df..6207e22ab74f 100644 --- a/conda/pytorch-geometric/README.md +++ b/conda/pytorch-geometric/README.md @@ -1,3 +1,3 @@ ``` -./build_conda.sh 3.9 1.13.0 cu116 # python, pytorch and cuda version +./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version ``` diff --git a/conda/pytorch-geometric/build_conda.sh b/conda/pytorch-geometric/build_conda.sh index 99b7f963d4d3..fcddcd03327e 100755 --- a/conda/pytorch-geometric/build_conda.sh +++ b/conda/pytorch-geometric/build_conda.sh @@ -10,6 +10,9 @@ if [ "${CUDA_VERSION}" = "cpu" ]; then export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" else case $CUDA_VERSION in + cu118) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" + ;; cu117) export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" ;; diff --git a/conda/pytorch-geometric/meta.yaml b/conda/pytorch-geometric/meta.yaml index 596f4a0070ee..a93e0b638948 100644 --- a/conda/pytorch-geometric/meta.yaml +++ b/conda/pytorch-geometric/meta.yaml @@ -1,9 +1,9 @@ package: name: pytorch-geometric - version: 2.2.0 + version: 2.3.0 source: - url: https://files.pythonhosted.org/packages/de/29/dbefbb2b1349638bc9ec5a632d770e8e7fefc2455b8911d675b2ca82d9e1/torch_geometric-2.2.0.tar.gz + url: https://files.pythonhosted.org/packages/43/b5/be9795db7756e6c1fa2606c8145ec637552487e72c6428ed0b231f8bcbd3/torch_geometric-2.3.0.tar.gz requirements: host: @@ -14,9 +14,6 @@ requirements: - python {{ environ.get('PYTHON_VERSION') }} - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} - - pytorch-scatter - - pytorch-sparse - - pytorch-cluster - 
psutil - tqdm - jinja2 @@ -32,9 +29,6 @@ build: test: imports: - - torch_scatter - - torch_sparse - - torch_cluster - torch_geometric - torch_geometric.nn - torch_geometric.data From f0c72186286f257778c1d9293cfd0d35472d30bb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 23 Mar 2023 10:11:51 +0100 Subject: [PATCH 1041/2432] Bump version to `2.4.0` (#7019) --- pyproject.toml | 2 +- setup.py | 2 +- torch_geometric/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a063673818ac..18fe5e790ba0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name="torch_geometric" -version="2.3.0" +version="2.4.0" authors=[ {name="Matthias Fey", email="matthias@pyg.org"}, ] diff --git a/setup.py b/setup.py index dd6989916f1d..5a43a5b95ec1 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ from setuptools import find_packages, setup -__version__ = '2.3.0' +__version__ = '2.4.0' install_requires = [ 'tqdm', diff --git a/torch_geometric/__init__.py b/torch_geometric/__init__.py index b44b5dd681eb..7a9ed82ae1ea 100644 --- a/torch_geometric/__init__.py +++ b/torch_geometric/__init__.py @@ -19,7 +19,7 @@ contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib') graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym') -__version__ = '2.3.0' +__version__ = '2.4.0' __all__ = [ 'seed_everything', From 24113afc5759be8814998627c46df9a6e54431c4 Mon Sep 17 00:00:00 2001 From: "Zory, ZHANG" Date: Fri, 24 Mar 2023 15:20:53 +0800 Subject: [PATCH 1042/2432] Fix typo in introduction tutorial (#7027) fix typo "accross" to across --- docs/source/get_started/introduction.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/get_started/introduction.rst b/docs/source/get_started/introduction.rst index b4ccb8ef41f7..af0e1ebe81e1 100644 --- a/docs/source/get_started/introduction.rst +++ b/docs/source/get_started/introduction.rst @@ -392,7 +392,7 @@ Now let's implement a two-layer GCN: return F.log_softmax(x, dim=1) The constructor defines two :class:`~torch_geometric.nn.conv.GCNConv` layers which get called in the forward pass of our network. -Note that the non-linearity is not integrated in the :obj:`conv` calls and hence needs to be applied afterwards (something which is consistent accross all operators in :pyg:`PyG`). +Note that the non-linearity is not integrated in the :obj:`conv` calls and hence needs to be applied afterwards (something which is consistent across all operators in :pyg:`PyG`). Here, we chose to use ReLU as our intermediate non-linearity and finally output a softmax distribution over the number of classes. 
Let's train this model on the training nodes for 200 epochs: From 743c1c1d9a33072ed80082ddb31f4be1080e0233 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 24 Mar 2023 09:26:52 +0100 Subject: [PATCH 1043/2432] Place message store on correct device in `TGN` (#7028) --- torch_geometric/nn/models/tgn.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/torch_geometric/nn/models/tgn.py b/torch_geometric/nn/models/tgn.py index 67a1889b5b67..9026fb4e8c8c 100644 --- a/torch_geometric/nn/models/tgn.py +++ b/torch_geometric/nn/models/tgn.py @@ -61,6 +61,10 @@ def __init__(self, num_nodes: int, raw_msg_dim: int, memory_dim: int, self.reset_parameters() + @property + def device(self) -> torch.device: + return self.time_enc.lin.weight.device + def reset_parameters(self): r"""Resets all learnable parameters of the module.""" if hasattr(self.msg_s_module, 'reset_parameters'): @@ -109,8 +113,8 @@ def update_state(self, src: Tensor, dst: Tensor, t: Tensor, self._update_memory(n_id) def _reset_message_store(self): - i = self.memory.new_empty((0, ), dtype=torch.long) - msg = self.memory.new_empty((0, self.raw_msg_dim)) + i = self.memory.new_empty((0, ), device=self.device, dtype=torch.long) + msg = self.memory.new_empty((0, self.raw_msg_dim), device=self.device) # Message store format: (src, dst, t, msg) self.msg_s_store = {j: (i, i, i, msg) for j in range(self.num_nodes)} self.msg_d_store = {j: (i, i, i, msg) for j in range(self.num_nodes)} From 96f7bfc3fb32f603cde5bab9914ce03d79620f2d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 25 Mar 2023 15:05:07 +0100 Subject: [PATCH 1044/2432] Fix `Iterator` types in `DataPipes` (#7035) --- torch_geometric/data/datapipes.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/torch_geometric/data/datapipes.py b/torch_geometric/data/datapipes.py index 2529eec085e7..0bfb9a0695f2 100644 --- a/torch_geometric/data/datapipes.py +++ b/torch_geometric/data/datapipes.py @@ -1,5 +1,5 @@ import copy -from typing import Any, Callable, Optional, Sequence +from typing import Any, Callable, Iterator, Optional, Sequence import torch @@ -45,7 +45,7 @@ def __init__( self.smiles_key = smiles_key self.target_key = target_key - def __iter__(self) -> Any: + def __iter__(self) -> Iterator: for d in self.dp: if isinstance(d, str): data = from_smiles(d) @@ -76,7 +76,7 @@ def is_shardable(self) -> bool: def apply_sharding(self, num_shards: int, shard_idx: int): self.range = range(shard_idx, len(self), num_shards) - def __iter__(self) -> Any: + def __iter__(self) -> Iterator: for i in self.range: yield self.dataset[i] @@ -93,7 +93,7 @@ def __init__(self, dp: IterDataPipe, *args, **kwargs): self.dp = dp self.fn = cls(*args, **kwargs) - def __iter__(self) -> Any: + def __iter__(self) -> Iterator: for data in self.dp: yield self.fn(copy.copy(data)) From a51e1db0ff009037925451a2e994434053e4da06 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 25 Mar 2023 15:05:58 +0100 Subject: [PATCH 1045/2432] Fix storage warning (#7034) --- torch_geometric/data/collate.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index f6473d78bf69..3d76bc194de8 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -5,6 +5,7 @@ import torch from torch import Tensor +import torch_geometric.typing from torch_geometric.data.data import BaseData from torch_geometric.data.storage import BaseStorage, NodeStorage from torch_geometric.typing 
import SparseTensor, torch_sparse @@ -142,7 +143,11 @@ def _collate( if torch.utils.data.get_worker_info() is not None: # Write directly into shared memory to avoid an extra copy: numel = sum(value.numel() for value in values) - storage = elem.storage()._new_shared(numel) + if torch_geometric.typing.WITH_PT2: + storage = elem.untyped_storage()._new_shared( + numel * elem.element_size(), device=elem.device) + else: + storage = elem.storage()._new_shared(numel, device=elem.device) shape = list(elem.size()) if cat_dim is None or elem.dim() == 0: shape = [len(values)] + shape From c78c5b217f9fb652555512405fb561c33ae0b439 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 25 Mar 2023 16:59:59 +0100 Subject: [PATCH 1046/2432] Test against nightly PyTorch releases (#7036) --- .github/actions/setup/action.yml | 9 ++++ .github/workflows/latest_testing.yml | 67 ++++++++++++++++++++++++++++ test/profile/test_profile_utils.py | 13 +++++- test/utils/test_assortativity.py | 17 ++++--- test/utils/test_homophily.py | 15 ++++--- test/utils/test_segment.py | 3 +- test/utils/test_softmax.py | 24 +++++++--- test/utils/test_sparse.py | 10 +++-- test/utils/test_spmm.py | 33 ++++++++------ torch_geometric/typing.py | 39 ++++++++++++++-- 10 files changed, 188 insertions(+), 42 deletions(-) create mode 100644 .github/workflows/latest_testing.yml diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 412dc6240b99..4bb57fa8f493 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -31,12 +31,21 @@ runs: setup.py - name: Install PyTorch ${{ inputs.torch-version }}+${{ inputs.cuda-version }} + if: ${{ inputs.torch-version != 'nightly' }} run: | pip install torch==${{ inputs.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} python -c "import torch; print('PyTorch:', torch.__version__)" python -c "import torch; print('CUDA:', torch.version.cuda)" shell: bash + - name: Install PyTorch ${{ inputs.torch-version }}+${{ inputs.cuda-version }} + if: ${{ inputs.torch-version == 'nightly' }} + run: | + pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/${{ inputs.cuda-version }} + python -c "import torch; print('PyTorch:', torch.__version__)" + python -c "import torch; print('CUDA:', torch.version.cuda)" + shell: bash + - name: Install extension packages if: ${{ inputs.full_install == 'true' }} run: | diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml new file mode 100644 index 000000000000..1df55913f049 --- /dev/null +++ b/.github/workflows/latest_testing.yml @@ -0,0 +1,67 @@ +name: Testing PyTorch nightly + +on: # yamllint disable-line rule:truthy + push: + branches: + - master + pull_request: + +jobs: + + latest_pytest: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + fetch-depth: 40 + + # Skip workflow if only certain files have been changed. 
+ - name: Get changed files + id: changed-files-specific + uses: tj-actions/changed-files@v34 + with: + files: | + benchmark/** + conda/** + docker/** + docs/** + examples/** + graphgym/** + CHANGELOG.md + + - name: Setup packages + if: steps.changed-files-specific.outputs.only_changed != 'true' + uses: ./.github/actions/setup + with: + torch-version: nightly + full_install: false + + - name: Install main package + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pip install -e .[full,test] + + - name: Run tests + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pytest test/test_debug.py + pytest test/test_experimental.py + pytest test/test_home.py + pytest test/test_seed.py + pytest test/test_typing.py + pytest test/contrib/ + # pytest test/data/ + pytest test/datasets/ + pytest test/explain/ + pytest test/graphgym/ + pytest test/io/ + # pytest test/loader/ + # pytest test/nn/ + pytest test/profile/ + pytest test/sampler/ + pytest test/testing/ + # pytest test/transforms/ + pytest test/utils/ + pytest test/visualization/ diff --git a/test/profile/test_profile_utils.py b/test/profile/test_profile_utils.py index 6d52194ec2b2..c9215be68425 100644 --- a/test/profile/test_profile_utils.py +++ b/test/profile/test_profile_utils.py @@ -1,6 +1,5 @@ import torch from torch.nn import Linear -from torch_sparse import SparseTensor from torch_geometric.data import Data from torch_geometric.profile import ( @@ -15,7 +14,8 @@ byte_to_megabyte, medibyte_to_megabyte, ) -from torch_geometric.testing import onlyCUDA +from torch_geometric.testing import onlyCUDA, withPackage +from torch_geometric.typing import SparseTensor def test_count_parameters(): @@ -28,6 +28,15 @@ def test_get_model_size(): def test_get_data_size(): + x = torch.randn(10, 128) + data = Data(x=x, y=x) + + data_size = get_data_size(data) + assert data_size == 10 * 128 * 4 + + +@withPackage('torch_sparse') +def test_get_data_size_with_sparse_tensor(): x = torch.randn(10, 128) row, col = torch.randint(0, 10, (2, 100), dtype=torch.long) adj_t = SparseTensor(row=row, col=col, value=None, sparse_sizes=(10, 10)) diff --git a/test/utils/test_assortativity.py b/test/utils/test_assortativity.py index 58b5fdb25e59..b1ac2f4e0c67 100644 --- a/test/utils/test_assortativity.py +++ b/test/utils/test_assortativity.py @@ -1,7 +1,8 @@ import pytest import torch -from torch_sparse import SparseTensor +import torch_geometric.typing +from torch_geometric.typing import SparseTensor from torch_geometric.utils import assortativity @@ -12,9 +13,10 @@ def test_assortativity(): out = assortativity(edge_index) assert pytest.approx(out, abs=1e-5) == 1.0 - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[6, 6]) - out = assortativity(adj) - assert pytest.approx(out, abs=1e-5) == 1.0 + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[6, 6]) + out = assortativity(adj) + assert pytest.approx(out, abs=1e-5) == 1.0 # Completely disassortative graph: edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 5, 5, 5, 5], @@ -22,6 +24,7 @@ def test_assortativity(): out = assortativity(edge_index) assert pytest.approx(out, abs=1e-5) == -1.0 - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[6, 6]) - out = assortativity(adj) - assert pytest.approx(out, abs=1e-5) == -1.0 + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[6, 6]) + out = assortativity(adj) + assert pytest.approx(out, abs=1e-5) == -1.0 
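The test updates above (and those that follow for `test_homophily.py`, `test_segment.py`, and `test_softmax.py`) all apply the same optional-dependency pattern: skip a whole test via the `withPackage` decorator, or guard individual `SparseTensor` assertions behind the `torch_geometric.typing.WITH_TORCH_SPARSE` flag while the pure-PyTorch path stays unconditional. A minimal sketch of that pattern, assuming only `pytest` and `torch_geometric` are installed (the test names below are illustrative only, not part of any patch):

import torch

import torch_geometric.typing
from torch_geometric.testing import withPackage
from torch_geometric.typing import SparseTensor
from torch_geometric.utils import to_torch_csr_tensor


@withPackage('torch_sparse')  # skipped entirely if torch-sparse is missing
def test_needs_torch_sparse():
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
    adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(3, 3))
    assert adj.nnz() == 4


def test_runs_without_torch_sparse():
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
    # The pure-PyTorch sparse path is always exercised:
    adj = to_torch_csr_tensor(edge_index, size=(3, 3))
    assert adj.size() == (3, 3)
    # Extra checks run only when the optional dependency is available:
    if torch_geometric.typing.WITH_TORCH_SPARSE:
        adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(3, 3))
        assert adj.nnz() == 4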
diff --git a/test/utils/test_homophily.py b/test/utils/test_homophily.py index 7746d08d4e7b..126cc71960fc 100644 --- a/test/utils/test_homophily.py +++ b/test/utils/test_homophily.py @@ -1,7 +1,8 @@ import pytest import torch -from torch_sparse import SparseTensor +import torch_geometric.typing +from torch_geometric.typing import SparseTensor from torch_geometric.utils import homophily @@ -10,19 +11,23 @@ def test_homophily(): y = torch.tensor([0, 0, 0, 0, 1]) batch = torch.tensor([0, 0, 0, 1, 1]) row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(5, 5)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor(row=row, col=col, sparse_sizes=(5, 5)) method = 'edge' assert pytest.approx(homophily(edge_index, y, method=method)) == 0.75 - assert pytest.approx(homophily(adj, y, method=method)) == 0.75 + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert pytest.approx(homophily(adj, y, method=method)) == 0.75 assert homophily(edge_index, y, batch, method).tolist() == [1., 0.] method = 'node' assert pytest.approx(homophily(edge_index, y, method=method)) == 0.6 - assert pytest.approx(homophily(adj, y, method=method)) == 0.6 + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert pytest.approx(homophily(adj, y, method=method)) == 0.6 assert homophily(edge_index, y, batch, method).tolist() == [1., 0.] method = 'edge_insensitive' assert pytest.approx(homophily(edge_index, y, method=method)) == 0.1999999 - assert pytest.approx(homophily(adj, y, method=method)) == 0.1999999 + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert pytest.approx(homophily(adj, y, method=method)) == 0.1999999 assert homophily(edge_index, y, batch, method).tolist() == [0., 0.] diff --git a/test/utils/test_segment.py b/test/utils/test_segment.py index 5784e6215ce0..754bbec73923 100644 --- a/test/utils/test_segment.py +++ b/test/utils/test_segment.py @@ -1,11 +1,12 @@ import pytest import torch -from torch_geometric.testing import withCUDA +from torch_geometric.testing import withCUDA, withPackage from torch_geometric.utils import segment @withCUDA +@withPackage('torch_scatter') @pytest.mark.parametrize('reduce', ['sum', 'mean', 'min', 'max']) def test_segment(device, reduce): src = torch.randn(20, 16, device=device) diff --git a/test/utils/test_softmax.py b/test/utils/test_softmax.py index a84ce67b30fd..bfffba30cb96 100644 --- a/test/utils/test_softmax.py +++ b/test/utils/test_softmax.py @@ -1,6 +1,7 @@ +import pytest import torch -import torch_geometric +import torch_geometric.typing from torch_geometric.profile import benchmark from torch_geometric.utils import softmax @@ -12,12 +13,17 @@ def test_softmax(): out = softmax(src, index) assert out.tolist() == [0.5, 0.5, 1, 1] - assert softmax(src, None, ptr).tolist() == out.tolist() + if torch_geometric.typing.WITH_TORCH_SCATTER: + assert softmax(src, None, ptr).tolist() == out.tolist() + else: + with pytest.raises(ImportError): + softmax(src, None, ptr) src = src.view(-1, 1) out = softmax(src, index) assert out.tolist() == [[0.5], [0.5], [1], [1]] - assert softmax(src, None, ptr).tolist() == out.tolist() + if torch_geometric.typing.WITH_TORCH_SCATTER: + assert softmax(src, None, ptr).tolist() == out.tolist() jit = torch.jit.script(softmax) assert torch.allclose(jit(src, index), out) @@ -46,19 +52,23 @@ def test_softmax_dim(): src = torch.randn(4) assert torch.allclose(softmax(src, index, dim=0), src.softmax(dim=0)) - assert torch.allclose(softmax(src, ptr=ptr, dim=0), src.softmax(dim=0)) + if torch_geometric.typing.WITH_TORCH_SCATTER: + 
assert torch.allclose(softmax(src, ptr=ptr, dim=0), src.softmax(dim=0)) src = torch.randn(4, 16) assert torch.allclose(softmax(src, index, dim=0), src.softmax(dim=0)) - assert torch.allclose(softmax(src, ptr=ptr, dim=0), src.softmax(dim=0)) + if torch_geometric.typing.WITH_TORCH_SCATTER: + assert torch.allclose(softmax(src, ptr=ptr, dim=0), src.softmax(dim=0)) src = torch.randn(4, 4) assert torch.allclose(softmax(src, index, dim=-1), src.softmax(dim=-1)) - assert torch.allclose(softmax(src, ptr=ptr, dim=-1), src.softmax(dim=-1)) + if torch_geometric.typing.WITH_TORCH_SCATTER: + assert torch.allclose(softmax(src, ptr=ptr, dim=-1), src.softmax(-1)) src = torch.randn(4, 4, 16) assert torch.allclose(softmax(src, index, dim=1), src.softmax(dim=1)) - assert torch.allclose(softmax(src, ptr=ptr, dim=1), src.softmax(dim=1)) + if torch_geometric.typing.WITH_TORCH_SCATTER: + assert torch.allclose(softmax(src, ptr=ptr, dim=1), src.softmax(dim=1)) if __name__ == '__main__': diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 7814846a6b9d..24d9e66af5c4 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor import torch_geometric.typing from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor from torch_geometric.utils import ( dense_to_sparse, is_sparse, @@ -57,17 +57,21 @@ def test_is_torch_sparse_tensor(): x = torch.randn(5, 5) assert not is_torch_sparse_tensor(x) - assert not is_torch_sparse_tensor(SparseTensor.from_dense(x)) assert is_torch_sparse_tensor(x.to_sparse()) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert not is_torch_sparse_tensor(SparseTensor.from_dense(x)) + def test_is_sparse(): x = torch.randn(5, 5) assert not is_sparse(x) - assert is_sparse(SparseTensor.from_dense(x)) assert is_sparse(x.to_sparse()) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert is_sparse(SparseTensor.from_dense(x)) + def test_to_torch_coo_tensor(): edge_index = torch.tensor([ diff --git a/test/utils/test_spmm.py b/test/utils/test_spmm.py index 26f90ec16199..6667603694c0 100644 --- a/test/utils/test_spmm.py +++ b/test/utils/test_spmm.py @@ -5,6 +5,7 @@ import torch from torch import Tensor +import torch_geometric.typing from torch_geometric.profile import benchmark from torch_geometric.testing import withCUDA, withPackage from torch_geometric.typing import SparseTensor @@ -17,21 +18,23 @@ def test_spmm_basic(device, reduce): src = torch.randn(5, 4, device=device) other = torch.randn(4, 8, device=device) - out1 = src @ other + out1 = (src @ other) / (src.size(1) if reduce == 'mean' else 1) out2 = spmm(src.to_sparse_csr(), other, reduce=reduce) - out3 = spmm(SparseTensor.from_dense(src), other, reduce=reduce) assert out1.size() == (5, 8) - if reduce == 'sum': - assert torch.allclose(out1, out2, atol=1e-6) - assert torch.allclose(out1, out3, atol=1e-6) - assert torch.allclose(out2, out3, atol=1e-6) + assert torch.allclose(out1, out2, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out3 = spmm(SparseTensor.from_dense(src), other, reduce=reduce) + assert torch.allclose(out2, out3, atol=1e-6) # Test `mean` reduction with isolated nodes: src[0] = 0. + out1 = (src @ other) / (4. if reduce == 'mean' else 1.) 
out2 = spmm(src.to_sparse_csr(), other, reduce=reduce) - out3 = spmm(SparseTensor.from_dense(src), other, reduce=reduce) assert out1.size() == (5, 8) - assert torch.allclose(out2, out3, atol=1e-6) + assert torch.allclose(out1, out2, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out3 = spmm(SparseTensor.from_dense(src), other, reduce=reduce) + assert torch.allclose(out2, out3, atol=1e-6) @withCUDA @@ -46,8 +49,10 @@ def test_spmm_reduce(device, reduce): spmm(src.to_sparse_csr(), other, reduce) else: out1 = spmm(src.to_sparse_csr(), other, reduce) - out2 = spmm(SparseTensor.from_dense(src), other, reduce=reduce) - assert torch.allclose(out1, out2) + assert out1.size() == (5, 8) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out2 = spmm(SparseTensor.from_dense(src), other, reduce=reduce) + assert torch.allclose(out1, out2) @withCUDA @@ -91,13 +96,13 @@ def jit_torch(src: Tensor, other: Tensor, reduce: str) -> Tensor: other = torch.randn(4, 8) out1 = src @ other - out2 = jit_torch_sparse(SparseTensor.from_dense(src), other, reduce=reduce) - out3 = jit_torch(src.to_sparse_csr(), other, reduce) + out2 = jit_torch(src.to_sparse_csr(), other, reduce) assert out1.size() == (5, 8) if reduce == 'sum': assert torch.allclose(out1, out2, atol=1e-6) - assert torch.allclose(out1, out3, atol=1e-6) - assert torch.allclose(out2, out3, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out3 = jit_torch_sparse(SparseTensor.from_dense(src), other, reduce) + assert torch.allclose(out2, out3, atol=1e-6) if __name__ == '__main__': diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index bb9c74e77497..75af7e5c278c 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -41,17 +41,50 @@ if isinstance(e, OSError): warnings.warn(f"An issue occurred while importing 'torch-sparse'. " f"Disabling its usage. 
Stacktrace: {e}") - torch_sparse = object WITH_TORCH_SPARSE = False class SparseTensor: - def __init__(self, *args, **kwargs): + def __init__( + self, + row: Optional[torch.Tensor] = None, + rowptr: Optional[torch.Tensor] = None, + col: Optional[torch.Tensor] = None, + value: Optional[torch.Tensor] = None, + sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None, + is_sorted: bool = False, + trust_data: bool = False, + ): raise ImportError("'SparseTensor' requires 'torch-sparse'") @classmethod - def from_edge_index(cls, *args, **kwargs) -> 'SparseTensor': + def from_edge_index( + self, + edge_index: torch.Tensor, + edge_attr: Optional[torch.Tensor] = None, + sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None, + is_sorted: bool = False, + trust_data: bool = False, + ) -> 'SparseTensor': raise ImportError("'SparseTensor' requires 'torch-sparse'") + def size(self, dim: int) -> int: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + + def is_cuda(self) -> bool: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + + def to_torch_sparse_csr_tensor( + self, + dtype: Optional[torch.dtype] = None, + ) -> torch.Tensor: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + + class torch_sparse: + @staticmethod + def matmul(src: SparseTensor, other: Tensor, + reduce: str = "sum") -> Tensor: + raise ImportError("'matmul' requires 'torch-sparse'") + # Types for accessing data #################################################### From 0ea43bb9afe7018e814d331652051dfdefc5761d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 26 Mar 2023 10:45:06 +0200 Subject: [PATCH 1047/2432] Drop `torch_sparse` dependency in tests (1/n) (#7041) --- .github/workflows/latest_testing.yml | 2 +- CHANGELOG.md | 2 ++ test/transforms/test_add_metapaths.py | 5 ++++ test/transforms/test_gcn_norm.py | 28 ++++++++++--------- test/transforms/test_to_sparse_tensor.py | 11 ++++---- .../transforms/add_positional_encoding.py | 26 ++++++++--------- .../transforms/feature_propagation.py | 15 +++++----- torch_geometric/transforms/rooted_subgraph.py | 8 ++---- torch_geometric/transforms/sign.py | 21 ++++++++------ torch_geometric/transforms/two_hop.py | 15 +++++----- 10 files changed, 72 insertions(+), 61 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 1df55913f049..aed4ca6b2195 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -62,6 +62,6 @@ jobs: pytest test/profile/ pytest test/sampler/ pytest test/testing/ - # pytest test/transforms/ + pytest test/transforms/ pytest test/utils/ pytest test/visualization/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 073ba04f34f3..6b35aaf046e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) + ### Removed ## [2.3.0] - 2023-03-23 diff --git a/test/transforms/test_add_metapaths.py b/test/transforms/test_add_metapaths.py index a3f2403bbc44..e16bd4ae322a 100644 --- a/test/transforms/test_add_metapaths.py +++ b/test/transforms/test_add_metapaths.py @@ -4,6 +4,7 @@ from torch import tensor from torch_geometric.data import HeteroData +from torch_geometric.testing import withPackage from torch_geometric.transforms import AddMetaPaths, AddRandomMetaPaths from torch_geometric.utils import coalesce @@ -21,6 +22,7 @@ def generate_data() -> HeteroData: return data +@withPackage('torch_sparse') def test_add_metapaths(): data = generate_data() # Test transform options: @@ -74,6 +76,7 @@ def test_add_metapaths(): assert list(meta.metapath_dict.keys()) == new_edge_types +@withPackage('torch_sparse') def test_add_metapaths_max_sample(): torch.manual_seed(12345) @@ -86,6 +89,7 @@ def test_add_metapaths_max_sample(): assert meta['metapath_0'].edge_index.size(1) < 9 +@withPackage('torch_sparse') def test_add_weighted_metapaths(): torch.manual_seed(12345) @@ -144,6 +148,7 @@ def test_add_weighted_metapaths(): assert edge_weight.tolist() == [1, 2, 2, 4] +@withPackage('torch_sparse') def test_add_random_metapaths(): data = generate_data() diff --git a/test/transforms/test_gcn_norm.py b/test/transforms/test_gcn_norm.py index 9eb2bd5debc5..cd92ad2a17b5 100644 --- a/test/transforms/test_gcn_norm.py +++ b/test/transforms/test_gcn_norm.py @@ -1,14 +1,14 @@ import torch -from torch_sparse import SparseTensor +import torch_geometric.typing from torch_geometric.data import Data from torch_geometric.transforms import GCNNorm +from torch_geometric.typing import SparseTensor def test_gcn_norm(): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) edge_weight = torch.ones(edge_index.size(1)) - adj_t = SparseTensor.from_edge_index(edge_index, edge_weight).t() transform = GCNNorm() assert str(transform) == 'GCNNorm(add_self_loops=True)' @@ -32,14 +32,16 @@ def test_gcn_norm(): assert torch.allclose(data.edge_weight, expected_edge_weight, atol=1e-4) # For `SparseTensor`, expected outputs will be sorted: - expected_edge_index = [[0, 0, 1, 1, 1, 2, 2], [0, 1, 0, 1, 2, 1, 2]] - expected_edge_weight = torch.tensor( - [0.500, 0.4082, 0.4082, 0.3333, 0.4082, 0.4082, 0.5000]) - - data = Data(adj_t=adj_t) - data = transform(data) - assert len(data) == 1 - row, col, value = data.adj_t.coo() - assert row.tolist() == expected_edge_index[0] - assert col.tolist() == expected_edge_index[1] - assert torch.allclose(value, expected_edge_weight, atol=1e-4) + if torch_geometric.typing.WITH_TORCH_SPARSE: + expected_edge_index = [[0, 0, 1, 1, 1, 2, 2], [0, 1, 0, 1, 2, 1, 2]] + expected_edge_weight = torch.tensor( + [0.500, 0.4082, 0.4082, 0.3333, 0.4082, 0.4082, 0.5000]) + + adj_t = SparseTensor.from_edge_index(edge_index, edge_weight).t() + data = Data(adj_t=adj_t) + data = transform(data) + assert len(data) == 1 + row, col, value = data.adj_t.coo() + assert row.tolist() == expected_edge_index[0] + assert col.tolist() == expected_edge_index[1] + assert torch.allclose(value, expected_edge_weight, atol=1e-4) diff --git a/test/transforms/test_to_sparse_tensor.py b/test/transforms/test_to_sparse_tensor.py index e0ba20b13fc2..b2c85016f785 100644 --- a/test/transforms/test_to_sparse_tensor.py +++ b/test/transforms/test_to_sparse_tensor.py @@ -1,6 +1,7 @@ import pytest import 
torch +import torch_geometric.typing from torch_geometric.data import Data, HeteroData from torch_geometric.transforms import ToSparseTensor @@ -26,14 +27,14 @@ def test_to_sparse_tensor_basic(layout): assert torch.equal(data.edge_attr, edge_attr[perm]) assert 'adj_t' in data - if layout is None: # `torch_sparse.SparseTensor`. + if layout is None and torch_geometric.typing.WITH_TORCH_SPARSE: row, col, value = data.adj_t.coo() assert row.tolist() == [0, 1, 1, 2] assert col.tolist() == [1, 0, 2, 1] assert torch.equal(value, edge_weight[perm]) else: adj_t = data.adj_t - assert adj_t.layout == layout + assert adj_t.layout == layout or torch.sparse_csr if layout != torch.sparse_coo: adj_t = adj_t.to_sparse_coo() assert adj_t.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] @@ -69,7 +70,7 @@ def test_hetero_to_sparse_tensor(layout): data = ToSparseTensor(layout=layout)(data) - if layout is None: # `torch_sparse.SparseTensor`. + if layout is None and torch_geometric.typing.WITH_TORCH_SPARSE: row, col, value = data['v', 'v'].adj_t.coo() assert row.tolist() == [0, 1, 1, 2] assert col.tolist() == [1, 0, 2, 1] @@ -81,14 +82,14 @@ def test_hetero_to_sparse_tensor(layout): assert value is None else: adj_t = data['v', 'v'].adj_t - assert adj_t.layout == layout + assert adj_t.layout == layout or torch.sparse_csr if layout != torch.sparse_coo: adj_t = adj_t.to_sparse_coo() assert adj_t.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] assert adj_t.values().tolist() == [1., 1., 1., 1.] adj_t = data['v', 'w'].adj_t - assert adj_t.layout == layout + assert adj_t.layout == layout or torch.sparse_csr if layout != torch.sparse_coo: adj_t = adj_t.to_sparse_coo() assert adj_t.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] diff --git a/torch_geometric/transforms/add_positional_encoding.py b/torch_geometric/transforms/add_positional_encoding.py index 6229f0d7f527..3fecc169b3e9 100644 --- a/torch_geometric/transforms/add_positional_encoding.py +++ b/torch_geometric/transforms/add_positional_encoding.py @@ -6,11 +6,13 @@ from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor from torch_geometric.utils import ( get_laplacian, get_self_loop_attr, + scatter, + to_edge_index, to_scipy_sparse_matrix, + to_torch_csr_tensor, ) @@ -116,24 +118,22 @@ def __init__( self.attr_name = attr_name def __call__(self, data: Data) -> Data: - num_nodes = data.num_nodes - edge_index, edge_weight = data.edge_index, data.edge_weight + row, col = data.edge_index + N = data.num_nodes - adj = SparseTensor.from_edge_index(edge_index, edge_weight, - sparse_sizes=(num_nodes, num_nodes)) + value = data.edge_weight + if value is None: + value = torch.ones(data.num_edges, device=row.device) + value = scatter(value, row, dim_size=N, reduce='sum').clamp(min=1)[row] + value = 1.0 / value - # Compute D^{-1} A: - deg_inv = 1.0 / adj.sum(dim=1) - deg_inv[deg_inv == float('inf')] = 0 - adj = adj * deg_inv.view(-1, 1) + adj = to_torch_csr_tensor(data.edge_index, value, size=data.size()) out = adj - row, col, value = out.coo() - pe_list = [get_self_loop_attr((row, col), value, num_nodes)] + pe_list = [get_self_loop_attr(*to_edge_index(out), num_nodes=N)] for _ in range(self.walk_length - 1): out = out @ adj - row, col, value = out.coo() - pe_list.append(get_self_loop_attr((row, col), value, num_nodes)) + pe_list.append(get_self_loop_attr(*to_edge_index(out), N)) pe = torch.stack(pe_list, dim=-1) data = 
add_node_attr(data, pe, attr_name=self.attr_name) diff --git a/torch_geometric/transforms/feature_propagation.py b/torch_geometric/transforms/feature_propagation.py index 426fcdf0ebf2..5a9f89b6c1e7 100644 --- a/torch_geometric/transforms/feature_propagation.py +++ b/torch_geometric/transforms/feature_propagation.py @@ -4,7 +4,7 @@ from torch_geometric.data.datapipes import functional_transform from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor +from torch_geometric.utils import is_torch_sparse_tensor, to_torch_csc_tensor @functional_transform('feature_propagation') @@ -52,14 +52,13 @@ def __call__(self, data: Data) -> Data: if 'edge_weight' in data: edge_weight = data.edge_weight edge_index = data.edge_index - adj_t = SparseTensor(row=edge_index[1], col=edge_index[0], - value=edge_weight, - sparse_sizes=data.size()[::-1], - is_sorted=False, trust_data=True) + adj_t = to_torch_csc_tensor(edge_index, edge_weight, + size=data.size()).t() + adj_t, _ = gcn_norm(adj_t, add_self_loops=False) + elif is_torch_sparse_tensor(data.adj_t): + adj_t, _ = gcn_norm(data.adj_t, add_self_loops=False) else: - adj_t = data.adj_t - - adj_t = gcn_norm(adj_t, add_self_loops=False) + adj_t = gcn_norm(data.adj_t, add_self_loops=False) x = data.x.clone() x[missing_mask] = 0. diff --git a/torch_geometric/transforms/rooted_subgraph.py b/torch_geometric/transforms/rooted_subgraph.py index 5f91fac1e47f..fde7e2eb4f4f 100644 --- a/torch_geometric/transforms/rooted_subgraph.py +++ b/torch_geometric/transforms/rooted_subgraph.py @@ -7,7 +7,7 @@ from torch_geometric.data import Data from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor class RootedSubgraphData(Data): @@ -116,11 +116,7 @@ def extract( data: Data, ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: - adj_t = SparseTensor.from_edge_index( - data.edge_index, - sparse_sizes=(data.num_nodes, data.num_nodes), - ).t() - + adj_t = to_torch_csc_tensor(data.edge_index, size=data.size()).t() n_mask = torch.eye(data.num_nodes, device=data.edge_index.device) for _ in range(self.num_hops): n_mask += adj_t @ n_mask diff --git a/torch_geometric/transforms/sign.py b/torch_geometric/transforms/sign.py index 819d6eaad8c9..c9e0d9e0b6be 100644 --- a/torch_geometric/transforms/sign.py +++ b/torch_geometric/transforms/sign.py @@ -3,7 +3,7 @@ from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor +from torch_geometric.utils import scatter, to_torch_csc_tensor @functional_transform('sign') @@ -37,13 +37,18 @@ def __init__(self, K: int): def __call__(self, data: Data) -> Data: assert data.edge_index is not None row, col = data.edge_index - adj_t = SparseTensor(row=col, col=row, - sparse_sizes=(data.num_nodes, data.num_nodes)) - - deg = adj_t.sum(dim=1).to(torch.float) - deg_inv_sqrt = deg.pow(-0.5) - deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0 - adj_t = deg_inv_sqrt.view(-1, 1) * adj_t * deg_inv_sqrt.view(1, -1) + N = data.num_nodes + + edge_weight = data.edge_weight + if edge_weight is None: + edge_weight = torch.ones(data.num_edges, device=row.device) + + deg = scatter(edge_weight, col, dim_size=N, reduce='sum') + deg_inv_sqrt = deg.pow_(-0.5) + deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0) + edge_weight = 
deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col] + adj = to_torch_csc_tensor(data.edge_index, edge_weight, size=(N, N)) + adj_t = adj.t() assert data.x is not None xs = [data.x] diff --git a/torch_geometric/transforms/two_hop.py b/torch_geometric/transforms/two_hop.py index a05792ec1beb..10f08ad5b37f 100644 --- a/torch_geometric/transforms/two_hop.py +++ b/torch_geometric/transforms/two_hop.py @@ -3,8 +3,12 @@ from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor -from torch_geometric.utils import coalesce, remove_self_loops +from torch_geometric.utils import ( + coalesce, + remove_self_loops, + to_edge_index, + to_torch_csr_tensor, +) @functional_transform('two_hop') @@ -15,11 +19,8 @@ def __call__(self, data: Data) -> Data: edge_index, edge_attr = data.edge_index, data.edge_attr N = data.num_nodes - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(N, N)) - - adj = adj @ adj - row, col, _ = adj.coo() - edge_index2 = torch.stack([row, col], dim=0) + adj = to_torch_csr_tensor(edge_index, size=(N, N)) + edge_index2, _ = to_edge_index(adj @ adj) edge_index2, _ = remove_self_loops(edge_index2) edge_index = torch.cat([edge_index, edge_index2], dim=1) From 93f9f590bb62a848e3f57b8fcaee5fd48c768436 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 26 Mar 2023 16:14:07 +0200 Subject: [PATCH 1048/2432] Accelerated sparse tensor conversions (#7042) --- CHANGELOG.md | 1 + test/utils/test_sparse.py | 23 ++++++++++++++++++++ torch_geometric/utils/num_nodes.py | 4 ++-- torch_geometric/utils/sparse.py | 35 +++++++++++++++++++++++++----- 4 files changed, 55 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b35aaf046e4..57433f7c0b93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Accelerated sparse tensor conversion routiens ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) ### Removed diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 24d9e66af5c4..2e1fcf571b21 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -1,6 +1,7 @@ import torch import torch_geometric.typing +from torch_geometric.profile import benchmark from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor from torch_geometric.utils import ( @@ -195,3 +196,25 @@ def test_to_edge_index(): edge_index, edge_attr = jit(adj) assert edge_index.tolist() == [[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]] assert edge_attr.tolist() == [1., 1., 1., 1., 1., 1.] 
+ + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cuda') + args = parser.parse_args() + + num_nodes, num_edges = 10_000, 200_000 + edge_index = torch.randint(num_nodes, (2, num_edges), device=args.device) + + benchmark( + funcs=[ + SparseTensor.from_edge_index, to_torch_coo_tensor, + to_torch_csr_tensor, to_torch_csc_tensor + ], + func_names=['SparseTensor', 'To COO', 'To CSR', 'To CSC'], + args=(edge_index, None, (num_nodes, num_nodes)), + num_steps=50 if args.device == 'cpu' else 500, + num_warmups=10 if args.device == 'cpu' else 100, + ) diff --git a/torch_geometric/utils/num_nodes.py b/torch_geometric/utils/num_nodes.py index d81e356b9ff3..767e7ffe64d5 100644 --- a/torch_geometric/utils/num_nodes.py +++ b/torch_geometric/utils/num_nodes.py @@ -4,8 +4,8 @@ import torch from torch import Tensor +import torch_geometric from torch_geometric.typing import SparseTensor # noqa -from torch_geometric.utils.sparse import is_torch_sparse_tensor @torch.jit._overload @@ -24,7 +24,7 @@ def maybe_num_nodes(edge_index, num_nodes=None): if num_nodes is not None: return num_nodes elif isinstance(edge_index, Tensor): - if is_torch_sparse_tensor(edge_index): + if torch_geometric.utils.is_torch_sparse_tensor(edge_index): return max(edge_index.size(0), edge_index.size(1)) return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0 else: diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py index e2c6034bb9c9..d05d91730802 100644 --- a/torch_geometric/utils/sparse.py +++ b/torch_geometric/utils/sparse.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.typing import SparseTensor +from torch_geometric.utils import coalesce def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]: @@ -122,18 +123,21 @@ def to_torch_coo_tensor( if not isinstance(size, (tuple, list)): size = (size, size) + if not is_coalesced: + edge_index, edge_attr = coalesce(edge_index, edge_attr, max(size)) + if edge_attr is None: edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) - size = tuple(size) + edge_attr.size()[1:] - adj = torch.sparse_coo_tensor( indices=edge_index, values=edge_attr, - size=size, + size=tuple(size) + edge_attr.size()[1:], device=edge_index.device, ) - return adj._coalesced_(True) if is_coalesced else adj.coalesce() + adj = adj._coalesced_(True) + + return adj def to_torch_csr_tensor( @@ -212,8 +216,27 @@ def to_torch_csc_tensor( size=(4, 4), nnz=6, layout=torch.sparse_csc) """ - adj = to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) - return adj.to_sparse_csc() + if size is None: + size = int(edge_index.max()) + 1 + if not isinstance(size, (tuple, list)): + size = (size, size) + + if not is_coalesced: + edge_index, edge_attr = coalesce(edge_index, edge_attr, max(size), + sort_by_row=False) + + if edge_attr is None: + edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) + + adj = torch.sparse_csc_tensor( + ccol_indices=index2ptr(edge_index[1], size[1]), + row_indices=edge_index[0], + values=edge_attr, + size=tuple(size) + edge_attr.size()[1:], + device=edge_index.device, + ) + + return adj def to_edge_index(adj: Union[Tensor, SparseTensor]) -> Tuple[Tensor, Tensor]: From 0ed30b44a5668bc7715e8470f1f469b5e3aa3cbb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 26 Mar 2023 21:41:28 +0200 Subject: [PATCH 1049/2432] Drop `torch_sparse` dependency in tests (2/n) (#7043) --- .github/workflows/latest_testing.yml | 2 +- CHANGELOG.md | 2 
+- test/data/lightning/test_datamodule.py | 5 + test/data/test_batch.py | 175 ++++++++++++++----------- test/data/test_data.py | 12 +- test/data/test_dataset.py | 7 +- test/data/test_graph_store.py | 16 ++- test/data/test_hetero_data.py | 14 +- 8 files changed, 134 insertions(+), 99 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index aed4ca6b2195..d17dd6100c53 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -52,7 +52,7 @@ jobs: pytest test/test_seed.py pytest test/test_typing.py pytest test/contrib/ - # pytest test/data/ + pytest test/data/ pytest test/datasets/ pytest test/explain/ pytest test/graphgym/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 57433f7c0b93..ec7477948bf6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- Accelerated sparse tensor conversion routiens ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042)) +- Accelerated sparse tensor conversion routiens ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) ### Removed diff --git a/test/data/lightning/test_datamodule.py b/test/data/lightning/test_datamodule.py index e190988bcb87..55b7803ab52d 100644 --- a/test/data/lightning/test_datamodule.py +++ b/test/data/lightning/test_datamodule.py @@ -177,6 +177,7 @@ def configure_optimizers(self): @onlyCUDA @onlyFullTest +@withPackage('pyg_lib') @withPackage('pytorch_lightning>=2.0.0') @withPackage('torchmetrics>=0.11.0') @pytest.mark.parametrize('loader', ['full', 'neighbor']) @@ -275,6 +276,7 @@ def configure_optimizers(self): @onlyCUDA @onlyFullTest +@withPackage('pyg_lib') @withPackage('pytorch_lightning>=2.0.0') @withPackage('torchmetrics>=0.11.0') def test_lightning_hetero_node_data(get_dataset): @@ -324,6 +326,7 @@ def sample_from_nodes(self, *args, **kwargs): @onlyCUDA @onlyFullTest +@withPackage('pyg_lib') @withPackage('pytorch_lightning') def test_lightning_hetero_link_data(): torch.manual_seed(12345) @@ -383,6 +386,7 @@ def test_lightning_hetero_link_data(): assert 'edge_label_time' in batch['author', 'paper'] +@withPackage('pyg_lib') @withPackage('pytorch_lightning') def test_lightning_hetero_link_data_custom_store(): torch.manual_seed(12345) @@ -419,6 +423,7 @@ def test_lightning_hetero_link_data_custom_store(): assert 'edge_label_index' in batch['author', 'paper'] +@withPackage('pyg_lib') @withPackage('pytorch_lightning') def test_eval_loader_kwargs(get_dataset): data = get_dataset(name='Cora')[0] diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 62b836e3f0a5..5a521c03e00b 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -2,97 +2,70 @@ import numpy as np import torch -from torch_sparse import SparseTensor import torch_geometric from torch_geometric.data import Batch, Data, HeteroData -from torch_geometric.testing import get_random_edge_index +from torch_geometric.testing import get_random_edge_index, withPackage +from torch_geometric.typing import SparseTensor -def test_batch(): +def test_batch_basic(): torch_geometric.set_debug(True) - x1 = torch.tensor([1, 2, 3], dtype=torch.float) - y1 = 1 - x1_sp = SparseTensor.from_dense(x1.view(-1, 1)) - e1 = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 
1]]) - adj1 = SparseTensor.from_edge_index(e1) - s1 = '1' - array1 = ['1', '2'] - x2 = torch.tensor([1, 2], dtype=torch.float) - y2 = 2 - x2_sp = SparseTensor.from_dense(x2.view(-1, 1)) - e2 = torch.tensor([[0, 1], [1, 0]]) - adj2 = SparseTensor.from_edge_index(e2) - s2 = '2' - array2 = ['3', '4', '5'] - x3 = torch.tensor([1, 2, 3, 4], dtype=torch.float) - y3 = 3 - x3_sp = SparseTensor.from_dense(x3.view(-1, 1)) - e3 = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) - adj3 = SparseTensor.from_edge_index(e3) - s3 = '3' - array3 = ['6', '7', '8', '9'] - - data1 = Data(x=x1, y=y1, x_sp=x1_sp, edge_index=e1, adj=adj1, s=s1, - array=array1, num_nodes=3) - data2 = Data(x=x2, y=y2, x_sp=x2_sp, edge_index=e2, adj=adj2, s=s2, - array=array2, num_nodes=2) - data3 = Data(x=x3, y=y3, x_sp=x3_sp, edge_index=e3, adj=adj3, s=s3, - array=array3, num_nodes=4) + x = torch.tensor([1.0, 2.0, 3.0]) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + data1 = Data(x=x, y=1, edge_index=edge_index, string='1', array=['1', '2'], + num_nodes=3) + + x = torch.tensor([1.0, 2.0]) + edge_index = torch.tensor([[0, 1], [1, 0]]) + data2 = Data(x=x, y=2, edge_index=edge_index, string='2', + array=['3', '4', '5'], num_nodes=2) + + x = torch.tensor([1.0, 2.0, 3.0, 4.0]) + edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) + data3 = Data(x=x, y=3, edge_index=edge_index, string='3', + array=['6', '7', '8', '9'], num_nodes=4) batch = Batch.from_data_list([data1]) assert str(batch) == ('DataBatch(x=[3], edge_index=[2, 4], y=[1], ' - 'x_sp=[3, 1, nnz=3], adj=[3, 3, nnz=4], s=[1], ' - 'array=[1], num_nodes=3, batch=[3], ptr=[2])') + 'string=[1], array=[1], num_nodes=3, batch=[3], ' + 'ptr=[2])') assert batch.num_graphs == len(batch) == 1 assert batch.x.tolist() == [1, 2, 3] assert batch.y.tolist() == [1] - assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() assert batch.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - edge_index = torch.stack(batch.adj.coo()[:2], dim=0) - assert edge_index.tolist() == batch.edge_index.tolist() - assert batch.s == ['1'] + assert batch.string == ['1'] assert batch.array == [['1', '2']] assert batch.num_nodes == 3 assert batch.batch.tolist() == [0, 0, 0] assert batch.ptr.tolist() == [0, 3] - batch = Batch.from_data_list([data1, data2, data3], follow_batch=['s']) + batch = Batch.from_data_list([data1, data2, data3], + follow_batch=['string']) assert str(batch) == ('DataBatch(x=[9], edge_index=[2, 12], y=[3], ' - 'x_sp=[9, 1, nnz=9], adj=[9, 9, nnz=12], s=[3], ' - 's_batch=[3], s_ptr=[4], array=[3], num_nodes=9, ' - 'batch=[9], ptr=[4])') + 'string=[3], string_batch=[3], string_ptr=[4], ' + 'array=[3], num_nodes=9, batch=[9], ptr=[4])') assert batch.num_graphs == len(batch) == 3 assert batch.x.tolist() == [1, 2, 3, 1, 2, 1, 2, 3, 4] assert batch.y.tolist() == [1, 2, 3] - assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() assert batch.edge_index.tolist() == [[0, 1, 1, 2, 3, 4, 5, 6, 6, 7, 7, 8], [1, 0, 2, 1, 4, 3, 6, 5, 7, 6, 8, 7]] - edge_index = torch.stack(batch.adj.coo()[:2], dim=0) - assert edge_index.tolist() == batch.edge_index.tolist() - assert batch.s == ['1', '2', '3'] - assert batch.s_batch.tolist() == [0, 1, 2] - assert batch.s_ptr.tolist() == [0, 1, 2, 3] + assert batch.string == ['1', '2', '3'] + assert batch.string_batch.tolist() == [0, 1, 2] + assert batch.string_ptr.tolist() == [0, 1, 2, 3] assert batch.array == [['1', '2'], ['3', '4', '5'], ['6', '7', '8', '9']] assert batch.num_nodes == 9 assert batch.batch.tolist() == 
[0, 0, 0, 1, 1, 2, 2, 2, 2] assert batch.ptr.tolist() == [0, 3, 5, 9] - data = batch[0] - assert str(data) == ("Data(x=[3], edge_index=[2, 4], y=[1], " - "x_sp=[3, 1, nnz=3], adj=[3, 3, nnz=4], s='1', " - "array=[2], num_nodes=3)") - data = batch[1] - assert str(data) == ("Data(x=[2], edge_index=[2, 2], y=[1], " - "x_sp=[2, 1, nnz=2], adj=[2, 2, nnz=2], s='2', " - "array=[3], num_nodes=2)") - - data = batch[2] - assert str(data) == ("Data(x=[4], edge_index=[2, 6], y=[1], " - "x_sp=[4, 1, nnz=4], adj=[4, 4, nnz=6], s='3', " - "array=[4], num_nodes=4)") + assert str(batch[0]) == ("Data(x=[3], edge_index=[2, 4], y=[1], " + "string='1', array=[2], num_nodes=3)") + assert str(batch[1]) == ("Data(x=[2], edge_index=[2, 2], y=[1], " + "string='2', array=[3], num_nodes=2)") + assert str(batch[2]) == ("Data(x=[4], edge_index=[2, 6], y=[1], " + "string='3', array=[4], num_nodes=4)") assert len(batch.index_select([1, 0])) == 2 assert len(batch.index_select(torch.tensor([1, 0]))) == 2 @@ -104,43 +77,95 @@ def test_batch(): data_list = batch.to_data_list() assert len(data_list) == 3 - assert len(data_list[0]) == 8 + assert len(data_list[0]) == 6 assert data_list[0].x.tolist() == [1, 2, 3] assert data_list[0].y.tolist() == [1] - assert data_list[0].x_sp.to_dense().view(-1).tolist() == [1, 2, 3] assert data_list[0].edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - edge_index = torch.stack(data_list[0].adj.coo()[:2], dim=0) - assert edge_index.tolist() == data_list[0].edge_index.tolist() - assert data_list[0].s == '1' + assert data_list[0].string == '1' assert data_list[0].array == ['1', '2'] assert data_list[0].num_nodes == 3 - assert len(data_list[1]) == 8 + assert len(data_list[1]) == 6 assert data_list[1].x.tolist() == [1, 2] assert data_list[1].y.tolist() == [2] - assert data_list[1].x_sp.to_dense().view(-1).tolist() == [1, 2] assert data_list[1].edge_index.tolist() == [[0, 1], [1, 0]] - edge_index = torch.stack(data_list[1].adj.coo()[:2], dim=0) - assert edge_index.tolist() == data_list[1].edge_index.tolist() - assert data_list[1].s == '2' + assert data_list[1].string == '2' assert data_list[1].array == ['3', '4', '5'] assert data_list[1].num_nodes == 2 - assert len(data_list[2]) == 8 + assert len(data_list[2]) == 6 assert data_list[2].x.tolist() == [1, 2, 3, 4] assert data_list[2].y.tolist() == [3] - assert data_list[2].x_sp.to_dense().view(-1).tolist() == [1, 2, 3, 4] assert data_list[2].edge_index.tolist() == [[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]] - edge_index = torch.stack(data_list[2].adj.coo()[:2], dim=0) - assert edge_index.tolist() == data_list[2].edge_index.tolist() - assert data_list[2].s == '3' + assert data_list[2].string == '3' assert data_list[2].array == ['6', '7', '8', '9'] assert data_list[2].num_nodes == 4 torch_geometric.set_debug(True) +@withPackage('torch_sparse') +def test_batch_with_sparse_tensor(): + x = SparseTensor.from_dense(torch.tensor([[1.0], [2.0], [3.0]])) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + adj = SparseTensor.from_edge_index(edge_index) + data1 = Data(x=x, adj=adj) + + x = SparseTensor.from_dense(torch.tensor([[1.0], [2.0]])) + edge_index = torch.tensor([[0, 1], [1, 0]]) + adj = SparseTensor.from_edge_index(edge_index) + data2 = Data(x=x, adj=adj) + + x = SparseTensor.from_dense(torch.tensor([[1.0], [2.0], [3.0], [4.0]])) + edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) + adj = SparseTensor.from_edge_index(edge_index) + data3 = Data(x=x, adj=adj) + + batch = Batch.from_data_list([data1]) + assert str(batch) == 
('DataBatch(x=[3, 1, nnz=3], adj=[3, 3, nnz=4], ' + 'batch=[3], ptr=[2])') + assert batch.num_graphs == len(batch) == 1 + assert batch.x.to_dense().tolist() == [[1], [2], [3]] + assert batch.adj.coo()[0].tolist() == [0, 1, 1, 2] + assert batch.adj.coo()[1].tolist() == [1, 0, 2, 1] + assert batch.batch.tolist() == [0, 0, 0] + assert batch.ptr.tolist() == [0, 3] + + batch = Batch.from_data_list([data1, data2, data3]) + + assert str(batch) == ('DataBatch(x=[9, 1, nnz=9], adj=[9, 9, nnz=12], ' + 'batch=[9], ptr=[4])') + assert batch.num_graphs == len(batch) == 3 + assert batch.x.to_dense().view(-1).tolist() == [1, 2, 3, 1, 2, 1, 2, 3, 4] + assert batch.adj.coo()[0].tolist() == [0, 1, 1, 2, 3, 4, 5, 6, 6, 7, 7, 8] + assert batch.adj.coo()[1].tolist() == [1, 0, 2, 1, 4, 3, 6, 5, 7, 6, 8, 7] + assert batch.batch.tolist() == [0, 0, 0, 1, 1, 2, 2, 2, 2] + assert batch.ptr.tolist() == [0, 3, 5, 9] + + assert str(batch[0]) == ("Data(x=[3, 1, nnz=3], adj=[3, 3, nnz=4])") + assert str(batch[1]) == ("Data(x=[2, 1, nnz=2], adj=[2, 2, nnz=2])") + assert str(batch[2]) == ("Data(x=[4, 1, nnz=4], adj=[4, 4, nnz=6])") + + data_list = batch.to_data_list() + assert len(data_list) == 3 + + assert len(data_list[0]) == 2 + assert data_list[0].x.to_dense().tolist() == [[1], [2], [3]] + assert data_list[0].adj.coo()[0].tolist() == [0, 1, 1, 2] + assert data_list[0].adj.coo()[1].tolist() == [1, 0, 2, 1] + + assert len(data_list[1]) == 2 + assert data_list[1].x.to_dense().tolist() == [[1], [2]] + assert data_list[1].adj.coo()[0].tolist() == [0, 1] + assert data_list[1].adj.coo()[1].tolist() == [1, 0] + + assert len(data_list[2]) == 2 + assert data_list[2].x.to_dense().tolist() == [[1], [2], [3], [4]] + assert data_list[2].adj.coo()[0].tolist() == [0, 1, 1, 2, 2, 3] + assert data_list[2].adj.coo()[1].tolist() == [1, 0, 2, 1, 3, 2] + + def test_batching_with_new_dimension(): torch_geometric.set_debug(True) diff --git a/test/data/test_data.py b/test/data/test_data.py index 09fd9d95e0b0..5badd09eb055 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -3,11 +3,11 @@ import pytest import torch import torch.multiprocessing as mp -import torch_sparse import torch_geometric from torch_geometric.data import Data from torch_geometric.data.storage import AttrType +from torch_geometric.testing import withPackage def test_data(): @@ -392,13 +392,11 @@ def test_basic_feature_store(): # Graph Store ################################################################# +@withPackage('torch_sparse') def test_basic_graph_store(): r"""Test the core graph store API.""" data = Data() - edge_index = torch.LongTensor([[0, 1], [1, 2]]) - adj = torch_sparse.SparseTensor(row=edge_index[0], col=edge_index[1]) - def assert_equal_tensor_tuple(expected, actual): assert len(expected) == len(actual) for i in range(len(expected)): @@ -406,9 +404,9 @@ def assert_equal_tensor_tuple(expected, actual): # We put all three tensor types: COO, CSR, and CSC, and we get them back # to confirm that `GraphStore` works as intended. 
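# The hand-written tuples below encode the same two-edge graph on three nodes
# (edges 0 -> 1 and 1 -> 2): COO as (row, col), CSR as
# (crow_indices, col_indices), and CSC as (row_indices, ccol_indices).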
- coo = adj.coo()[:-1] - csr = adj.csr()[:-1] - csc = adj.csc()[-2::-1] # (row, colptr) + coo = (torch.tensor([0, 1]), torch.tensor([1, 2])) + csr = (torch.tensor([0, 1, 2, 2]), torch.tensor([1, 2])) + csc = (torch.tensor([0, 1]), torch.tensor([0, 0, 1, 2])) # Put: data.put_edge_index(coo, layout='coo', size=(3, 3)) diff --git a/test/data/test_dataset.py b/test/data/test_dataset.py index abe0e06b2d2b..067a055a12f1 100644 --- a/test/data/test_dataset.py +++ b/test/data/test_dataset.py @@ -2,9 +2,10 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.data import Data, HeteroData, InMemoryDataset +from torch_geometric.testing import withPackage +from torch_geometric.typing import SparseTensor class MyTestDataset(InMemoryDataset): @@ -117,6 +118,7 @@ def test_to_datapipe(): assert torch.equal(dataset[1].edge_index, list(dp)[1].edge_index) +@withPackage('torch_sparse') def test_in_memory_sparse_tensor_dataset(): x = torch.randn(11, 16) adj = SparseTensor( @@ -279,7 +281,8 @@ def tr(n, m): assert dataset[3].xs[1].size() == (16, 4) -def test_lists_of_SparseTensors(): +@withPackage('torch_sparse') +def test_lists_of_sparse_tensors(): e1 = torch.tensor([[4, 1, 3, 2, 2, 3], [1, 3, 2, 3, 3, 2]]) e2 = torch.tensor([[0, 1, 4, 7, 2, 9], [7, 2, 2, 1, 4, 7]]) e3 = torch.tensor([[3, 5, 1, 2, 3, 3], [5, 0, 2, 1, 3, 7]]) diff --git a/test/data/test_graph_store.py b/test/data/test_graph_store.py index 960280a62a2c..8628bf699c6e 100644 --- a/test/data/test_graph_store.py +++ b/test/data/test_graph_store.py @@ -1,9 +1,13 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout from torch_geometric.testing import MyGraphStore, get_random_edge_index +from torch_geometric.utils import ( + to_torch_coo_tensor, + to_torch_csc_tensor, + to_torch_csr_tensor, +) def test_graph_store(): @@ -39,9 +43,13 @@ def test_graph_store(): def test_graph_store_conversion(): graph_store = MyGraphStore() - coo = (row, col) = get_random_edge_index(100, 100, 300) - adj = SparseTensor(row=row, col=col, sparse_sizes=(100, 100)) - csr, csc = adj.csr()[:2], adj.csc()[:2][::-1] + edge_index = get_random_edge_index(100, 100, 300) + adj = to_torch_coo_tensor(edge_index, size=(100, 100)) + coo = (adj.indices()[0], adj.indices()[1]) + adj = to_torch_csr_tensor(edge_index, size=(100, 100)) + csr = (adj.crow_indices(), adj.col_indices()) + adj = to_torch_csc_tensor(edge_index, size=(100, 100)) + csc = (adj.row_indices(), adj.ccol_indices()) graph_store.put_edge_index(coo, ('v', '1', 'v'), 'coo', size=(100, 100)) graph_store.put_edge_index(csr, ('v', '2', 'v'), 'csr', size=(100, 100)) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index ee345709d925..1ea3c79bbe99 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -2,11 +2,10 @@ import pytest import torch -import torch_sparse from torch_geometric.data import HeteroData from torch_geometric.data.storage import EdgeStorage -from torch_geometric.testing import get_random_edge_index +from torch_geometric.testing import get_random_edge_index, withPackage x_paper = torch.randn(10, 16) x_author = torch.randn(5, 32) @@ -562,13 +561,10 @@ def test_basic_feature_store(): # Graph Store ################################################################# +@withPackage('torch_sparse') def test_basic_graph_store(): data = HeteroData() - edge_index = torch.LongTensor([[0, 1], [1, 2]]) - adj = torch_sparse.SparseTensor(row=edge_index[0], 
col=edge_index[1], - sparse_sizes=(3, 3)) - def assert_equal_tensor_tuple(expected, actual): assert len(expected) == len(actual) for i in range(len(expected)): @@ -576,9 +572,9 @@ def assert_equal_tensor_tuple(expected, actual): # We put all three tensor types: COO, CSR, and CSC, and we get them back # to confirm that `GraphStore` works as intended. - coo = adj.coo()[:-1] - csr = adj.csr()[:-1] - csc = adj.csc()[-2::-1] # (row, colptr) + coo = (torch.tensor([0, 1]), torch.tensor([1, 2])) + csr = (torch.tensor([0, 1, 2, 2]), torch.tensor([1, 2])) + csc = (torch.tensor([0, 1]), torch.tensor([0, 0, 1, 2])) # Put: data.put_edge_index(coo, layout='coo', edge_type=('a', 'to', 'b'), From dd58a3f5aeba8c13e6b2de4f7b4eb6762d5fbe32 Mon Sep 17 00:00:00 2001 From: Riya Sinha <33243383+riyavsinha@users.noreply.github.com> Date: Mon, 27 Mar 2023 00:49:06 -0500 Subject: [PATCH 1050/2432] feat: add `RotatE` KGE model (#7026) [No longer contributing affiliated with CS224W] Paper reported FB15k-237 H@10: .533 Implementation Example H@10: .431 Of note, the paper only reports their score using their Adversarial Negative Sampling (ANS), which is not implemented as a part of this PR. Table 7 from the paper comparing the use of ANS vs. uniform random sampling shows a 5% increase in H@10 for FB15k-237, which may be a factor to the disparity in performance, combined with the small hidden size in the example, I believe. The margin=9 argument is from the paper's reported best performance in Table 12 on FB15k-237 as well. Happy to try to implement the ANS as part of this PR if the current performance is below standard, let me know! --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinu Sunil --- CHANGELOG.md | 1 + README.md | 1 + examples/kge_fb15k_237.py | 14 +++-- test/nn/kge/test_rotate.py | 24 ++++++++ torch_geometric/nn/kge/__init__.py | 2 + torch_geometric/nn/kge/rotate.py | 97 ++++++++++++++++++++++++++++++ 6 files changed, 133 insertions(+), 6 deletions(-) create mode 100644 test/nn/kge/test_rotate.py create mode 100644 torch_geometric/nn/kge/rotate.py diff --git a/CHANGELOG.md b/CHANGELOG.md index ec7477948bf6..5d782d9ec530 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
## [2.4.0] - 2023-MM-DD ### Added +- Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) ### Changed diff --git a/README.md b/README.md index 22d6c63d9cab..e9807f77c006 100644 --- a/README.md +++ b/README.md @@ -304,6 +304,7 @@ New Benchmarks and Strong Simple Methods](https://arxiv.org/abs/2110.14446) (Neu * **[TransE](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.TransE.html)** from Bordes *et al.*: [Translating Embeddings for Modeling Multi-Relational Data](https://proceedings.neurips.cc/paper/2013/file/1cecc7a77928ca8133fa24680a88d2f9-Paper.pdf) (NIPS 2013) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)] * **[ComplEx](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.ComplEx.html)** from Trouillon *et al.*: [Complex Embeddings for Simple Link Prediction](https://arxiv.org/abs/1606.06357) (ICML 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)] * **[DistMult](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.DistMult.html)** from Yang *et al.*: [Embedding Entities and Relations for Learning and Inference in Knowledge Bases](https://arxiv.org/abs/1412.6575) (ICLR 2015) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)] +* **[RotatE](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.RotatE.html)** from Sun *et al.*: [RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space](https://arxiv.org/abs/1902.10197) (ICLR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)] **GNN operators and utilities:** diff --git a/examples/kge_fb15k_237.py b/examples/kge_fb15k_237.py index b4d1ac7f008a..a8fe23daef6f 100644 --- a/examples/kge_fb15k_237.py +++ b/examples/kge_fb15k_237.py @@ -5,12 +5,13 @@ import torch.optim as optim from torch_geometric.datasets import FB15k_237 -from torch_geometric.nn import ComplEx, DistMult, TransE +from torch_geometric.nn import ComplEx, DistMult, RotatE, TransE model_map = { 'transe': TransE, 'complex': ComplEx, 'distmult': DistMult, + 'rotate': RotatE, } parser = argparse.ArgumentParser() @@ -25,11 +26,11 @@ val_data = FB15k_237(path, split='val')[0].to(device) test_data = FB15k_237(path, split='test')[0].to(device) -model = model_map[args.model]( - num_nodes=train_data.num_nodes, - num_relations=train_data.num_edge_types, - hidden_channels=50, -).to(device) +model_arg_map = {'rotate': {'margin': 9.0}} +model = model_map[args.model](num_nodes=train_data.num_nodes, + num_relations=train_data.num_edge_types, + hidden_channels=50, + **model_arg_map.get(args.model, {})).to(device) loader = model.loader( head_index=train_data.edge_index[0], @@ -43,6 +44,7 @@ 'transe': optim.Adam(model.parameters(), lr=0.01), 'complex': optim.Adagrad(model.parameters(), lr=0.001, weight_decay=1e-6), 'distmult': optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-6), + 'rotate': optim.Adam(model.parameters(), lr=1e-3), } optimizer = optimizer_map[args.model] diff --git a/test/nn/kge/test_rotate.py b/test/nn/kge/test_rotate.py new file mode 100644 index 000000000000..87640f386d89 --- /dev/null +++ b/test/nn/kge/test_rotate.py @@ -0,0 +1,24 @@ +import torch + +from torch_geometric.nn import RotatE + + +def test_rotate(): + model = RotatE(num_nodes=10, num_relations=5, 
hidden_channels=32) + assert str(model) == 'RotatE(10, num_relations=5, hidden_channels=32)' + + head_index = torch.tensor([0, 2, 4, 6, 8]) + rel_type = torch.tensor([0, 1, 2, 3, 4]) + tail_index = torch.tensor([1, 3, 5, 7, 9]) + + loader = model.loader(head_index, rel_type, tail_index, batch_size=5) + for h, r, t in loader: + out = model(h, r, t) + assert out.size() == (5, ) + + loss = model.loss(h, r, t) + assert loss >= 0. + + mean_rank, hits_at_10 = model.test(h, r, t, batch_size=5, log=False) + assert mean_rank <= 10 + assert hits_at_10 == 1.0 diff --git a/torch_geometric/nn/kge/__init__.py b/torch_geometric/nn/kge/__init__.py index 35e285218580..cd0c302baf1e 100644 --- a/torch_geometric/nn/kge/__init__.py +++ b/torch_geometric/nn/kge/__init__.py @@ -2,10 +2,12 @@ from .transe import TransE from .complex import ComplEx from .distmult import DistMult +from .rotate import RotatE __all__ = classes = [ 'KGEModel', 'TransE', 'ComplEx', 'DistMult', + 'RotatE', ] diff --git a/torch_geometric/nn/kge/rotate.py b/torch_geometric/nn/kge/rotate.py new file mode 100644 index 000000000000..a4724c5e956e --- /dev/null +++ b/torch_geometric/nn/kge/rotate.py @@ -0,0 +1,97 @@ +import math + +import torch +import torch.nn.functional as F +from torch import Tensor +from torch.nn import Embedding + +from torch_geometric.nn.kge import KGEModel + + +class RotatE(KGEModel): + r"""The RotatE model from the `"RotatE: Knowledge Graph Embedding by + Relational Rotation in Complex Space" `_ paper. + + :class:`RotatE` models relations as a rotation in complex space + from head to tail such that: + + .. math:: + \mathbf{e}_t = \mathbf{e}_h \circ \mathbf{e}_r + + Resulting in the scoring function: + + .. math:: + d(h, r, t) = - {\| \mathbf{e}_h \circ \mathbf{e}_r - \mathbf{e}_t \|}_p + + .. note:: + + For an example of using the :class:`RotatE` model, see + `examples/kge_fb15k_237.py + `_. + + Args: + num_nodes (int): The number of nodes/entities in the graph. + num_relations (int): The number of relations in the graph. + hidden_channels (int): The hidden embedding size. + margin (float, optional): The margin of the ranking loss. + sparse (bool, optional): If set to :obj:`True`, gradients w.r.t. to + the embedding matrices will be sparse. 
(default: :obj:`False`) + """ + def __init__( + self, + num_nodes: int, + num_relations: int, + hidden_channels: int, + margin: float = 1.0, + sparse: bool = False, + ): + super().__init__(num_nodes, num_relations, hidden_channels, sparse) + self.register_buffer('margin', torch.Tensor([margin])) + self.node_emb_im = Embedding(num_nodes, hidden_channels, sparse=sparse) + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.xavier_uniform_(self.node_emb.weight) + torch.nn.init.xavier_uniform_(self.node_emb_im.weight) + torch.nn.init.uniform_(self.rel_emb.weight, 0, 2 * math.pi) + + def forward( + self, + head_index: Tensor, + rel_type: Tensor, + tail_index: Tensor, + ) -> Tensor: + + head_re = self.node_emb(head_index) + head_im = self.node_emb_im(head_index) + tail_re = self.node_emb(tail_index) + tail_im = self.node_emb_im(tail_index) + + rel_theta = self.rel_emb(rel_type) + rel_re, rel_im = torch.cos(rel_theta), torch.sin(rel_theta) + + re_score = (rel_re * head_re - rel_im * head_im) - tail_re + im_score = (rel_re * head_im + rel_im * head_re) - tail_im + complex_score = torch.stack([re_score, im_score], dim=2) + score = torch.linalg.vector_norm(complex_score, dim=(1, 2)) + + return self.margin - score + + def loss( + self, + head_index: Tensor, + rel_type: Tensor, + tail_index: Tensor, + ) -> Tensor: + + pos_score = self(head_index, rel_type, tail_index) + neg_score = self(*self.random_sample(head_index, rel_type, tail_index)) + scores = torch.cat([pos_score, neg_score], dim=0) + + pos_target = torch.ones_like(pos_score) + neg_target = torch.zeros_like(neg_score) + target = torch.cat([pos_target, neg_target], dim=0) + + return F.binary_cross_entropy_with_logits(scores, target) From f91cb8445b1892e0b57e71503e036ff65214b65b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 27 Mar 2023 09:40:21 +0200 Subject: [PATCH 1051/2432] Drop `torch_sparse` dependency in tests (3/n) (#7045) --- .github/workflows/latest_testing.yml | 2 +- examples/kge_fb15k_237.py | 10 +- test/loader/test_cluster.py | 2 +- test/loader/test_graph_saint.py | 2 + test/loader/test_hgt_loader.py | 6 +- test/loader/test_imbalanced_sampler.py | 2 + test/loader/test_link_neighbor_loader.py | 16 ++- test/loader/test_neighbor_loader.py | 124 ++++++++++++----------- test/loader/test_neighbor_sampler.py | 6 +- test/loader/test_shadow.py | 4 +- test/loader/test_zip_loader.py | 2 + torch_geometric/nn/kge/rotate.py | 10 +- torch_geometric/nn/kge/transe.py | 3 +- torch_geometric/testing/__init__.py | 2 + torch_geometric/testing/decorators.py | 11 ++ 15 files changed, 125 insertions(+), 77 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index d17dd6100c53..6a6163624b8d 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -57,7 +57,7 @@ jobs: pytest test/explain/ pytest test/graphgym/ pytest test/io/ - # pytest test/loader/ + pytest test/loader/ # pytest test/nn/ pytest test/profile/ pytest test/sampler/ diff --git a/examples/kge_fb15k_237.py b/examples/kge_fb15k_237.py index a8fe23daef6f..036656d271e9 100644 --- a/examples/kge_fb15k_237.py +++ b/examples/kge_fb15k_237.py @@ -27,10 +27,12 @@ test_data = FB15k_237(path, split='test')[0].to(device) model_arg_map = {'rotate': {'margin': 9.0}} -model = model_map[args.model](num_nodes=train_data.num_nodes, - num_relations=train_data.num_edge_types, - hidden_channels=50, - **model_arg_map.get(args.model, {})).to(device) +model = model_map[args.model]( + 
num_nodes=train_data.num_nodes, + num_relations=train_data.num_edge_types, + hidden_channels=50, + **model_arg_map.get(args.model, {}), +).to(device) loader = model.loader( head_index=train_data.edge_index[0], diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index 1e9e0bfd8155..35d023448df7 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -10,7 +10,7 @@ col = torch.tensor([0]) torch.ops.torch_sparse.partition(rowptr, col, None, 1, True) with_metis = True -except RuntimeError: +except (AttributeError, RuntimeError): with_metis = False diff --git a/test/loader/test_graph_saint.py b/test/loader/test_graph_saint.py index 8f06133875cc..efb7ef6dd8cc 100644 --- a/test/loader/test_graph_saint.py +++ b/test/loader/test_graph_saint.py @@ -6,8 +6,10 @@ GraphSAINTNodeSampler, GraphSAINTRandomWalkSampler, ) +from torch_geometric.testing import withPackage +@withPackage('torch_sparse') def test_graph_saint(): adj = torch.tensor([ [+1, +2, +3, +0, +4, +0], diff --git a/test/loader/test_hgt_loader.py b/test/loader/test_hgt_loader.py index a43e72871115..20fee6f0f9e2 100644 --- a/test/loader/test_hgt_loader.py +++ b/test/loader/test_hgt_loader.py @@ -1,11 +1,11 @@ import numpy as np import torch -from torch_sparse import SparseTensor from torch_geometric.data import HeteroData from torch_geometric.loader import HGTLoader from torch_geometric.nn import GraphConv, to_hetero -from torch_geometric.testing import get_random_edge_index +from torch_geometric.testing import get_random_edge_index, withPackage +from torch_geometric.typing import SparseTensor from torch_geometric.utils import k_hop_subgraph @@ -17,6 +17,7 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): return int(mask.sum()) == mask.numel() +@withPackage('torch_sparse') def test_hgt_loader(): torch.manual_seed(12345) @@ -131,6 +132,7 @@ def test_hgt_loader(): assert torch.cat([row, col]).unique().numel() >= 59 +@withPackage('torch_sparse') def test_hgt_loader_on_cora(get_dataset): dataset = get_dataset(name='Cora') data = dataset[0] diff --git a/test/loader/test_imbalanced_sampler.py b/test/loader/test_imbalanced_sampler.py index a2dd6e25d908..28fa7c50d998 100644 --- a/test/loader/test_imbalanced_sampler.py +++ b/test/loader/test_imbalanced_sampler.py @@ -9,6 +9,7 @@ ImbalancedSampler, NeighborLoader, ) +from torch_geometric.testing import onlyNeighborSampler def test_dataloader_with_imbalanced_sampler(): @@ -62,6 +63,7 @@ def test_in_memory_dataset_imbalanced_sampler(): assert prob.min() > 0.4 and prob.max() < 0.6 +@onlyNeighborSampler def test_neighbor_loader_with_imbalanced_sampler(): zeros = torch.zeros(10, dtype=torch.long) ones = torch.ones(90, dtype=torch.long) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 9e6265e82a2c..a87de55fc2b1 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -7,6 +7,7 @@ MyFeatureStore, MyGraphStore, get_random_edge_index, + onlyNeighborSampler, withPackage, ) @@ -15,6 +16,7 @@ def unique_edge_pairs(edge_index): return set(map(tuple, edge_index.t().tolist())) +@onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio): @@ -80,6 +82,7 @@ def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio): assert torch.all(batch.edge_label[20:] == 0) +@onlyNeighborSampler 
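# `onlyNeighborSampler` skips these tests when neither `pyg_lib` nor
# `torch_sparse` is installed, i.e. when no neighbor sampling backend exists.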
@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) def test_hetero_link_neighbor_loader_basic(directed, neg_sampling_ratio): @@ -124,6 +127,7 @@ def test_hetero_link_neighbor_loader_basic(directed, neg_sampling_ratio): assert torch.all(batch['paper', 'author'].edge_label[20:] == 0) +@onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode def test_hetero_link_neighbor_loader_loop(directed): data = HeteroData() @@ -150,6 +154,7 @@ def test_hetero_link_neighbor_loader_loop(directed): assert len(edge_index | edge_label_index) == len(edge_index) +@onlyNeighborSampler def test_link_neighbor_loader_edge_label(): edge_index = get_random_edge_index(100, 100, 500) data = Data(edge_index=edge_index, x=torch.arange(100)) @@ -226,12 +231,11 @@ def test_temporal_hetero_link_neighbor_loader(): assert edge_min >= author_min -@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData]) -@pytest.mark.parametrize('GraphStore', [MyGraphStore, HeteroData]) -def test_custom_hetero_link_neighbor_loader(FeatureStore, GraphStore): +@onlyNeighborSampler +def test_custom_hetero_link_neighbor_loader(): data = HeteroData() - feature_store = FeatureStore() - graph_store = GraphStore() + feature_store = MyFeatureStore() + graph_store = MyGraphStore() # Set up node features: x = torch.arange(100) @@ -296,6 +300,7 @@ def test_custom_hetero_link_neighbor_loader(FeatureStore, GraphStore): 'author', 'to', 'paper'].edge_index.size()) +@onlyNeighborSampler def test_homo_link_neighbor_loader_no_edges(): loader = LinkNeighborLoader( Data(num_nodes=100), @@ -312,6 +317,7 @@ def test_homo_link_neighbor_loader_no_edges(): assert batch.num_nodes == batch.edge_label_index.unique().numel() +@onlyNeighborSampler def test_hetero_link_neighbor_loader_no_edges(): loader = LinkNeighborLoader( HeteroData(paper=dict(num_nodes=100)), diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 00bb86d75080..89709ddf430c 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -5,9 +5,7 @@ import numpy as np import pytest import torch -from torch_sparse import SparseTensor -import torch_geometric.typing from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv, to_hetero @@ -16,10 +14,15 @@ MyGraphStore, get_random_edge_index, onlyLinux, + onlyNeighborSampler, withPackage, ) from torch_geometric.typing import WITH_PYG_LIB -from torch_geometric.utils import k_hop_subgraph +from torch_geometric.utils import ( + k_hop_subgraph, + to_torch_csc_tensor, + to_torch_csr_tensor, +) def is_subset(subedge_index, edge_index, src_idx, dst_idx): @@ -30,10 +33,11 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): return int(mask.sum()) == mask.numel() +@onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) def test_homo_neighbor_loader_basic(directed, dtype): - if dtype != torch.int64 and not torch_geometric.typing.WITH_PYG_LIB: + if dtype != torch.int64 and not WITH_PYG_LIB: return torch.manual_seed(12345) @@ -76,10 +80,11 @@ def test_homo_neighbor_loader_basic(directed, dtype): ) +@onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) def 
test_hetero_neighbor_loader_basic(directed, dtype): - if dtype != torch.int64 and not torch_geometric.typing.WITH_PYG_LIB: + if dtype != torch.int64 and not WITH_PYG_LIB: return torch.manual_seed(12345) @@ -102,11 +107,8 @@ def test_hetero_neighbor_loader_basic(directed, dtype): r1, c1 = data['paper', 'paper'].edge_index r2, c2 = data['paper', 'author'].edge_index + torch.tensor([[0], [100]]) r3, c3 = data['author', 'paper'].edge_index + torch.tensor([[100], [0]]) - full_adj = SparseTensor( - row=torch.cat([r1, r2, r3]), - col=torch.cat([c1, c2, c3]), - value=torch.arange(2500), - ) + mat = torch.full((300, 300), fill_value=-1, dtype=torch.long) + mat[torch.cat([r1, r2, r3]), torch.cat([c1, c2, c3])] = torch.arange(2500) batch_size = 20 @@ -167,11 +169,13 @@ def test_hetero_neighbor_loader_basic(directed, dtype): assert col.min() >= 0 and col.max() < batch['paper'].num_nodes assert value.min() >= 0 and value.max() < 500 if not directed: - adj = full_adj[batch['paper'].x, batch['paper'].x] - assert adj.nnz() == row.size(0) - assert torch.allclose(row.unique(), adj.storage.row().unique()) - assert torch.allclose(col.unique(), adj.storage.col().unique()) - assert torch.allclose(value.unique(), adj.storage.value().unique()) + adj = mat[batch['paper'].x][:, batch['paper'].x] + full_row, full_col = (adj >= 0).nonzero().t() + full_value = adj[adj >= 0] + assert full_value.size(0) == row.size(0) + assert torch.equal(row.unique(), full_row.unique()) + assert torch.equal(col.unique(), full_col.unique()) + assert torch.equal(value.unique(), full_value().unique()) assert is_subset( batch['paper', 'paper'].edge_index.to(torch.int64), @@ -189,11 +193,13 @@ def test_hetero_neighbor_loader_basic(directed, dtype): assert col.min() >= 0 and col.max() < batch['author'].num_nodes assert value.min() >= 500 and value.max() < 1500 if not directed: - adj = full_adj[batch['paper'].x, batch['author'].x] - assert adj.nnz() == row.size(0) - assert torch.allclose(row.unique(), adj.storage.row().unique()) - assert torch.allclose(col.unique(), adj.storage.col().unique()) - assert torch.allclose(value.unique(), adj.storage.value().unique()) + adj = mat[batch['paper'].x][:, batch['author'].x] + full_row, full_col = (adj >= 0).nonzero().t() + full_value = adj[adj >= 0] + assert full_value.size(0) == row.size(0) + assert torch.equal(row.unique(), full_row.unique()) + assert torch.equal(col.unique(), full_col.unique()) + assert torch.equal(value.unique(), full_value().unique()) assert is_subset( batch['paper', 'author'].edge_index.to(torch.int64), @@ -211,11 +217,13 @@ def test_hetero_neighbor_loader_basic(directed, dtype): assert col.min() >= 0 and col.max() < batch['paper'].num_nodes assert value.min() >= 1500 and value.max() < 2500 if not directed: - adj = full_adj[batch['author'].x, batch['paper'].x] - assert adj.nnz() == row.size(0) - assert torch.allclose(row.unique(), adj.storage.row().unique()) - assert torch.allclose(col.unique(), adj.storage.col().unique()) - assert torch.allclose(value.unique(), adj.storage.value().unique()) + adj = mat[batch['author'].x][:, batch['paper'].x] + full_row, full_col = (adj >= 0).nonzero().t() + full_value = adj[adj >= 0] + assert full_value.size(0) == row.size(0) + assert torch.equal(row.unique(), full_row.unique()) + assert torch.equal(col.unique(), full_col.unique()) + assert torch.equal(value.unique(), full_value().unique()) assert is_subset( batch['author', 'paper'].edge_index.to(torch.int64), @@ -226,10 +234,12 @@ def test_hetero_neighbor_loader_basic(directed, dtype): # Test 
for isolated nodes (there shouldn't exist any): n_id = torch.cat([batch['paper'].x, batch['author'].x]) - row, col, _ = full_adj[n_id, n_id].coo() + adj = mat[n_id][:, n_id] + row, col = (adj >= 0).nonzero().t() assert torch.cat([row, col]).unique().numel() == n_id.numel() +@onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode def test_homo_neighbor_loader_on_cora(get_dataset, directed): dataset = get_dataset(name='Cora') @@ -273,6 +283,7 @@ def forward(self, x, edge_index, edge_weight): assert torch.allclose(out1, out2, atol=1e-6) +@onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode def test_hetero_neighbor_loader_on_cora(get_dataset, directed): dataset = get_dataset(name='Cora') @@ -344,12 +355,11 @@ def test_temporal_hetero_neighbor_loader_on_cora(get_dataset): assert torch.all(mask) -@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData]) -@pytest.mark.parametrize('GraphStore', [MyGraphStore, HeteroData]) -def test_custom_neighbor_loader(FeatureStore, GraphStore): +@onlyNeighborSampler +def test_custom_neighbor_loader(): # Initialize feature store, graph store, and reference: - feature_store = FeatureStore() - graph_store = GraphStore() + feature_store = MyFeatureStore() + graph_store = MyGraphStore() data = HeteroData() # Set up node features: @@ -372,7 +382,8 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): # CSR: edge_index = get_random_edge_index(100, 200, 1000) data['paper', 'to', 'author'].edge_index = edge_index - csr = SparseTensor.from_edge_index(edge_index).csr()[:2] + adj = to_torch_csr_tensor(edge_index, size=(100, 200)) + csr = (adj.crow_indices(), adj.col_indices()) graph_store.put_edge_index(edge_index=csr, edge_type=('paper', 'to', 'author'), layout='csr', size=(100, 200)) @@ -380,7 +391,8 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): # CSC: edge_index = get_random_edge_index(200, 100, 1000) data['author', 'to', 'paper'].edge_index = edge_index - csc = SparseTensor(row=edge_index[1], col=edge_index[0]).csr()[-2::-1] + adj = to_torch_csc_tensor(edge_index, size=(200, 100)) + csc = (adj.row_indices(), adj.ccol_indices()) graph_store.put_edge_index(edge_index=csc, edge_type=('author', 'to', 'paper'), layout='csc', size=(200, 100)) @@ -426,18 +438,15 @@ def test_custom_neighbor_loader(FeatureStore, GraphStore): @withPackage('pyg_lib') -@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData]) -@pytest.mark.parametrize('GraphStore', [MyGraphStore, HeteroData]) -def test_temporal_custom_neighbor_loader_on_cora(get_dataset, FeatureStore, - GraphStore): +def test_temporal_custom_neighbor_loader_on_cora(get_dataset): # Initialize dataset (once): dataset = get_dataset(name='Cora') data = dataset[0] data.time = torch.arange(data.num_nodes, 0, -1) # Initialize feature store, graph store, and reference: - feature_store = FeatureStore() - graph_store = GraphStore() + feature_store = MyFeatureStore() + graph_store = MyGraphStore() hetero_data = HeteroData() feature_store.put_tensor( @@ -491,9 +500,11 @@ def test_temporal_custom_neighbor_loader_on_cora(get_dataset, FeatureStore, @withPackage('pyg_lib') -def test_pyg_lib_homo_neighbor_loader(): - adj = SparseTensor.from_edge_index(get_random_edge_index(20, 20, 100)) - colptr, row, _ = adj.csc() +@withPackage('torch_sparse') +def test_pyg_lib_and_torch_sparse_homo_equality(): + edge_index = get_random_edge_index(20, 20, 100) + adj = to_torch_csc_tensor(edge_index, size=(20, 20)) + colptr, 
row = adj.ccol_indices(), adj.row_indices() seed = torch.arange(10) @@ -511,12 +522,15 @@ def test_pyg_lib_homo_neighbor_loader(): @withPackage('pyg_lib') -def test_pyg_lib_hetero_neighbor_loader(): - adj1 = SparseTensor.from_edge_index(get_random_edge_index(20, 10, 50)) - colptr1, row1, _ = adj1.csc() +@withPackage('torch_sparse') +def test_pyg_lib_and_torch_sparse_hetero_equality(): + edge_index = get_random_edge_index(20, 10, 50) + adj = to_torch_csc_tensor(edge_index, size=(20, 10)) + colptr1, row1 = adj.ccol_indices(), adj.row_indices() - adj2 = SparseTensor.from_edge_index(get_random_edge_index(10, 20, 50)) - colptr2, row2, _ = adj2.csc() + edge_index = get_random_edge_index(10, 20, 50) + adj = to_torch_csc_tensor(edge_index, size=(10, 20)) + colptr2, row2 = adj.ccol_indices(), adj.row_indices() node_types = ['paper', 'author'] edge_types = [('paper', 'to', 'author'), ('author', 'to', 'paper')] @@ -559,6 +573,7 @@ def test_pyg_lib_hetero_neighbor_loader(): @onlyLinux +@onlyNeighborSampler def test_memmap_neighbor_loader(tmp_path): path = osp.join(tmp_path, 'x.npy') x = np.memmap(path, dtype=np.float32, mode='w+', shape=(100, 32)) @@ -580,17 +595,12 @@ def test_memmap_neighbor_loader(tmp_path): @onlyLinux -@pytest.mark.parametrize('num_workers,loader_cores', [ - (1, None), - (1, [1]), -]) -def test_cpu_affinity_neighbor_loader(num_workers, loader_cores): +@onlyNeighborSampler +@pytest.mark.parametrize('loader_cores', [None, [1]]) +def test_cpu_affinity_neighbor_loader(loader_cores): data = Data(x=torch.randn(1, 1)) loader = NeighborLoader(data, num_neighbors=[-1], batch_size=1, - num_workers=num_workers) - - if isinstance(loader_cores, list): - loader_cores = loader_cores[:num_workers] + num_workers=1) out = [] with loader.enable_cpu_affinity(loader_cores): @@ -604,7 +614,7 @@ def test_cpu_affinity_neighbor_loader(num_workers, loader_cores): stdout = process.communicate()[0].decode('utf-8') out.append(int(stdout.split(':')[1].strip())) if not loader_cores: - assert out == list(range(0, num_workers)) + assert out == [0] else: assert out == loader_cores diff --git a/test/loader/test_neighbor_sampler.py b/test/loader/test_neighbor_sampler.py index 60f4477c5a8b..53bb849d5629 100644 --- a/test/loader/test_neighbor_sampler.py +++ b/test/loader/test_neighbor_sampler.py @@ -1,12 +1,14 @@ import numpy as np import torch -from torch_sparse import SparseTensor from torch_geometric.loader import NeighborSampler from torch_geometric.nn.conv import GATConv, SAGEConv +from torch_geometric.testing import withPackage +from torch_geometric.typing import SparseTensor from torch_geometric.utils import erdos_renyi_graph +@withPackage('torch_sparse') def test_neighbor_sampler_basic(): edge_index = erdos_renyi_graph(num_nodes=10, edge_prob=0.5) adj_t = SparseTensor.from_edge_index(edge_index, sparse_sizes=(10, 10)).t() @@ -38,12 +40,14 @@ def test_neighbor_sampler_basic(): assert adj_t.size(1) == size[0] +@withPackage('torch_sparse') def test_neighbor_sampler_invalid_kwargs(): # Ignore `collate_fn` and `dataset` arguments: edge_index = torch.tensor([[0, 1], [1, 0]]) NeighborSampler(edge_index, sizes=[-1], collate_fn=None, dataset=None) +@withPackage('torch_sparse') def test_neighbor_sampler_on_cora(get_dataset): dataset = get_dataset(name='Cora') data = dataset[0] diff --git a/test/loader/test_shadow.py b/test/loader/test_shadow.py index ddd8a959ac5a..5f09440d4961 100644 --- a/test/loader/test_shadow.py +++ b/test/loader/test_shadow.py @@ -1,10 +1,12 @@ import torch -from torch_sparse import SparseTensor 
from torch_geometric.data import Data from torch_geometric.loader import ShaDowKHopSampler +from torch_geometric.testing import withPackage +from torch_geometric.typing import SparseTensor +@withPackage('torch_sparse') def test_shadow_k_hop_sampler(): row = torch.tensor([0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 4, 4, 5, 5]) col = torch.tensor([1, 2, 3, 0, 2, 0, 1, 4, 5, 0, 2, 5, 2, 4]) diff --git a/test/loader/test_zip_loader.py b/test/loader/test_zip_loader.py index 14c4c36d4121..4bb4a4d79e2b 100644 --- a/test/loader/test_zip_loader.py +++ b/test/loader/test_zip_loader.py @@ -3,8 +3,10 @@ from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader, ZipLoader +from torch_geometric.testing import onlyNeighborSampler +@onlyNeighborSampler @pytest.mark.parametrize('filter_per_worker', [True, False]) def test_zip_loader(filter_per_worker): x = torch.arange(100) diff --git a/torch_geometric/nn/kge/rotate.py b/torch_geometric/nn/kge/rotate.py index a4724c5e956e..94d1d6b0ff8e 100644 --- a/torch_geometric/nn/kge/rotate.py +++ b/torch_geometric/nn/kge/rotate.py @@ -14,12 +14,12 @@ class RotatE(KGEModel): 1902.10197>`_ paper. :class:`RotatE` models relations as a rotation in complex space - from head to tail such that: + from head to tail such that .. math:: - \mathbf{e}_t = \mathbf{e}_h \circ \mathbf{e}_r + \mathbf{e}_t = \mathbf{e}_h \circ \mathbf{e}_r, - Resulting in the scoring function: + resulting in the scoring function .. math:: d(h, r, t) = - {\| \mathbf{e}_h \circ \mathbf{e}_r - \mathbf{e}_t \|}_p @@ -48,8 +48,10 @@ def __init__( sparse: bool = False, ): super().__init__(num_nodes, num_relations, hidden_channels, sparse) - self.register_buffer('margin', torch.Tensor([margin])) + + self.margin = margin self.node_emb_im = Embedding(num_nodes, hidden_channels, sparse=sparse) + self.reset_parameters() def reset_parameters(self): diff --git a/torch_geometric/nn/kge/transe.py b/torch_geometric/nn/kge/transe.py index 084b7efe698b..cd15a1925d9b 100644 --- a/torch_geometric/nn/kge/transe.py +++ b/torch_geometric/nn/kge/transe.py @@ -50,9 +50,10 @@ def __init__( p_norm: float = 1.0, sparse: bool = False, ): + super().__init__(num_nodes, num_relations, hidden_channels, sparse) + self.p_norm = p_norm self.margin = margin - super().__init__(num_nodes, num_relations, hidden_channels, sparse) self.reset_parameters() diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py index 8b3aa9f4f6d0..215ed8fb5948 100644 --- a/torch_geometric/testing/__init__.py +++ b/torch_geometric/testing/__init__.py @@ -5,6 +5,7 @@ onlyPython, onlyCUDA, onlyGraphviz, + onlyNeighborSampler, withPackage, withCUDA, disableExtensions, @@ -20,6 +21,7 @@ 'onlyPython', 'onlyCUDA', 'onlyGraphviz', + 'onlyNeighborSampler', 'withPackage', 'withCUDA', 'disableExtensions', diff --git a/torch_geometric/testing/decorators.py b/torch_geometric/testing/decorators.py index e7ceb3f9e80e..e6ea08938f28 100644 --- a/torch_geometric/testing/decorators.py +++ b/torch_geometric/testing/decorators.py @@ -7,6 +7,7 @@ import torch from packaging.requirements import Requirement +from torch_geometric.typing import WITH_PYG_LIB, WITH_TORCH_SPARSE from torch_geometric.visualization.graph import has_graphviz @@ -68,6 +69,16 @@ def onlyGraphviz(func: Callable) -> Callable: )(func) +def onlyNeighborSampler(func: Callable): + r"""A decorator to skip tests if no neighborhood sampler package is + installed.""" + import pytest + return pytest.mark.skipif( + not WITH_PYG_LIB and not WITH_TORCH_SPARSE, + reason="No 
neighbor sampler installed", + )(func) + + def withPackage(*args) -> Callable: r"""A decorator to skip tests if certain packages are not installed. Also supports version specification.""" From d3f4471478bc7c400628c17a97bf283c359f9430 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 27 Mar 2023 10:04:34 +0200 Subject: [PATCH 1052/2432] Drop `torch_sparse` dependency in tests (4/n) (#7046) --- .github/workflows/latest_testing.yml | 23 +++++++++++++++++------ test/nn/test_compile_dynamic.py | 22 ++++++++++------------ 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 6a6163624b8d..f39918ce1072 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -46,11 +46,7 @@ jobs: - name: Run tests if: steps.changed-files-specific.outputs.only_changed != 'true' run: | - pytest test/test_debug.py - pytest test/test_experimental.py - pytest test/test_home.py - pytest test/test_seed.py - pytest test/test_typing.py + pytest test/test_debug.py test/test_experimental.py test/test_home.py test/test_seed.py test/test_typing.py pytest test/contrib/ pytest test/data/ pytest test/datasets/ @@ -58,10 +54,25 @@ jobs: pytest test/graphgym/ pytest test/io/ pytest test/loader/ - # pytest test/nn/ pytest test/profile/ pytest test/sampler/ pytest test/testing/ pytest test/transforms/ pytest test/utils/ pytest test/visualization/ + # pytest test/nn/aggr + # pytest test/nn/conv + pytest test/nn/dense + pytest test/nn/functional + pytest test/nn/kge + # pytest test/nn/models + pytest test/nn/norm + # pytest test/nn/pool + pytest test/nn/unpool + pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py + # pytest test/nn/test_model_summary.py + pytest test/nn/test_module_dict.py test/nn/test_parameter_dict.py test/nn/test_reshape.py test/nn/test_resolver.py + # pytest test/nn/test_sequential.py + pytest test/nn/test_to_fixed_size_transformer.py test/nn/test_to_hetero_module.py + # pytest test/nn/test_to_hetero_transformer.py + # pytest test/nn/test_to_hetero_with_bases_transformer.py diff --git a/test/nn/test_compile_dynamic.py b/test/nn/test_compile_dynamic.py index 378eb56156a4..0fe60ba49e29 100644 --- a/test/nn/test_compile_dynamic.py +++ b/test/nn/test_compile_dynamic.py @@ -1,6 +1,5 @@ import random -import pytest import torch from torch import Tensor @@ -30,22 +29,21 @@ def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: @withCUDA @onlyLinux @disableExtensions -@withPackage('torch>=2.0.0') +@withPackage('torch>2.0.0') def test_dynamic_torch_compile(device): conv = MySAGEConv(64, 64).to(device) conv = torch_geometric.compile(conv, dynamic=True) optimizer = torch.optim.Adam(conv.parameters(), lr=0.01) - with pytest.raises(RuntimeError): - for _ in range(10): - N = random.randrange(100, 500) - E = random.randrange(200, 1000) + for _ in range(10): + N = random.randrange(100, 500) + E = random.randrange(200, 1000) - x = torch.randn(N, 64, device=device) - edge_index = get_random_edge_index(N, N, E, device=device) + x = torch.randn(N, 64, device=device) + edge_index = get_random_edge_index(N, N, E, device=device) - optimizer.zero_grad() - expected = conv(x, edge_index) - expected.mean().backward() - optimizer.step() + optimizer.zero_grad() + expected = conv(x, edge_index) + expected.mean().backward() + optimizer.step() From 
0e439f38b674e551491c99e5a6ddbc7a62f4cbf4 Mon Sep 17 00:00:00 2001 From: DomInvivo <47570400+DomInvivo@users.noreply.github.com> Date: Mon, 27 Mar 2023 04:36:23 -0400 Subject: [PATCH 1053/2432] Adding unbatching support for `torch.sparse.Tensor` (#7037) This PR allows to fix the issues with `Batch.from_data_list` or `Batch.get_example` or `Batch__getitem__` when using pytorch's sparse tensors, such as `sparse_coo_tensor`. See issue #7022 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 ++ test/data/test_batch.py | 42 ++++++++++++++++++++++++++++++++ torch_geometric/data/separate.py | 3 ++- torch_geometric/utils/select.py | 6 +++++ 4 files changed, 52 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5d782d9ec530..1f44c14dea2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ## [2.4.0] - 2023-MM-DD ### Added + +- Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) ### Changed diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 5a521c03e00b..fc63c16c70fc 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -166,6 +166,48 @@ def test_batch_with_sparse_tensor(): assert data_list[2].adj.coo()[1].tolist() == [1, 0, 2, 1, 3, 2] +def test_batch_with_torch_coo_tensor(): + x = torch.tensor([[1.0], [2.0], [3.0]]).to_sparse_coo() + data1 = Data(x=x) + + x = torch.tensor([[1.0], [2.0]]).to_sparse_coo() + data2 = Data(x=x) + + x = torch.tensor([[1.0], [2.0], [3.0], [4.0]]).to_sparse_coo() + data3 = Data(x=x) + + batch = Batch.from_data_list([data1]) + assert str(batch) == ('DataBatch(x=[3, 1], batch=[3], ptr=[2])') + assert batch.num_graphs == len(batch) == 1 + assert batch.x.to_dense().tolist() == [[1], [2], [3]] + assert batch.batch.tolist() == [0, 0, 0] + assert batch.ptr.tolist() == [0, 3] + + batch = Batch.from_data_list([data1, data2, data3]) + + assert str(batch) == ('DataBatch(x=[9, 1], batch=[9], ptr=[4])') + assert batch.num_graphs == len(batch) == 3 + assert batch.x.to_dense().view(-1).tolist() == [1, 2, 3, 1, 2, 1, 2, 3, 4] + assert batch.batch.tolist() == [0, 0, 0, 1, 1, 2, 2, 2, 2] + assert batch.ptr.tolist() == [0, 3, 5, 9] + + assert str(batch[0]) == ("Data(x=[3, 1])") + assert str(batch[1]) == ("Data(x=[2, 1])") + assert str(batch[2]) == ("Data(x=[4, 1])") + + data_list = batch.to_data_list() + assert len(data_list) == 3 + + assert len(data_list[0]) == 1 + assert data_list[0].x.to_dense().tolist() == [[1], [2], [3]] + + assert len(data_list[1]) == 1 + assert data_list[1].x.to_dense().tolist() == [[1], [2]] + + assert len(data_list[2]) == 1 + assert data_list[2].x.to_dense().tolist() == [[1], [2], [3], [4]] + + def test_batching_with_new_dimension(): torch_geometric.set_debug(True) diff --git a/torch_geometric/data/separate.py b/torch_geometric/data/separate.py index a3850a251198..5fcb98b9203c 100644 --- a/torch_geometric/data/separate.py +++ b/torch_geometric/data/separate.py @@ -6,6 +6,7 @@ from torch_geometric.data.data import BaseData from torch_geometric.data.storage import BaseStorage from torch_geometric.typing import SparseTensor +from torch_geometric.utils import narrow def separate(cls, batch: BaseData, idx: int, slice_dict: Any, @@ -62,7 +63,7 @@ def _separate( key = 
str(key) cat_dim = batch.__cat_dim__(key, value, store) start, end = int(slices[idx]), int(slices[idx + 1]) - value = value.narrow(cat_dim or 0, start, end - start) + value = narrow(value, cat_dim or 0, start, end - start) value = value.squeeze(0) if cat_dim is None else value if decrement and (incs.dim() > 1 or int(incs[idx]) != 0): value = value - incs[idx].to(value.device) diff --git a/torch_geometric/utils/select.py b/torch_geometric/utils/select.py index 210b127df2cc..54d0a3961f5d 100644 --- a/torch_geometric/utils/select.py +++ b/torch_geometric/utils/select.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.utils.mask import mask_select +from torch_geometric.utils.sparse import is_torch_sparse_tensor def select(src: Union[Tensor, List[Any]], index_or_mask: Tensor, @@ -41,6 +42,11 @@ def narrow(src: Union[Tensor, List[Any]], dim: int, start: int, start (int): The starting dimension. length (int): The distance to the ending dimension. """ + if is_torch_sparse_tensor(src): + # TODO Sparse tensors in `torch.sparse` do not yet support `narrow`. + index = torch.arange(start, start + length, device=src.device) + return src.index_select(dim, index) + if isinstance(src, Tensor): return src.narrow(dim, start, length) From 958e43b734831e43779949423c42d44e141c630a Mon Sep 17 00:00:00 2001 From: toenshoff Date: Mon, 27 Mar 2023 15:36:39 +0200 Subject: [PATCH 1054/2432] Fix `FastHGTConv` to correctly call the value module (#7050) This fixes a small bug in the new `fast_hgt_conv` layer, which incorrectly called the module `self.k_rel` instead of `self.v_rel` to generate the value vectors `v`. --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 3 ++- torch_geometric/nn/conv/fast_hgt_conv.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f44c14dea2a..fd66199f6d03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- Accelerated sparse tensor conversion routiens ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) +- Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050)) +- Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) ### Removed diff --git a/torch_geometric/nn/conv/fast_hgt_conv.py b/torch_geometric/nn/conv/fast_hgt_conv.py index 73223b44eb3a..4a4304a80e66 100644 --- a/torch_geometric/nn/conv/fast_hgt_conv.py +++ b/torch_geometric/nn/conv/fast_hgt_conv.py @@ -127,7 +127,7 @@ def _construct_src_node_feat( type_vec = torch.cat(type_list, dim=0) k = self.k_rel(torch.cat(ks, dim=0), type_vec).view(-1, H, D) - v = self.k_rel(torch.cat(vs, dim=0), type_vec).view(-1, H, D) + v = self.v_rel(torch.cat(vs, dim=0), type_vec).view(-1, H, D) return k, v, offset From 56b051f2e0a0c766e2bb60a5dd99f9ac70ce220e Mon Sep 17 00:00:00 2001 From: YanbingJiang Date: Mon, 27 Mar 2023 22:28:03 +0800 Subject: [PATCH 1055/2432] Fix `bfloat16` conversion in `HeteroData` benchmark scripts (#7048) This PR is to fix the issues: 1. 
Re-use `test` function in training benchmark to avoid mismatch between `Data` and `HeteroData` in evaluation mode. 2. `HeteroData` to convert to `bfloat16` datatype in inference and training benchmarks. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- benchmark/inference/inference_benchmark.py | 13 ++----- benchmark/training/training_benchmark.py | 37 +----------------- benchmark/utils/__init__.py | 2 + benchmark/utils/utils.py | 45 +++++++++++++++++++++- 4 files changed, 51 insertions(+), 46 deletions(-) diff --git a/benchmark/inference/inference_benchmark.py b/benchmark/inference/inference_benchmark.py index be72ed04ab97..72f634251749 100644 --- a/benchmark/inference/inference_benchmark.py +++ b/benchmark/inference/inference_benchmark.py @@ -10,6 +10,7 @@ get_model, get_split_masks, save_benchmark_data, + test, write_to_csv, ) from torch_geometric.loader import NeighborLoader @@ -33,13 +34,6 @@ def full_batch_inference(model, data): return model(data.x, edge_index) -def test(y, loader): - y_hat = y.argmax(dim=-1) - y = loader.data.y.to(y_hat.device) - mask = loader.data.test_mask - return int((y_hat[mask] == y[mask]).sum()) / int(mask.sum()) - - def run(args: argparse.ArgumentParser): csv_data = defaultdict(list) @@ -209,10 +203,11 @@ def run(args: argparse.ArgumentParser): progress_bar=True, ) if args.evaluate: - test_acc = model.test( - y, + test_acc = test( + model, test_loader, device, + hetero, progress_bar=True, ) print(f'Mini Batch Test Accuracy: \ diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index 2fc9b1123be4..8878ce9088d8 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -14,6 +14,7 @@ get_model, get_split_masks, save_benchmark_data, + test, write_to_csv, ) from torch_geometric.loader import NeighborLoader @@ -78,42 +79,6 @@ def train_hetero(model, loader, optimizer, device, progress_bar=True, desc="", optimizer.step() -@torch.no_grad() -def test(model, loader, device, hetero, progress_bar=True, desc="") -> None: - if progress_bar: - loader = tqdm(loader, desc=desc) - total_examples = total_correct = 0 - if hetero: - for batch in loader: - batch = batch.to(device) - if len(batch.adj_t_dict) > 0: - edge_index_dict = batch.adj_t_dict - else: - edge_index_dict = batch.edge_index_dict - out = model(batch.x_dict, edge_index_dict) - batch_size = batch['paper'].batch_size - out = out['paper'][:batch_size] - pred = out.argmax(dim=-1) - - total_examples += batch_size - total_correct += int((pred == batch['paper'].y[:batch_size]).sum()) - else: - for batch in loader: - batch = batch.to(device) - if hasattr(batch, 'adj_t'): - edge_index = batch.adj_t - else: - edge_index = batch.edge_index - out = model(batch.x, edge_index) - batch_size = batch.batch_size - out = out[:batch_size] - pred = out.argmax(dim=-1) - - total_examples += batch_size - total_correct += int((pred == batch.y[:batch_size]).sum()) - return total_correct / total_examples - - def run(args: argparse.ArgumentParser): csv_data = defaultdict(list) diff --git a/benchmark/utils/__init__.py b/benchmark/utils/__init__.py index 376545b7ca15..d97451a778e3 100644 --- a/benchmark/utils/__init__.py +++ b/benchmark/utils/__init__.py @@ -3,6 +3,7 @@ from .utils import get_model from .utils import get_split_masks from .utils import save_benchmark_data, write_to_csv +from .utils import test __all__ = [ 'emit_itt', @@ -12,4 +13,5 @@ 'get_split_masks', 
'save_benchmark_data', 'write_to_csv', + 'test', ] diff --git a/benchmark/utils/utils.py b/benchmark/utils/utils.py index 27d8748141a0..c8b8f0901ebf 100644 --- a/benchmark/utils/utils.py +++ b/benchmark/utils/utils.py @@ -5,8 +5,10 @@ import pandas as pd import torch from ogb.nodeproppred import PygNodePropPredDataset +from tqdm import tqdm import torch_geometric.transforms as T +from torch_geometric.data import HeteroData from torch_geometric.datasets import OGB_MAG, Reddit from torch_geometric.nn import GAT, GCN, PNA, EdgeCNN, GraphSAGE from torch_geometric.utils import index_to_mask @@ -70,7 +72,11 @@ def get_dataset_with_transformation(name, root, use_sparse_tensor=False, data.y = data.y.squeeze() if bf16: - data.x = data.x.to(torch.bfloat16) + if isinstance(data, HeteroData): + for node_type in data.node_types: + data[node_type].x = data[node_type].x.to(torch.bfloat16) + else: + data.x = data.x.to(torch.bfloat16) return data, dataset.num_classes, transform @@ -145,3 +151,40 @@ def write_to_csv(csv_data, training=False): with_header = not osp.exists(csv_path) df = pd.DataFrame(csv_data) df.to_csv(csv_path, mode='a', index_label='TEST_ID', header=with_header) + + +@torch.no_grad() +def test(model, loader, device, hetero, progress_bar=True, + desc="Evaluation") -> None: + if progress_bar: + loader = tqdm(loader, desc=desc) + total_examples = total_correct = 0 + if hetero: + for batch in loader: + batch = batch.to(device) + if len(batch.adj_t_dict) > 0: + edge_index_dict = batch.adj_t_dict + else: + edge_index_dict = batch.edge_index_dict + out = model(batch.x_dict, edge_index_dict) + batch_size = batch['paper'].batch_size + out = out['paper'][:batch_size] + pred = out.argmax(dim=-1) + + total_examples += batch_size + total_correct += int((pred == batch['paper'].y[:batch_size]).sum()) + else: + for batch in loader: + batch = batch.to(device) + if hasattr(batch, 'adj_t'): + edge_index = batch.adj_t + else: + edge_index = batch.edge_index + out = model(batch.x, edge_index) + batch_size = batch.batch_size + out = out[:batch_size] + pred = out.argmax(dim=-1) + + total_examples += batch_size + total_correct += int((pred == batch.y[:batch_size]).sum()) + return total_correct / total_examples From 88696db00cd2bc406a226cc6dee7aaec7ea09604 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 27 Mar 2023 17:54:35 +0200 Subject: [PATCH 1056/2432] Drop `torch_sparse` dependency in tests (5/n) (#7051) --- .github/workflows/latest_testing.yml | 2 +- test/nn/aggr/test_attention.py | 9 ++++++++- test/nn/aggr/test_basic.py | 20 +++++++++++++++++--- test/nn/aggr/test_multi.py | 11 +++++++++-- torch_geometric/nn/aggr/multi.py | 2 +- 5 files changed, 36 insertions(+), 8 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index f39918ce1072..4f1cf1f9a00a 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -60,7 +60,7 @@ jobs: pytest test/transforms/ pytest test/utils/ pytest test/visualization/ - # pytest test/nn/aggr + pytest test/nn/aggr # pytest test/nn/conv pytest test/nn/dense pytest test/nn/functional diff --git a/test/nn/aggr/test_attention.py b/test/nn/aggr/test_attention.py index 2bb620d783f7..012e1fdb1bcc 100644 --- a/test/nn/aggr/test_attention.py +++ b/test/nn/aggr/test_attention.py @@ -1,5 +1,7 @@ +import pytest import torch +import torch_geometric.typing from torch_geometric.nn import MLP from torch_geometric.nn.aggr import AttentionalAggregation @@ -18,4 +20,9 @@ def test_attentional_aggregation(): 
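# The guarded calls below reflect that the `ptr`-based aggregation path uses a
# `segment` reduction, which needs `torch_scatter`; without it the test now
# expects an `ImportError` instead of comparing results.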
out = aggr(x, index) assert out.size() == (3, channels) - torch.allclose(aggr(x, ptr=ptr, dim_size=3), out) + + if not torch_geometric.typing.WITH_TORCH_SCATTER: + with pytest.raises(ImportError, match="'segment' requires"): + aggr(x, ptr=ptr) + else: + assert torch.allclose(out, aggr(x, ptr=ptr)) diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py index 5370ad82de14..45d04dc3f861 100644 --- a/test/nn/aggr/test_basic.py +++ b/test/nn/aggr/test_basic.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import ( MaxAggregation, MeanAggregation, @@ -54,6 +55,9 @@ def test_basic_aggregation(Aggregation): if isinstance(aggr, MulAggregation): with pytest.raises(NotImplementedError, match="requires 'index'"): aggr(x, ptr=ptr) + elif not torch_geometric.typing.WITH_TORCH_SCATTER: + with pytest.raises(ImportError, match="'segment' requires"): + aggr(x, ptr=ptr) else: assert torch.allclose(out, aggr(x, ptr=ptr)) @@ -75,7 +79,7 @@ def test_var_aggregation(): PowerMeanAggregation, ]) @pytest.mark.parametrize('learn', [True, False]) -def test_gen_aggregation(Aggregation, learn): +def test_learnable_aggregation(Aggregation, learn): x = torch.randn(6, 16) index = torch.tensor([0, 0, 1, 1, 1, 2]) ptr = torch.tensor([0, 2, 5, 6]) @@ -85,7 +89,12 @@ def test_gen_aggregation(Aggregation, learn): out = aggr(x, index) assert out.size() == (3, x.size(1)) - assert torch.allclose(out, aggr(x, ptr=ptr)) + + if not torch_geometric.typing.WITH_TORCH_SCATTER: + with pytest.raises(ImportError, match="'segment' requires"): + aggr(x, ptr=ptr) + else: + assert torch.allclose(out, aggr(x, ptr=ptr)) if learn: out.mean().backward() @@ -107,7 +116,12 @@ def test_learnable_channels_aggregation(Aggregation): out = aggr(x, index) assert out.size() == (3, x.size(1)) - assert torch.allclose(out, aggr(x, ptr=ptr)) + + if not torch_geometric.typing.WITH_TORCH_SCATTER: + with pytest.raises(ImportError, match="'segment' requires"): + aggr(x, ptr=ptr) + else: + assert torch.allclose(out, aggr(x, ptr=ptr)) out.mean().backward() for param in aggr.parameters(): diff --git a/test/nn/aggr/test_multi.py b/test/nn/aggr/test_multi.py index ccfa70dbfbc4..222696b62ec9 100644 --- a/test/nn/aggr/test_multi.py +++ b/test/nn/aggr/test_multi.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import MultiAggregation @@ -36,7 +37,13 @@ def test_multi_aggr(multi_aggr_tuple): f"], mode={aggr_kwargs['mode']})") out = aggr(x, index) - assert torch.allclose(out, aggr(x, ptr=ptr)) assert out.size() == (4, expand * x.size(1)) - # TODO test JIT support + if not torch_geometric.typing.WITH_TORCH_SCATTER: + with pytest.raises(ImportError, match="'segment' requires"): + aggr(x, ptr=ptr) + else: + assert torch.allclose(out, aggr(x, ptr=ptr)) + + jit = torch.jit.script(aggr) + assert torch.allclose(out, jit(x, index)) diff --git a/torch_geometric/nn/aggr/multi.py b/torch_geometric/nn/aggr/multi.py index b4855b1e11db..6883f935d4a3 100644 --- a/torch_geometric/nn/aggr/multi.py +++ b/torch_geometric/nn/aggr/multi.py @@ -182,7 +182,7 @@ def combine(self, inputs: List[Tensor]) -> Tensor: if hasattr(self, 'multihead_attn'): x = torch.stack( - [head(x) for x, head in zip(inputs, self.lin_heads)], + [head(inputs[i]) for i, head in enumerate(self.lin_heads)], dim=0, ) attn_out, _ = self.multihead_attn(x, x, x) From 055a9b15356d89ae4aa7badfa6633d3c0ce3cee1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 27 Mar 2023 21:27:41 +0200 Subject: [PATCH 
1057/2432] Drop `torch_sparse` dependency in tests (6/n) (#7052) --- .github/workflows/latest_testing.yml | 2 +- test/nn/models/test_correct_and_smooth.py | 2 +- test/nn/models/test_label_prop.py | 2 +- test/nn/models/test_linkx.py | 2 +- test/nn/models/test_rect.py | 12 ++++++--- test/nn/pool/test_pan_pool.py | 3 ++- torch_geometric/nn/pool/asap.py | 31 +++++++++++++---------- 7 files changed, 32 insertions(+), 22 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 4f1cf1f9a00a..fc63728f3c76 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -67,7 +67,7 @@ jobs: pytest test/nn/kge # pytest test/nn/models pytest test/nn/norm - # pytest test/nn/pool + pytest test/nn/pool pytest test/nn/unpool pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py # pytest test/nn/test_model_summary.py diff --git a/test/nn/models/test_correct_and_smooth.py b/test/nn/models/test_correct_and_smooth.py index 2f4d63076eb4..9125b0c8b045 100644 --- a/test/nn/models/test_correct_and_smooth.py +++ b/test/nn/models/test_correct_and_smooth.py @@ -1,7 +1,7 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn.models import CorrectAndSmooth +from torch_geometric.typing import SparseTensor def test_correct_and_smooth(): diff --git a/test/nn/models/test_label_prop.py b/test/nn/models/test_label_prop.py index bc7d993037f7..bc9e1f97ed00 100644 --- a/test/nn/models/test_label_prop.py +++ b/test/nn/models/test_label_prop.py @@ -1,7 +1,7 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn.models import LabelPropagation +from torch_geometric.typing import SparseTensor def test_label_prop(): diff --git a/test/nn/models/test_linkx.py b/test/nn/models/test_linkx.py index 98e25787e01b..e91b8fbc782d 100644 --- a/test/nn/models/test_linkx.py +++ b/test/nn/models/test_linkx.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import LINKX from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('num_edge_layers', [1, 2]) diff --git a/test/nn/models/test_rect.py b/test/nn/models/test_rect.py index 09ef7ec14e8d..74f2b0320d07 100644 --- a/test/nn/models/test_rect.py +++ b/test/nn/models/test_rect.py @@ -1,15 +1,15 @@ import torch -from torch_sparse import SparseTensor +import torch_geometric.typing from torch_geometric.nn import RECT_L from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_rect(): x = torch.randn(6, 8) y = torch.tensor([1, 0, 0, 2, 1, 1]) edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]]) - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) mask = torch.randint(0, 2, (6, ), dtype=torch.bool) model = RECT_L(8, 16) @@ -17,12 +17,15 @@ def test_rect(): out = model(x, edge_index) assert out.size() == (6, 8) - assert torch.allclose(out, model(x, adj.t())) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) + assert torch.allclose(out, model(x, adj.t())) # Test `embed`: embed_out = model.embed(x, edge_index) assert embed_out.size() == (6, 16) - assert torch.allclose(embed_out, model.embed(x, adj.t())) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert 
torch.allclose(embed_out, model.embed(x, adj.t())) # Test `get_semantic_labels`: labeds_out = model.get_semantic_labels(x, y, mask) @@ -35,6 +38,7 @@ def test_rect(): assert torch.allclose(embed_out, jit.embed(x, edge_index)) assert torch.allclose(labeds_out, jit.get_semantic_labels(x, y, mask)) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(model.jittable(t)) assert torch.allclose(jit(x, adj.t()), out) diff --git a/test/nn/pool/test_pan_pool.py b/test/nn/pool/test_pan_pool.py index c6c6a2249ca5..1e8dfa9390b4 100644 --- a/test/nn/pool/test_pan_pool.py +++ b/test/nn/pool/test_pan_pool.py @@ -1,9 +1,10 @@ import torch from torch_geometric.nn import PANConv, PANPooling -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withPackage +@withPackage('torch_sparse') def test_pan_pooling(): edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]]) diff --git a/torch_geometric/nn/pool/asap.py b/torch_geometric/nn/pool/asap.py index 82f065e16172..d5cf75653969 100644 --- a/torch_geometric/nn/pool/asap.py +++ b/torch_geometric/nn/pool/asap.py @@ -8,8 +8,15 @@ from torch_geometric.nn import LEConv from torch_geometric.nn.pool.topk_pool import topk -from torch_geometric.typing import SparseTensor, torch_sparse -from torch_geometric.utils import add_remaining_self_loops, scatter, softmax +from torch_geometric.utils import ( + add_remaining_self_loops, + remove_self_loops, + scatter, + softmax, + to_edge_index, + to_torch_coo_tensor, + to_torch_csr_tensor, +) class ASAPooling(torch.nn.Module): @@ -133,21 +140,19 @@ def forward( batch = batch[perm] # Graph coarsening. - row, col = edge_index[0], edge_index[1] - A = SparseTensor(row=row, col=col, value=edge_weight, - sparse_sizes=(N, N)) - S = SparseTensor(row=row, col=col, value=score, sparse_sizes=(N, N)) + A = to_torch_csr_tensor(edge_index, edge_weight, size=(N, N)) + S = to_torch_coo_tensor(edge_index, score, size=(N, N)) + S = S.index_select(1, perm).to_sparse_csr() + A = S.t().to_sparse_csr() @ (A @ S) - S = torch_sparse.index_select(S, 1, perm) - A = torch_sparse.matmul(torch_sparse.matmul(torch_sparse.t(S), A), S) + edge_index, edge_weight = to_edge_index(A) if self.add_self_loops: - A = torch_sparse.fill_diag(A, 1.) 
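The `ASAPooling` rewrite above replaces `torch_sparse` matrix routines with native PyTorch sparse tensors. A standalone sketch of the underlying pattern on a toy three-node graph (values are illustrative, not taken from the patch): build the adjacency with `to_torch_csr_tensor`, compose it with the `@` operator, and convert back with `to_edge_index`.

import torch
from torch_geometric.utils import to_edge_index, to_torch_csr_tensor

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
edge_weight = torch.ones(edge_index.size(1))

# Adjacency as a torch.sparse CSR tensor; `@` stands in for torch_sparse.matmul.
adj = to_torch_csr_tensor(edge_index, edge_weight, size=(3, 3))
adj = adj @ adj  # compose sparse matrices directly, e.g. a two-hop product
edge_index, edge_weight = to_edge_index(adj)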
+ edge_index, edge_weight = add_remaining_self_loops( + edge_index, edge_weight, num_nodes=A.size(0)) else: - A = torch_sparse.remove_diag(A) - - row, col, edge_weight = A.coo() - edge_index = torch.stack([row, col], dim=0) + edge_index, edge_weight = remove_self_loops( + edge_index, edge_weight) return x, edge_index, edge_weight, batch, perm From c81a1035cbeb2e51beb089b78daaf4d410291804 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 27 Mar 2023 22:39:26 +0200 Subject: [PATCH 1058/2432] Drop `torch_sparse` dependency in tests (7/n) (#7054) --- .github/workflows/latest_testing.yml | 2 +- test/nn/models/test_basic_gnn.py | 2 + test/nn/models/test_correct_and_smooth.py | 15 +++++-- test/nn/models/test_dimenet.py | 3 +- test/nn/models/test_gnnff.py | 3 +- test/nn/models/test_label_prop.py | 9 ++-- test/nn/models/test_linkx.py | 22 ++++++---- test/nn/models/test_metapath2vec.py | 14 ++++--- test/nn/models/test_node2vec.py | 17 ++++---- test/nn/models/test_tgn.py | 2 + torch_geometric/nn/models/graph_unet.py | 17 ++++---- torch_geometric/nn/models/metapath2vec.py | 51 +++++++++++------------ torch_geometric/nn/models/node2vec.py | 28 +++++++------ 13 files changed, 107 insertions(+), 78 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index fc63728f3c76..83dd148beb85 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -65,7 +65,7 @@ jobs: pytest test/nn/dense pytest test/nn/functional pytest test/nn/kge - # pytest test/nn/models + pytest test/nn/models pytest test/nn/norm pytest test/nn/pool pytest test/nn/unpool diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 51eceb9deb3f..3fb38ad79ae7 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -15,6 +15,7 @@ from torch_geometric.testing import ( disableExtensions, onlyLinux, + onlyNeighborSampler, withCUDA, withPackage, ) @@ -145,6 +146,7 @@ def test_one_layer_gnn(out_dim, jk): assert model(x, edge_index).size() == (3, out_channels) +@onlyNeighborSampler @pytest.mark.parametrize('jk', [None, 'last']) def test_basic_gnn_inference(get_dataset, jk): dataset = get_dataset(name='Cora') diff --git a/test/nn/models/test_correct_and_smooth.py b/test/nn/models/test_correct_and_smooth.py index 9125b0c8b045..035f9f860803 100644 --- a/test/nn/models/test_correct_and_smooth.py +++ b/test/nn/models/test_correct_and_smooth.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn.models import CorrectAndSmooth from torch_geometric.typing import SparseTensor @@ -8,7 +9,6 @@ def test_correct_and_smooth(): y_soft = torch.tensor([0.1, 0.5, 0.4]).repeat(6, 1) y_true = torch.tensor([1, 0, 0, 2, 1, 1]) edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]]) - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)).t() mask = torch.randint(0, 2, (6, ), dtype=torch.bool) model = CorrectAndSmooth( @@ -25,11 +25,16 @@ def test_correct_and_smooth(): out = model.correct(y_soft, y_true[mask], mask, edge_index) assert out.size() == (6, 3) - assert torch.allclose(out, model.correct(y_soft, y_true[mask], mask, adj)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) + assert torch.allclose( + out, model.correct(y_soft, y_true[mask], mask, adj.t())) out = model.smooth(y_soft, y_true[mask], mask, edge_index) assert out.size() == (6, 3) - assert torch.allclose(out, model.smooth(y_soft, 
y_true[mask], mask, adj)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose( + out, model.smooth(y_soft, y_true[mask], mask, adj.t())) # Test without autoscale: model = CorrectAndSmooth( @@ -41,4 +46,6 @@ def test_correct_and_smooth(): ) out = model.correct(y_soft, y_true[mask], mask, edge_index) assert out.size() == (6, 3) - assert torch.allclose(out, model.correct(y_soft, y_true[mask], mask, adj)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose( + out, model.correct(y_soft, y_true[mask], mask, adj.t())) diff --git a/test/nn/models/test_dimenet.py b/test/nn/models/test_dimenet.py index 411647941c59..953948981969 100644 --- a/test/nn/models/test_dimenet.py +++ b/test/nn/models/test_dimenet.py @@ -8,7 +8,7 @@ Envelope, ResidualLayer, ) -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withPackage def test_dimenet_modules(): @@ -25,6 +25,7 @@ def test_dimenet_modules(): assert rl(x).size() == (128, 128) # Isotonic layer. +@withPackage('torch_sparse') # TODO `triplet` requires `SparseTensor` for now. @pytest.mark.parametrize('Model', [DimeNet, DimeNetPlusPlus]) def test_dimenet(Model): z = torch.randint(1, 10, (20, )) diff --git a/test/nn/models/test_gnnff.py b/test/nn/models/test_gnnff.py index 5d5d853043c9..61b9fc44656b 100644 --- a/test/nn/models/test_gnnff.py +++ b/test/nn/models/test_gnnff.py @@ -1,9 +1,10 @@ import torch from torch_geometric.nn import GNNFF -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withPackage +@withPackage('torch_sparse') # TODO `triplet` requires `SparseTensor` for now. def test_gnnff(): z = torch.randint(1, 10, (20, )) pos = torch.randn(20, 3) diff --git a/test/nn/models/test_label_prop.py b/test/nn/models/test_label_prop.py index bc9e1f97ed00..903a16855f3d 100644 --- a/test/nn/models/test_label_prop.py +++ b/test/nn/models/test_label_prop.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn.models import LabelPropagation from torch_geometric.typing import SparseTensor @@ -7,7 +8,6 @@ def test_label_prop(): y = torch.tensor([1, 0, 0, 2, 1, 1]) edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]]) - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) mask = torch.randint(0, 2, (6, ), dtype=torch.bool) model = LabelPropagation(num_layers=2, alpha=0.5) @@ -16,12 +16,15 @@ def test_label_prop(): # Test without mask: out = model(y, edge_index) assert out.size() == (6, 3) - assert torch.allclose(model(y, adj.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) + assert torch.allclose(model(y, adj.t()), out) # Test with mask: out = model(y, edge_index, mask) assert out.size() == (6, 3) - assert torch.allclose(model(y, adj.t(), mask), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(model(y, adj.t(), mask), out) # Test post step: out = model(y, edge_index, mask, post_step=lambda y: torch.zeros_like(y)) diff --git a/test/nn/models/test_linkx.py b/test/nn/models/test_linkx.py index e91b8fbc782d..6c8d5dc01fd9 100644 --- a/test/nn/models/test_linkx.py +++ b/test/nn/models/test_linkx.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import LINKX from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor @@ -11,9 +12,6 @@ def test_linkx(num_edge_layers): x = torch.randn(4, 16) 
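Several of the model tests above follow the same recipe: keep the `edge_index` path unconditional, and only exercise the `SparseTensor` path when `torch_geometric.typing.WITH_TORCH_SPARSE` is set (tests that cannot run at all without the extension are skipped via `withPackage('torch_sparse')` instead). Condensed into a standalone snippet based on the `LabelPropagation` test above:

import torch
import torch_geometric.typing
from torch_geometric.nn.models import LabelPropagation
from torch_geometric.typing import SparseTensor

y = torch.tensor([1, 0, 0, 2, 1, 1])
edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]])

model = LabelPropagation(num_layers=2, alpha=0.5)
out = model(y, edge_index)

# The SparseTensor code path only runs when torch_sparse is installed.
if torch_geometric.typing.WITH_TORCH_SPARSE:
    adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6))
    assert torch.allclose(model(y, adj.t()), out)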
edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]]) edge_weight = torch.rand(edge_index.size(1)) - adj2 = SparseTensor.from_edge_index(edge_index, edge_weight, - sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) model = LINKX(num_nodes=4, in_channels=16, hidden_channels=32, out_channels=8, num_layers=2, @@ -22,25 +20,33 @@ def test_linkx(num_edge_layers): out = model(x, edge_index) assert out.size() == (4, 8) - assert torch.allclose(out, model(x, adj1.t()), atol=1e-4) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(out, model(x, adj.t()), atol=1e-6) if is_full_test(): t = '(OptTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(model.jittable(t)) assert torch.allclose(jit(x, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(model.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out) + assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) out = model(None, edge_index) assert out.size() == (4, 8) - assert torch.allclose(out, model(None, adj1.t()), atol=1e-4) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(out, model(None, adj.t()), atol=1e-6) out = model(x, edge_index, edge_weight) assert out.size() == (4, 8) - assert torch.allclose(out, model(x, adj2.t()), atol=1e-4) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_weight, + sparse_sizes=(4, 4)) + assert torch.allclose(model(x, adj.t()), out, atol=1e-6) out = model(None, edge_index, edge_weight) assert out.size() == (4, 8) - assert torch.allclose(out, model(None, adj2.t()), atol=1e-4) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(model(None, adj.t()), out, atol=1e-6) diff --git a/test/nn/models/test_metapath2vec.py b/test/nn/models/test_metapath2vec.py index 990f0f913455..7f00515c6b38 100644 --- a/test/nn/models/test_metapath2vec.py +++ b/test/nn/models/test_metapath2vec.py @@ -1,14 +1,16 @@ import torch from torch_geometric.nn import MetaPath2Vec +from torch_geometric.testing import withCUDA -def test_metapath2vec(): +@withCUDA +def test_metapath2vec(device): edge_index_dict = { ('author', 'writes', 'paper'): - torch.tensor([[0, 1, 1, 2], [0, 0, 1, 1]]), + torch.tensor([[0, 1, 1, 2], [0, 0, 1, 1]], device=device), ('paper', 'written_by', 'author'): - torch.tensor([[0, 0, 1, 1], [0, 1, 1, 2]]) + torch.tensor([[0, 0, 1, 1], [0, 1, 1, 2]], device=device) } metapath = [ @@ -17,7 +19,7 @@ def test_metapath2vec(): ] model = MetaPath2Vec(edge_index_dict, embedding_dim=16, metapath=metapath, - walk_length=2, context_size=2) + walk_length=2, context_size=2).to(device) assert str(model) == 'MetaPath2Vec(5, 16)' z = model('author') @@ -26,12 +28,12 @@ def test_metapath2vec(): z = model('paper') assert z.size() == (2, 16) - z = model('author', torch.arange(2)) + z = model('author', torch.arange(2, device=device)) assert z.size() == (2, 16) pos_rw, neg_rw = model._sample(torch.arange(3)) - loss = model.loss(pos_rw, neg_rw) + loss = model.loss(pos_rw.to(device), neg_rw.to(device)) assert 0 <= loss.item() acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )), diff --git a/test/nn/models/test_node2vec.py b/test/nn/models/test_node2vec.py index 58ab8a2848ef..77d195656253 100644 --- a/test/nn/models/test_node2vec.py +++ b/test/nn/models/test_node2vec.py @@ -1,21 +1,22 @@ import torch from torch_geometric.nn import Node2Vec -from 
torch_geometric.testing import is_full_test, withPackage +from torch_geometric.testing import is_full_test, withCUDA, withPackage +@withCUDA @withPackage('torch_cluster') -def test_node2vec(): - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) +def test_node2vec(device): + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device=device) model = Node2Vec(edge_index, embedding_dim=16, walk_length=2, - context_size=2) + context_size=2).to(device) assert str(model) == 'Node2Vec(3, 16)' - assert model(torch.arange(3)).size() == (3, 16) + assert model(torch.arange(3, device=device)).size() == (3, 16) pos_rw, neg_rw = model.sample(torch.arange(3)) - assert float(model.loss(pos_rw, neg_rw)) >= 0 + assert float(model.loss(pos_rw.to(device), neg_rw.to(device))) >= 0 acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )), torch.ones(20, 16), torch.randint(10, (20, ))) @@ -24,7 +25,7 @@ def test_node2vec(): if is_full_test(): jit = torch.jit.script(model) - assert jit(torch.arange(3)).size() == (3, 16) + assert jit(torch.arange(3, device=device)).size() == (3, 16) pos_rw, neg_rw = jit.sample(torch.arange(3)) - assert float(jit.loss(pos_rw, neg_rw)) >= 0 + assert float(jit.loss(pos_rw.to(device), neg_rw.to(device))) >= 0 diff --git a/test/nn/models/test_tgn.py b/test/nn/models/test_tgn.py index cb1f5c6ddd14..c9437fa74bbf 100644 --- a/test/nn/models/test_tgn.py +++ b/test/nn/models/test_tgn.py @@ -8,8 +8,10 @@ LastAggregator, LastNeighborLoader, ) +from torch_geometric.testing import withPackage +@withPackage('torch_scatter') # TODO Requires `scatter_argmax` for now. def test_tgn(): memory_dim = 16 time_dim = 16 diff --git a/torch_geometric/nn/models/graph_unet.py b/torch_geometric/nn/models/graph_unet.py index f0a998aa22a1..9e5c210dc943 100644 --- a/torch_geometric/nn/models/graph_unet.py +++ b/torch_geometric/nn/models/graph_unet.py @@ -5,8 +5,12 @@ from torch_geometric.nn import GCNConv, TopKPooling from torch_geometric.nn.resolver import activation_resolver -from torch_geometric.typing import OptTensor, PairTensor, SparseTensor -from torch_geometric.utils import add_self_loops, remove_self_loops +from torch_geometric.typing import OptTensor, PairTensor +from torch_geometric.utils import ( + add_self_loops, + remove_self_loops, + to_torch_csr_tensor, +) from torch_geometric.utils.repeat import repeat @@ -127,11 +131,10 @@ def augment_adj(self, edge_index: Tensor, edge_weight: Tensor, edge_index, edge_weight = remove_self_loops(edge_index, edge_weight) edge_index, edge_weight = add_self_loops(edge_index, edge_weight, num_nodes=num_nodes) - adj = SparseTensor.from_edge_index(edge_index, edge_weight, - sparse_sizes=(num_nodes, num_nodes)) - adj = adj @ adj - row, col, edge_weight = adj.coo() - edge_index = torch.stack([row, col], dim=0) + adj = to_torch_csr_tensor(edge_index, edge_weight, + size=(num_nodes, num_nodes)) + adj = (adj @ adj).to_sparse_coo() + edge_index, edge_weight = adj.indices(), adj.values() edge_index, edge_weight = remove_self_loops(edge_index, edge_weight) return edge_index, edge_weight diff --git a/torch_geometric/nn/models/metapath2vec.py b/torch_geometric/nn/models/metapath2vec.py index 5495721725d5..caa58926d524 100644 --- a/torch_geometric/nn/models/metapath2vec.py +++ b/torch_geometric/nn/models/metapath2vec.py @@ -5,7 +5,9 @@ from torch.nn import Embedding from torch.utils.data import DataLoader -from torch_geometric.typing import EdgeType, NodeType, OptTensor, SparseTensor +from torch_geometric.typing import EdgeType, NodeType, OptTensor +from 
torch_geometric.utils import sort_edge_index +from torch_geometric.utils.sparse import index2ptr EPS = 1e-15 @@ -71,13 +73,14 @@ def __init__( N = int(edge_index[1].max() + 1) num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N)) - adj_dict = {} + self.rowptr_dict, self.col_dict, self.rowcount_dict = {}, {}, {} for keys, edge_index in edge_index_dict.items(): sizes = (num_nodes_dict[keys[0]], num_nodes_dict[keys[-1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=sizes) - adj = adj.to('cpu') - adj_dict[keys] = adj + row, col = sort_edge_index(edge_index, num_nodes=max(sizes)).cpu() + rowptr = index2ptr(row, size=sizes[0]) + self.rowptr_dict[keys] = rowptr + self.col_dict[keys] = col + self.rowcount_dict[keys] = rowptr[1:] - rowptr[:-1] assert walk_length + 1 >= context_size if walk_length > len(metapath) and metapath[0][0] != metapath[-1][-1]: @@ -85,7 +88,6 @@ def __init__( "The 'walk_length' is longer than the given 'metapath', but " "the 'metapath' does not denote a cycle") - self.adj_dict = adj_dict self.embedding_dim = embedding_dim self.metapath = metapath self.walk_length = walk_length @@ -145,10 +147,15 @@ def _pos_sample(self, batch: Tensor) -> Tensor: rws = [batch] for i in range(self.walk_length): - keys = self.metapath[i % len(self.metapath)] - adj = self.adj_dict[keys] - batch = sample(adj, batch, num_neighbors=1, - dummy_idx=self.dummy_idx).view(-1) + edge_type = self.metapath[i % len(self.metapath)] + batch = sample( + self.rowptr_dict[edge_type], + self.col_dict[edge_type], + self.rowcount_dict[edge_type], + batch, + num_neighbors=1, + dummy_idx=self.dummy_idx, + ).view(-1) rws.append(batch) rw = torch.stack(rws, dim=-1) @@ -231,21 +238,13 @@ def __repr__(self) -> str: f'{self.embedding.weight.size(1)})') -def sample(src: SparseTensor, subset: Tensor, num_neighbors: int, - dummy_idx: int) -> Tensor: - - mask = subset < dummy_idx - rowcount = torch.zeros_like(subset) - rowcount[mask] = src.storage.rowcount()[subset[mask]] - mask = mask & (rowcount > 0) - offset = torch.zeros_like(subset) - offset[mask] = src.storage.rowptr()[subset[mask]] +def sample(rowptr: Tensor, col: Tensor, rowcount: Tensor, subset: Tensor, + num_neighbors: int, dummy_idx: int) -> Tensor: - rand = torch.rand((rowcount.size(0), num_neighbors), device=subset.device) - rand.mul_(rowcount.to(rand.dtype).view(-1, 1)) - rand = rand.to(torch.long) - rand.add_(offset.view(-1, 1)) + rand = torch.rand((subset.size(0), num_neighbors), device=subset.device) + rand *= rowcount[subset].to(rand.dtype).view(-1, 1) + rand = rand.to(torch.long) + rowptr[subset].view(-1, 1) - col = src.storage.col()[rand] - col[~mask] = dummy_idx + col = col[rand] + col[(subset >= dummy_idx) | (rowcount[subset] == 0)] = dummy_idx return col diff --git a/torch_geometric/nn/models/node2vec.py b/torch_geometric/nn/models/node2vec.py index 9d525439d79f..ae91b4a49095 100644 --- a/torch_geometric/nn/models/node2vec.py +++ b/torch_geometric/nn/models/node2vec.py @@ -5,8 +5,9 @@ from torch.nn import Embedding from torch.utils.data import DataLoader -from torch_geometric.typing import OptTensor, SparseTensor +from torch_geometric.utils import sort_edge_index from torch_geometric.utils.num_nodes import maybe_num_nodes +from torch_geometric.utils.sparse import index2ptr try: import torch_cluster # noqa @@ -65,10 +66,11 @@ def __init__( if random_walk is None: raise ImportError('`Node2Vec` requires `torch-cluster`.') - N = maybe_num_nodes(edge_index, num_nodes) - row, col = edge_index - self.adj = 
SparseTensor(row=row, col=col, sparse_sizes=(N, N)) - self.adj = self.adj.to('cpu') + self.num_nodes = maybe_num_nodes(edge_index, num_nodes) + + row, col = sort_edge_index(edge_index, num_nodes=self.num_nodes).cpu() + self.rowptr, self.col = index2ptr(row, self.num_nodes), col + self.EPS = 1e-15 assert walk_length >= context_size @@ -80,7 +82,8 @@ def __init__( self.q = q self.num_negative_samples = num_negative_samples - self.embedding = Embedding(N, embedding_dim, sparse=sparse) + self.embedding = Embedding(self.num_nodes, embedding_dim, + sparse=sparse) self.reset_parameters() @@ -88,20 +91,20 @@ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" self.embedding.reset_parameters() - def forward(self, batch: OptTensor = None) -> Tensor: + def forward(self, batch: Optional[Tensor] = None) -> Tensor: """Returns the embeddings for the nodes in :obj:`batch`.""" emb = self.embedding.weight return emb if batch is None else emb.index_select(0, batch) def loader(self, **kwargs) -> DataLoader: - return DataLoader(range(self.adj.sparse_size(0)), - collate_fn=self.sample, **kwargs) + return DataLoader(range(self.num_nodes), collate_fn=self.sample, + **kwargs) @torch.jit.export def pos_sample(self, batch: Tensor) -> Tensor: batch = batch.repeat(self.walks_per_node) - rowptr, col, _ = self.adj.csr() - rw = random_walk(rowptr, col, batch, self.walk_length, self.p, self.q) + rw = random_walk(self.rowptr, self.col, batch, self.walk_length, + self.p, self.q) if not isinstance(rw, Tensor): rw = rw[0] @@ -115,8 +118,7 @@ def pos_sample(self, batch: Tensor) -> Tensor: def neg_sample(self, batch: Tensor) -> Tensor: batch = batch.repeat(self.walks_per_node * self.num_negative_samples) - rw = torch.randint(self.adj.sparse_size(0), - (batch.size(0), self.walk_length)) + rw = torch.randint(self.num_nodes, (batch.size(0), self.walk_length)) rw = torch.cat([batch.view(-1, 1), rw], dim=-1) walks = [] From 5d1bcdfe04a33ba4344012c3d73054fe799cf63f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 08:06:03 +0200 Subject: [PATCH 1059/2432] Move `torch.compile` tests to nightly tests (#7056) --- test/nn/models/test_basic_gnn.py | 2 ++ test/nn/test_compile_basic.py | 2 ++ test/nn/test_compile_conv.py | 2 ++ test/nn/test_compile_dynamic.py | 2 ++ 4 files changed, 8 insertions(+) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 3fb38ad79ae7..158d398991a0 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -14,6 +14,7 @@ from torch_geometric.profile import benchmark from torch_geometric.testing import ( disableExtensions, + onlyFullTest, onlyLinux, onlyNeighborSampler, withCUDA, @@ -169,6 +170,7 @@ def test_basic_gnn_inference(get_dataset, jk): @withCUDA @onlyLinux +@onlyFullTest @disableExtensions @withPackage('torch>=2.0.0') def test_compile(device): diff --git a/test/nn/test_compile_basic.py b/test/nn/test_compile_basic.py index f89a7e00005b..dc730c2de2c7 100644 --- a/test/nn/test_compile_basic.py +++ b/test/nn/test_compile_basic.py @@ -4,6 +4,7 @@ from torch_geometric.profile import benchmark from torch_geometric.testing import ( disableExtensions, + onlyFullTest, onlyLinux, withCUDA, withPackage, @@ -45,6 +46,7 @@ def fused_gather_scatter(x, edge_index, reduce=['sum', 'mean', 'max']): @withCUDA @onlyLinux +@onlyFullTest @disableExtensions @withPackage('torch>=2.0.0') def test_torch_compile(device): diff --git a/test/nn/test_compile_conv.py b/test/nn/test_compile_conv.py index 
360086ade10d..f1748dfea4e0 100644 --- a/test/nn/test_compile_conv.py +++ b/test/nn/test_compile_conv.py @@ -7,6 +7,7 @@ from torch_geometric.profile import benchmark from torch_geometric.testing import ( disableExtensions, + onlyFullTest, onlyLinux, withCUDA, withPackage, @@ -28,6 +29,7 @@ def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: @withCUDA @onlyLinux +@onlyFullTest @disableExtensions @withPackage('torch>=2.0.0') @pytest.mark.parametrize('Conv', [GCNConv, SAGEConv]) diff --git a/test/nn/test_compile_dynamic.py b/test/nn/test_compile_dynamic.py index 0fe60ba49e29..ac39f9bcbcf9 100644 --- a/test/nn/test_compile_dynamic.py +++ b/test/nn/test_compile_dynamic.py @@ -7,6 +7,7 @@ from torch_geometric.testing import ( disableExtensions, get_random_edge_index, + onlyFullTest, onlyLinux, withCUDA, withPackage, @@ -28,6 +29,7 @@ def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: @withCUDA @onlyLinux +@onlyFullTest @disableExtensions @withPackage('torch>2.0.0') def test_dynamic_torch_compile(device): From c21615e736a7098321a984faad878e8093d1793f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 08:06:26 +0200 Subject: [PATCH 1060/2432] Update minimal required PyTorch version (#7055) --- .github/workflows/documentation.yml | 1 + .github/workflows/install.yml | 1 + .github/workflows/latest_testing.yml | 1 + .github/workflows/prev_testing.yml | 1 + .github/workflows/testing.yml | 1 + README.md | 2 ++ docs/source/install/installation.rst | 6 ++++-- 7 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 4d69fbd9fed3..7282d9e8c730 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -28,6 +28,7 @@ jobs: docker/** examples/** graphgym/** + README.md CHANGELOG.md - name: Setup packages diff --git a/.github/workflows/install.yml b/.github/workflows/install.yml index 3d7a01e61295..95e8d96d6913 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/install.yml @@ -29,6 +29,7 @@ jobs: docs/** examples/** graphgym/** + README.md CHANGELOG.md - name: Setup packages diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 83dd148beb85..05f3baad8683 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -29,6 +29,7 @@ jobs: docs/** examples/** graphgym/** + README.md CHANGELOG.md - name: Setup packages diff --git a/.github/workflows/prev_testing.yml b/.github/workflows/prev_testing.yml index 629e6955305b..9d73dc6636ce 100644 --- a/.github/workflows/prev_testing.yml +++ b/.github/workflows/prev_testing.yml @@ -29,6 +29,7 @@ jobs: docs/** examples/** graphgym/** + README.md CHANGELOG.md - name: Setup packages diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index cb444ef35819..e0784feb172a 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -29,6 +29,7 @@ jobs: docs/** examples/** graphgym/** + README.md CHANGELOG.md - name: Setup packages diff --git a/README.md b/README.md index e9807f77c006..14f7a3ed6831 100644 --- a/README.md +++ b/README.md @@ -378,6 +378,8 @@ For this, simply run pip install torch_geometric ``` +PyG 2.3 requires that at least PyTorch 1.12 is installed. 
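Looking back at patch 1058 (#7054) above: `Node2Vec` and `MetaPath2Vec` now keep the graph as a plain `(rowptr, col)` CSR pair built from `sort_edge_index` and `index2ptr`, instead of a `torch_sparse.SparseTensor`. A standalone sketch of that construction on a toy graph (values are illustrative):

import torch
from torch_geometric.utils import sort_edge_index
from torch_geometric.utils.sparse import index2ptr

num_nodes = 3
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])

# Sort edges by source node, then compress the row vector into CSR offsets.
row, col = sort_edge_index(edge_index, num_nodes=num_nodes)
rowptr = index2ptr(row, size=num_nodes)  # shape [num_nodes + 1]
rowcount = rowptr[1:] - rowptr[:-1]      # out-degree per node, as used for sampling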
+ ### Additional Libraries If you want to utilize the full set of features from PyG, there exists several additional libraries you may want to install: diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst index 38b63dd74c18..02c84000edbe 100644 --- a/docs/source/install/installation.rst +++ b/docs/source/install/installation.rst @@ -18,7 +18,7 @@ Installation via Anaconda You can now install :pyg:`PyG` via `Anaconda `_ for all major OS, :pytorch:`PyTorch` and CUDA combinations 🤗 If you have not yet installed :pytorch:`PyTorch`, install it via :obj:`conda` as described in its `official documentation `_. -Given that you have :pytorch:`PyTorch` installed (:obj:`>=1.8.0`), simply run +Given that you have :pytorch:`PyTorch` installed (:obj:`>=1.12.0`), simply run .. code-block:: none @@ -31,12 +31,14 @@ Installation via PyPi --------------------- From :pyg:`null` **PyG 2.3** onwards, you can install and use :pyg:`PyG` **without any external library** required except for :pytorch:`PyTorch`. -For this, simply run +For this, simply run: .. code-block:: none pip install torch_geometric +PyG 2.3 requires that at least PyTorch 1.12 is installed. + Additional Libraries -------------------- From eb5e9672b8e43c3c39ad3a41f002cd2ad162c4c1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 09:37:24 +0200 Subject: [PATCH 1061/2432] Drop `torch_sparse` dependency in tests (8/n) (#7058) --- .github/workflows/latest_testing.yml | 8 +--- test/nn/test_model_summary.py | 15 +++++-- test/nn/test_to_hetero_transformer.py | 43 +++++++++++-------- .../test_to_hetero_with_bases_transformer.py | 19 ++++---- 4 files changed, 51 insertions(+), 34 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 05f3baad8683..5a28fe1dc0c7 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -70,10 +70,6 @@ jobs: pytest test/nn/norm pytest test/nn/pool pytest test/nn/unpool - pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py - # pytest test/nn/test_model_summary.py - pytest test/nn/test_module_dict.py test/nn/test_parameter_dict.py test/nn/test_reshape.py test/nn/test_resolver.py + pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py test/nn/test_model_summary.py test/nn/test_module_dict.py test/nn/test_parameter_dict.py test/nn/test_reshape.py test/nn/test_resolver.py # pytest test/nn/test_sequential.py - pytest test/nn/test_to_fixed_size_transformer.py test/nn/test_to_hetero_module.py - # pytest test/nn/test_to_hetero_transformer.py - # pytest test/nn/test_to_hetero_with_bases_transformer.py + pytest test/nn/test_to_fixed_size_transformer.py test/nn/test_to_hetero_module.py test/nn/test_to_hetero_transformer.py test/nn/test_to_hetero_with_bases_transformer.py diff --git a/test/nn/test_model_summary.py b/test/nn/test_model_summary.py index c357697474e3..5610ed8d273c 100644 --- a/test/nn/test_model_summary.py +++ b/test/nn/test_model_summary.py @@ -1,11 +1,14 @@ +from typing import Optional + import pytest import torch from torch import Tensor, nn -from torch_sparse import SparseTensor +import torch_geometric.typing from torch_geometric.nn import Linear, SAGEConv, summary, to_hetero from 
torch_geometric.nn.models import GCN from torch_geometric.testing import withPackage +from torch_geometric.typing import SparseTensor class GraphSAGE(torch.nn.Module): @@ -40,8 +43,13 @@ def gcn(): model = GCN(32, 16, num_layers=2, out_channels=32) x = torch.randn(100, 32) edge_index = torch.randint(100, size=(2, 20)) - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) - return dict(model=model, x=x, edge_index=edge_index, adj_t=adj.t()) + adj_t: Optional[SparseTensor] = None + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=(100, 100), + ).t() + return dict(model=model, x=x, edge_index=edge_index, adj_t=adj_t) @withPackage('tabulate') @@ -61,6 +69,7 @@ def test_summary_basic(gcn): @withPackage('tabulate') +@withPackage('torch_sparse') def test_summary_with_sparse_tensor(gcn): expected = """ +---------------------+-----------------------+----------------+----------+ diff --git a/test/nn/test_to_hetero_transformer.py b/test/nn/test_to_hetero_transformer.py index b522851d1949..fc6ee1879796 100644 --- a/test/nn/test_to_hetero_transformer.py +++ b/test/nn/test_to_hetero_transformer.py @@ -5,8 +5,8 @@ import torch.nn.functional as F from torch import Tensor from torch.nn import Linear, ReLU, Sequential -from torch_sparse import SparseTensor +import torch_geometric.typing from torch_geometric.nn import GAT, BatchNorm, GCNConv, GINEConv, GraphSAGE from torch_geometric.nn import Linear as LazyLinear from torch_geometric.nn import ( @@ -16,6 +16,7 @@ SAGEConv, to_hetero, ) +from torch_geometric.typing import SparseTensor from torch_geometric.utils import dropout_edge torch.fx.wrap('dropout_edge') @@ -179,16 +180,21 @@ def test_to_hetero_basic(): ('author', 'writes', 'paper'): torch.randint(100, (2, 200), dtype=torch.long), } - adj_t_dict = {} - for edge_type, (row, col) in edge_index_dict.items(): - adj_t_dict[edge_type] = SparseTensor(row=col, col=row, - sparse_sizes=(100, 100)) edge_attr_dict = { ('paper', 'cites', 'paper'): torch.randn(200, 8), ('paper', 'written_by', 'author'): torch.randn(200, 8), ('author', 'writes', 'paper'): torch.randn(200, 8), } + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict = {} + for edge_type, (row, col) in edge_index_dict.items(): + adj_t_dict[edge_type] = SparseTensor( + row=col, + col=row, + sparse_sizes=(100, 100), + ) + metadata = list(x_dict.keys()), list(edge_index_dict.keys()) model = Net1() @@ -214,10 +220,11 @@ def test_to_hetero_basic(): assert out1['paper'].size() == (100, 32) assert out1['author'].size() == (100, 32) - out2 = model(x_dict, adj_t_dict) - assert isinstance(out2, dict) and len(out2) == 2 - for node_type in x_dict.keys(): - assert torch.allclose(out1[node_type], out2[node_type], atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out2 = model(x_dict, adj_t_dict) + assert isinstance(out2, dict) and len(out2) == 2 + for key in x_dict.keys(): + assert torch.allclose(out1[key], out2[key], atol=1e-6) model = Net3() model = to_hetero(model, metadata, debug=False) @@ -405,12 +412,13 @@ def test_to_hetero_and_rgcn_equal_output(): edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]), } - node_types, edge_types = list(x_dict.keys()), list(edge_index_dict.keys()) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict = { + key: SparseTensor.from_edge_index(edge_index).t() + for key, edge_index in edge_index_dict.items() + } - adj_t_dict = { - key: SparseTensor.from_edge_index(edge_index).t() - for key, edge_index in 
edge_index_dict.items() - } + node_types, edge_types = list(x_dict.keys()), list(edge_index_dict.keys()) model = to_hetero(RGCN(16, 32), (node_types, edge_types)) @@ -426,9 +434,10 @@ def test_to_hetero_and_rgcn_equal_output(): out2 = torch.cat([out2['paper'], out2['author']], dim=0) assert torch.allclose(out1, out2, atol=1e-6) - out3 = model(x_dict, adj_t_dict) - out3 = torch.cat([out3['paper'], out3['author']], dim=0) - assert torch.allclose(out1, out3, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out3 = model(x_dict, adj_t_dict) + out3 = torch.cat([out3['paper'], out3['author']], dim=0) + assert torch.allclose(out1, out3, atol=1e-6) class GraphLevelGNN(torch.nn.Module): diff --git a/test/nn/test_to_hetero_with_bases_transformer.py b/test/nn/test_to_hetero_with_bases_transformer.py index 5c37464b11e6..b26871a306a3 100644 --- a/test/nn/test_to_hetero_with_bases_transformer.py +++ b/test/nn/test_to_hetero_with_bases_transformer.py @@ -4,8 +4,8 @@ import torch from torch import Tensor from torch.nn import Linear, ReLU, Sequential -from torch_sparse import SparseTensor +import torch_geometric.typing from torch_geometric.nn import ( GINEConv, MessagePassing, @@ -13,6 +13,7 @@ SAGEConv, to_hetero_with_bases, ) +from torch_geometric.typing import SparseTensor class Net1(torch.nn.Module): @@ -258,10 +259,11 @@ def test_to_hetero_with_bases_and_rgcn_equal_output(): edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]), } - adj_t_dict = { - key: SparseTensor.from_edge_index(edge_index).t() - for key, edge_index in edge_index_dict.items() - } + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict = { + key: SparseTensor.from_edge_index(edge_index).t() + for key, edge_index in edge_index_dict.items() + } metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) model = to_hetero_with_bases(RGCN(16, 32), metadata, num_bases=num_bases, @@ -279,9 +281,10 @@ def test_to_hetero_with_bases_and_rgcn_equal_output(): out2 = torch.cat([out2['paper'], out2['author']], dim=0) assert torch.allclose(out1, out2, atol=1e-6) - out3 = model(x_dict, adj_t_dict) - out3 = torch.cat([out3['paper'], out3['author']], dim=0) - assert torch.allclose(out1, out3, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out3 = model(x_dict, adj_t_dict) + out3 = torch.cat([out3['paper'], out3['author']], dim=0) + assert torch.allclose(out1, out3, atol=1e-6) def test_to_hetero_with_bases_validate(): From 78dc0313c63da63ccc4ae5c6a522ea5714a6cbd9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 09:49:56 +0200 Subject: [PATCH 1062/2432] Drop `torch_sparse` dependency in tests (9/n) (#7059) --- test/nn/conv/test_agnn_conv.py | 2 +- test/nn/conv/test_antisymmetric_conv.py | 2 +- test/nn/conv/test_appnp.py | 2 +- test/nn/conv/test_arma_conv.py | 2 +- test/nn/conv/test_cg_conv.py | 2 +- test/nn/conv/test_cluster_gcn_conv.py | 2 +- test/nn/conv/test_dna_conv.py | 2 +- test/nn/conv/test_edge_conv.py | 2 +- test/nn/conv/test_eg_conv.py | 2 +- test/nn/conv/test_fa_conv.py | 2 +- test/nn/conv/test_feast_conv.py | 2 +- test/nn/conv/test_film_conv.py | 2 +- test/nn/conv/test_gat_conv.py | 2 +- test/nn/conv/test_gated_graph_conv.py | 2 +- test/nn/conv/test_gatv2_conv.py | 2 +- test/nn/conv/test_gcn2_conv.py | 2 +- test/nn/conv/test_gcn_conv.py | 2 +- test/nn/conv/test_gen_conv.py | 2 +- test/nn/conv/test_gin_conv.py | 2 +- test/nn/conv/test_gmm_conv.py | 2 +- test/nn/conv/test_gps_conv.py | 2 +- test/nn/conv/test_graph_conv.py | 2 +- test/nn/conv/test_han_conv.py | 2 +- 
test/nn/conv/test_heat_conv.py | 2 +- test/nn/conv/test_hgt_conv.py | 2 +- test/nn/conv/test_le_conv.py | 2 +- test/nn/conv/test_lg_conv.py | 2 +- test/nn/conv/test_message_passing.py | 9 +++++++-- test/nn/conv/test_mf_conv.py | 2 +- test/nn/conv/test_nn_conv.py | 2 +- test/nn/conv/test_pan_conv.py | 2 +- test/nn/conv/test_pdn_conv.py | 2 +- test/nn/conv/test_pna_conv.py | 2 +- test/nn/conv/test_point_conv.py | 2 +- test/nn/conv/test_point_gnn_conv.py | 2 +- test/nn/conv/test_point_transformer_conv.py | 2 +- test/nn/conv/test_ppf_conv.py | 2 +- test/nn/conv/test_res_gated_graph_conv.py | 2 +- test/nn/conv/test_rgat_conv.py | 2 +- test/nn/conv/test_rgcn_conv.py | 2 +- test/nn/conv/test_sage_conv.py | 2 +- test/nn/conv/test_sg_conv.py | 2 +- test/nn/conv/test_signed_conv.py | 2 +- test/nn/conv/test_simple_conv.py | 2 +- test/nn/conv/test_spline_conv.py | 2 +- test/nn/conv/test_ssg_conv.py | 2 +- test/nn/conv/test_supergat_conv.py | 2 +- test/nn/conv/test_tag_conv.py | 2 +- test/nn/conv/test_transformer_conv.py | 2 +- test/nn/conv/test_wl_conv.py | 2 +- test/nn/test_sequential.py | 2 +- 51 files changed, 57 insertions(+), 52 deletions(-) diff --git a/test/nn/conv/test_agnn_conv.py b/test/nn/conv/test_agnn_conv.py index 6dad258ea727..27d70070241b 100644 --- a/test/nn/conv/test_agnn_conv.py +++ b/test/nn/conv/test_agnn_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import AGNNConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('requires_grad', [True, False]) diff --git a/test/nn/conv/test_antisymmetric_conv.py b/test/nn/conv/test_antisymmetric_conv.py index c48aa024a400..a3b8df56ab90 100644 --- a/test/nn/conv/test_antisymmetric_conv.py +++ b/test/nn/conv/test_antisymmetric_conv.py @@ -1,7 +1,7 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import AntiSymmetricConv +from torch_geometric.typing import SparseTensor def test_antisymmetric_conv(): diff --git a/test/nn/conv/test_appnp.py b/test/nn/conv/test_appnp.py index 50fed8595808..15b10ce27f1a 100644 --- a/test/nn/conv/test_appnp.py +++ b/test/nn/conv/test_appnp.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import APPNP from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_appnp(): diff --git a/test/nn/conv/test_arma_conv.py b/test/nn/conv/test_arma_conv.py index ed7b6bf16ccb..f344a42dbac6 100644 --- a/test/nn/conv/test_arma_conv.py +++ b/test/nn/conv/test_arma_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import ARMAConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_arma_conv(): diff --git a/test/nn/conv/test_cg_conv.py b/test/nn/conv/test_cg_conv.py index d0b6795f7efd..d67b8df5ee40 100644 --- a/test/nn/conv/test_cg_conv.py +++ b/test/nn/conv/test_cg_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import CGConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_cg_conv(): diff --git a/test/nn/conv/test_cluster_gcn_conv.py b/test/nn/conv/test_cluster_gcn_conv.py index d7a0a599303e..41373f13afd2 100644 --- a/test/nn/conv/test_cluster_gcn_conv.py +++ b/test/nn/conv/test_cluster_gcn_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor 
from torch_geometric.nn import ClusterGCNConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_cluster_gcn_conv(): diff --git a/test/nn/conv/test_dna_conv.py b/test/nn/conv/test_dna_conv.py index 4d88b96556e0..c7ec5d71fe7b 100644 --- a/test/nn/conv/test_dna_conv.py +++ b/test/nn/conv/test_dna_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import DNAConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_dna_conv1(): diff --git a/test/nn/conv/test_edge_conv.py b/test/nn/conv/test_edge_conv.py index 041964733b8e..f9b278b0b733 100644 --- a/test/nn/conv/test_edge_conv.py +++ b/test/nn/conv/test_edge_conv.py @@ -2,10 +2,10 @@ from torch.nn import Linear as Lin from torch.nn import ReLU from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor from torch_geometric.nn import DynamicEdgeConv, EdgeConv from torch_geometric.testing import is_full_test, withPackage +from torch_geometric.typing import SparseTensor def test_edge_conv_conv(): diff --git a/test/nn/conv/test_eg_conv.py b/test/nn/conv/test_eg_conv.py index 6d7536b23fde..8fb2f2912049 100644 --- a/test/nn/conv/test_eg_conv.py +++ b/test/nn/conv/test_eg_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import EGConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_eg_conv(): diff --git a/test/nn/conv/test_fa_conv.py b/test/nn/conv/test_fa_conv.py index fa35ad327492..a20b0701b77c 100644 --- a/test/nn/conv/test_fa_conv.py +++ b/test/nn/conv/test_fa_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import FAConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_fa_conv(): diff --git a/test/nn/conv/test_feast_conv.py b/test/nn/conv/test_feast_conv.py index c26b60aafa6e..371e0e382cce 100644 --- a/test/nn/conv/test_feast_conv.py +++ b/test/nn/conv/test_feast_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import FeaStConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_feast_conv(): diff --git a/test/nn/conv/test_film_conv.py b/test/nn/conv/test_film_conv.py index f7943fae494a..f64aa14fc234 100644 --- a/test/nn/conv/test_film_conv.py +++ b/test/nn/conv/test_film_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import FiLMConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_film_conv(): diff --git a/test/nn/conv/test_gat_conv.py b/test/nn/conv/test_gat_conv.py index 126659c82d38..7ae53c04ebee 100644 --- a/test/nn/conv/test_gat_conv.py +++ b/test/nn/conv/test_gat_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GATConv from torch_geometric.testing import is_full_test, withCUDA +from torch_geometric.typing import SparseTensor def test_gat_conv(): diff --git a/test/nn/conv/test_gated_graph_conv.py b/test/nn/conv/test_gated_graph_conv.py index da96814c4bd0..08cbed131469 100644 --- a/test/nn/conv/test_gated_graph_conv.py +++ b/test/nn/conv/test_gated_graph_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import 
GatedGraphConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_gated_graph_conv(): diff --git a/test/nn/conv/test_gatv2_conv.py b/test/nn/conv/test_gatv2_conv.py index 19328c35c081..4d1308576b9a 100644 --- a/test/nn/conv/test_gatv2_conv.py +++ b/test/nn/conv/test_gatv2_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GATv2Conv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_gatv2_conv(): diff --git a/test/nn/conv/test_gcn2_conv.py b/test/nn/conv/test_gcn2_conv.py index f570b41e49f3..393f2445a4e0 100644 --- a/test/nn/conv/test_gcn2_conv.py +++ b/test/nn/conv/test_gcn2_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GCN2Conv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_gcn2_conv(): diff --git a/test/nn/conv/test_gcn_conv.py b/test/nn/conv/test_gcn_conv.py index 4ac026169c3e..247e2937b31a 100644 --- a/test/nn/conv/test_gcn_conv.py +++ b/test/nn/conv/test_gcn_conv.py @@ -2,11 +2,11 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GCNConv from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor from torch_geometric.utils import to_torch_coo_tensor diff --git a/test/nn/conv/test_gen_conv.py b/test/nn/conv/test_gen_conv.py index a66de738c79c..d7b4a41eb3ba 100644 --- a/test/nn/conv/test_gen_conv.py +++ b/test/nn/conv/test_gen_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GENConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('aggr', [ diff --git a/test/nn/conv/test_gin_conv.py b/test/nn/conv/test_gin_conv.py index 69a7c683e85d..66930cfe3bc6 100644 --- a/test/nn/conv/test_gin_conv.py +++ b/test/nn/conv/test_gin_conv.py @@ -2,10 +2,10 @@ from torch.nn import Linear as Lin from torch.nn import ReLU from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor from torch_geometric.nn import GINConv, GINEConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_gin_conv(): diff --git a/test/nn/conv/test_gmm_conv.py b/test/nn/conv/test_gmm_conv.py index 237cd4970cfc..f839c67e4ddc 100644 --- a/test/nn/conv/test_gmm_conv.py +++ b/test/nn/conv/test_gmm_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GMMConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('separate_gaussians', [True, False]) diff --git a/test/nn/conv/test_gps_conv.py b/test/nn/conv/test_gps_conv.py index 21032cd6e4ae..f38069746bd1 100644 --- a/test/nn/conv/test_gps_conv.py +++ b/test/nn/conv/test_gps_conv.py @@ -1,8 +1,8 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GPSConv, SAGEConv +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm']) diff --git a/test/nn/conv/test_graph_conv.py b/test/nn/conv/test_graph_conv.py index c80bb6d4de48..40cfe683fcda 100644 --- a/test/nn/conv/test_graph_conv.py +++ b/test/nn/conv/test_graph_conv.py 
@@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import GraphConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_graph_conv(): diff --git a/test/nn/conv/test_han_conv.py b/test/nn/conv/test_han_conv.py index c0f6205657f5..74b150048f5f 100644 --- a/test/nn/conv/test_han_conv.py +++ b/test/nn/conv/test_han_conv.py @@ -1,7 +1,7 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import HANConv +from torch_geometric.typing import SparseTensor from torch_geometric.utils import coalesce diff --git a/test/nn/conv/test_heat_conv.py b/test/nn/conv/test_heat_conv.py index feecf59da801..b0cfdf218b1b 100644 --- a/test/nn/conv/test_heat_conv.py +++ b/test/nn/conv/test_heat_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import HEATConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_heat_conv(): diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index bf6a52248f9a..e226bdd49e65 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -1,9 +1,9 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.data import HeteroData from torch_geometric.nn import HGTConv from torch_geometric.profile import benchmark +from torch_geometric.typing import SparseTensor from torch_geometric.utils import coalesce diff --git a/test/nn/conv/test_le_conv.py b/test/nn/conv/test_le_conv.py index 2e7d858aa8d4..4a60638be603 100644 --- a/test/nn/conv/test_le_conv.py +++ b/test/nn/conv/test_le_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import LEConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_le_conv(): diff --git a/test/nn/conv/test_lg_conv.py b/test/nn/conv/test_lg_conv.py index 2ad940f16423..bea2152e0875 100644 --- a/test/nn/conv/test_lg_conv.py +++ b/test/nn/conv/test_lg_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import LGConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_lg_conv(): diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index 23fe85560917..005192c6410d 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -5,10 +5,15 @@ import torch from torch import Tensor from torch.nn import Linear -from torch_sparse import SparseTensor from torch_geometric.nn import MessagePassing, aggr -from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size +from torch_geometric.typing import ( + Adj, + OptPairTensor, + OptTensor, + Size, + SparseTensor, +) from torch_geometric.utils import scatter, spmm diff --git a/test/nn/conv/test_mf_conv.py b/test/nn/conv/test_mf_conv.py index c3f286c700e9..acde3614e688 100644 --- a/test/nn/conv/test_mf_conv.py +++ b/test/nn/conv/test_mf_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import MFConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_mf_conv(): diff --git a/test/nn/conv/test_nn_conv.py b/test/nn/conv/test_nn_conv.py index dac419594418..32bc58047347 100644 --- a/test/nn/conv/test_nn_conv.py +++ b/test/nn/conv/test_nn_conv.py @@ -2,10 +2,10 @@ from 
torch.nn import Linear as Lin from torch.nn import ReLU from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor from torch_geometric.nn import NNConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_nn_conv(): diff --git a/test/nn/conv/test_pan_conv.py b/test/nn/conv/test_pan_conv.py index 3858f14bf387..6dfdc86e6de6 100644 --- a/test/nn/conv/test_pan_conv.py +++ b/test/nn/conv/test_pan_conv.py @@ -1,7 +1,7 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import PANConv +from torch_geometric.typing import SparseTensor def test_pan_conv(): diff --git a/test/nn/conv/test_pdn_conv.py b/test/nn/conv/test_pdn_conv.py index 0bfc74417b50..988175955b20 100644 --- a/test/nn/conv/test_pdn_conv.py +++ b/test/nn/conv/test_pdn_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import PDNConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_pdn_conv(): diff --git a/test/nn/conv/test_pna_conv.py b/test/nn/conv/test_pna_conv.py index 74ba2841eeee..242b1b5646d0 100644 --- a/test/nn/conv/test_pna_conv.py +++ b/test/nn/conv/test_pna_conv.py @@ -1,11 +1,11 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.data import Data from torch_geometric.loader import DataLoader, NeighborLoader from torch_geometric.nn import PNAConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor aggregators = ['sum', 'mean', 'min', 'max', 'var', 'std'] scalers = [ diff --git a/test/nn/conv/test_point_conv.py b/test/nn/conv/test_point_conv.py index f83d9df4e60d..25a65d6c0f38 100644 --- a/test/nn/conv/test_point_conv.py +++ b/test/nn/conv/test_point_conv.py @@ -2,10 +2,10 @@ from torch.nn import Linear as Lin from torch.nn import ReLU from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor from torch_geometric.nn import PointNetConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_point_net_conv(): diff --git a/test/nn/conv/test_point_gnn_conv.py b/test/nn/conv/test_point_gnn_conv.py index a007993a5adc..d6ddcaa5ef14 100644 --- a/test/nn/conv/test_point_gnn_conv.py +++ b/test/nn/conv/test_point_gnn_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import MLP, PointGNNConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_point_gnn_conv(): diff --git a/test/nn/conv/test_point_transformer_conv.py b/test/nn/conv/test_point_transformer_conv.py index bd8d115dbdcd..bdb05c8e093b 100644 --- a/test/nn/conv/test_point_transformer_conv.py +++ b/test/nn/conv/test_point_transformer_conv.py @@ -1,9 +1,9 @@ import torch from torch.nn import Linear, ReLU, Sequential -from torch_sparse import SparseTensor from torch_geometric.nn import PointTransformerConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_point_transformer_conv(): diff --git a/test/nn/conv/test_ppf_conv.py b/test/nn/conv/test_ppf_conv.py index fc1c58594c01..747b8bd208ca 100644 --- a/test/nn/conv/test_ppf_conv.py +++ b/test/nn/conv/test_ppf_conv.py @@ -3,10 +3,10 @@ from torch.nn import Linear as Lin from torch.nn import ReLU from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor from torch_geometric.nn import PPFConv from 
torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_ppf_conv(): diff --git a/test/nn/conv/test_res_gated_graph_conv.py b/test/nn/conv/test_res_gated_graph_conv.py index eb7190034c9c..32fb38758e04 100644 --- a/test/nn/conv/test_res_gated_graph_conv.py +++ b/test/nn/conv/test_res_gated_graph_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import ResGatedGraphConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_res_gated_graph_conv(): diff --git a/test/nn/conv/test_rgat_conv.py b/test/nn/conv/test_rgat_conv.py index b3c1afa1d27f..2e8a0a054916 100644 --- a/test/nn/conv/test_rgat_conv.py +++ b/test/nn/conv/test_rgat_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import RGATConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('mod', [ diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index 8bfd6b8394d4..9ce78a805b26 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import FastRGCNConv, RGCNConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor classes = [RGCNConv, FastRGCNConv] confs = [(None, None), (2, None), (None, 2)] diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index 3463dd4dcf12..6785982f83ef 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SAGEConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('project', [False, True]) diff --git a/test/nn/conv/test_sg_conv.py b/test/nn/conv/test_sg_conv.py index d35451d320fe..6f449c90f5a1 100644 --- a/test/nn/conv/test_sg_conv.py +++ b/test/nn/conv/test_sg_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SGConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_sg_conv(): diff --git a/test/nn/conv/test_signed_conv.py b/test/nn/conv/test_signed_conv.py index d4d006ee1cff..df5c625ca04d 100644 --- a/test/nn/conv/test_signed_conv.py +++ b/test/nn/conv/test_signed_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SignedConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_signed_conv(): diff --git a/test/nn/conv/test_simple_conv.py b/test/nn/conv/test_simple_conv.py index 4acad776b265..28e1aa8c6e59 100644 --- a/test/nn/conv/test_simple_conv.py +++ b/test/nn/conv/test_simple_conv.py @@ -1,9 +1,9 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SimpleConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('aggr, combine_root', [ diff --git a/test/nn/conv/test_spline_conv.py b/test/nn/conv/test_spline_conv.py index 488c2d40dcd7..7bd99baa5867 100644 --- a/test/nn/conv/test_spline_conv.py +++ b/test/nn/conv/test_spline_conv.py @@ -1,10 +1,10 @@ import warnings 
import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SplineConv from torch_geometric.testing import is_full_test, withPackage +from torch_geometric.typing import SparseTensor @withPackage('torch_spline_conv') diff --git a/test/nn/conv/test_ssg_conv.py b/test/nn/conv/test_ssg_conv.py index f44b0d24ebf1..4afa4c226b50 100644 --- a/test/nn/conv/test_ssg_conv.py +++ b/test/nn/conv/test_ssg_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SSGConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_ssg_conv(): diff --git a/test/nn/conv/test_supergat_conv.py b/test/nn/conv/test_supergat_conv.py index dce3c073184d..ab588a523639 100644 --- a/test/nn/conv/test_supergat_conv.py +++ b/test/nn/conv/test_supergat_conv.py @@ -1,8 +1,8 @@ import pytest import torch -from torch_sparse import SparseTensor from torch_geometric.nn import SuperGATConv +from torch_geometric.typing import SparseTensor @pytest.mark.parametrize('att_type', ['MX', 'SD']) diff --git a/test/nn/conv/test_tag_conv.py b/test/nn/conv/test_tag_conv.py index a74d6c2749d9..af18a2c57f81 100644 --- a/test/nn/conv/test_tag_conv.py +++ b/test/nn/conv/test_tag_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import TAGConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_tag_conv(): diff --git a/test/nn/conv/test_transformer_conv.py b/test/nn/conv/test_transformer_conv.py index 48ee0a3c83ab..d30b2c878cc6 100644 --- a/test/nn/conv/test_transformer_conv.py +++ b/test/nn/conv/test_transformer_conv.py @@ -1,8 +1,8 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import TransformerConv from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_transformer_conv(): diff --git a/test/nn/conv/test_wl_conv.py b/test/nn/conv/test_wl_conv.py index 889e578da9d9..675de8570fb3 100644 --- a/test/nn/conv/test_wl_conv.py +++ b/test/nn/conv/test_wl_conv.py @@ -1,7 +1,7 @@ import torch -from torch_sparse import SparseTensor from torch_geometric.nn import WLConv +from torch_geometric.typing import SparseTensor from torch_geometric.utils import one_hot diff --git a/test/nn/test_sequential.py b/test/nn/test_sequential.py index 6f54889c165a..4d8db76fdade 100644 --- a/test/nn/test_sequential.py +++ b/test/nn/test_sequential.py @@ -3,7 +3,6 @@ import torch import torch.fx from torch.nn import Dropout, Linear, ReLU -from torch_sparse import SparseTensor from torch_geometric.nn import ( GCNConv, @@ -12,6 +11,7 @@ Sequential, global_mean_pool, ) +from torch_geometric.typing import SparseTensor def test_sequential(): From 833ac54d2ef1ae9a98edd79ea825d6dee26c4def Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 11:57:20 +0200 Subject: [PATCH 1063/2432] Drop `torch_sparse` dependency in tests (10/n) (#7061) --- CHANGELOG.md | 1 + test/nn/conv/test_message_passing.py | 181 ++++++++++-------- torch_geometric/nn/conv/message_passing.jinja | 28 ++- torch_geometric/nn/conv/message_passing.py | 22 ++- torch_geometric/nn/conv/utils/typing.py | 2 + torch_geometric/nn/sequential.jinja | 1 - torch_geometric/typing.py | 6 + 7 files changed, 137 insertions(+), 104 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd66199f6d03..ab6051e61b61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a 
Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added support for `jittable` without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index 005192c6410d..46533b089410 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -6,6 +6,7 @@ from torch import Tensor from torch.nn import Linear +import torch_geometric.typing from torch_geometric.nn import MessagePassing, aggr from torch_geometric.typing import ( Adj, @@ -14,7 +15,7 @@ Size, SparseTensor, ) -from torch_geometric.utils import scatter, spmm +from torch_geometric.utils import scatter, spmm, to_torch_csc_tensor class MyConv(MessagePassing): @@ -56,24 +57,28 @@ def test_my_conv_basic(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() + value = torch.randn(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) conv = MyConv(8, 32) out = conv(x1, edge_index, value) assert out.size() == (4, 32) assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out, atol=1e-6) - assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, torch_adj.t()), out, atol=1e-6) + assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) conv.fuse = False - assert torch.allclose(conv(x1, adj.t()), out) - assert torch.allclose(conv(x1, torch_adj.t()), out, atol=1e-6) + assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) conv.fuse = True - adj = adj.sparse_resize((4, 2)) - torch_adj = adj.to_torch_sparse_csc_tensor() + # Bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, value, size=(4, 2)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, value, (4, 2)) conv = MyConv((8, 16), 32) out1 = conv((x1, x2), edge_index, value) @@ -81,19 +86,21 @@ def test_my_conv_basic(): assert out1.size() == (2, 32) assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), torch_adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, None), torch_adj.t()), out2, atol=1e-6) + assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) conv.fuse = False - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - assert 
torch.allclose(conv((x1, x2), torch_adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, None), torch_adj.t()), out2, atol=1e-6) + assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) # Test gradient computation for `torch.sparse` tensors: conv.fuse = True - torch_adj_t = torch_adj.t().requires_grad_() + torch_adj_t = adj1.t().requires_grad_() out = conv((x1, x2), torch_adj_t) out.sum().backward() assert torch_adj_t.grad is not None @@ -118,26 +125,29 @@ def test_my_conv_jittable(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.randn(edge_index.size(1)) conv = MyConv(8, 32) out = conv(x1, edge_index, value) + conv.jittable() # Should succeed. + torch.jit.script(conv.jittable()) # Should succeed. + t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, edge_index, value), out, atol=1e-6) assert torch.allclose(jit(x1, edge_index, value, (4, 4)), out, atol=1e-6) - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) - jit.fuse = False - assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) - jit.fuse = True + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + + t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) + jit.fuse = False + assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) + jit.fuse = True - adj = adj.sparse_resize((4, 2)) conv = MyConv((8, 16), 32) out1 = conv((x1, x2), edge_index, value) out2 = conv((x1, None), edge_index, value, (4, 2)) @@ -148,14 +158,17 @@ def test_my_conv_jittable(): assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out1) assert torch.allclose(jit((x1, None), edge_index, value, (4, 2)), out2) - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) - jit.fuse = False - assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) - jit.fuse = True + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + + t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) + jit.fuse = False + assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) + jit.fuse = True @pytest.mark.parametrize('aggr', ['add', 'sum', 'mean', 'min', 'max', 'mul']) @@ -173,25 +186,29 @@ def test_my_static_graph_conv(): x1 = torch.randn(3, 4, 8) x2 = 
torch.randn(3, 2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.randn(edge_index.size(1)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 4)) conv = MyConv(8, 32) out = conv(x1, edge_index, value) assert out.size() == (3, 4, 32) assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out) - assert torch.allclose(conv(x1, adj.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x1, adj.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 2)) - adj = adj.sparse_resize((4, 2)) conv = MyConv((8, 16), 32) out1 = conv((x1, x2), edge_index, value) - assert out1.size() == (3, 2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj.t()), out1) out2 = conv((x1, None), edge_index, value, (4, 2)) + assert out1.size() == (3, 2, 32) assert out2.size() == (3, 2, 32) - assert torch.allclose(conv((x1, None), adj.t()), out2) + assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv((x1, x2), adj.t()), out1) + assert torch.allclose(conv((x1, None), adj.t()), out2) class MyMultipleAggrConv(MessagePassing): @@ -214,22 +231,21 @@ def test_my_multiple_aggr_conv(multi_aggr_tuple): aggr_kwargs, expand = multi_aggr_tuple x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) conv = MyMultipleAggrConv(aggr_kwargs=aggr_kwargs) out = conv(x, edge_index) assert out.size() == (4, 16 * expand) - assert torch.allclose(conv(x, adj.t()), out) - assert torch.allclose(conv(x, torch_adj.t()), out) + assert torch.allclose(conv(x, adj1.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x, adj2.t()), out) def test_my_multiple_aggr_conv_jittable(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) conv = MyMultipleAggrConv() out = conv(x, edge_index) @@ -238,9 +254,11 @@ def test_my_multiple_aggr_conv_jittable(): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index), out) - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + t = '(Tensor, SparseTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, adj.t()), out) def test_copy(): @@ -283,25 +301,25 @@ def message(self, edge_attr: Tensor) -> Tensor: def test_my_edge_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + row, col = edge_index expected = scatter(x[row] - 
x[col], col, dim=0, dim_size=4, reduce='sum') conv = MyEdgeConv() out = conv(x, edge_index) assert out.size() == (4, 16) assert torch.allclose(out, expected) - assert torch.allclose(conv(x, adj.t()), out) - assert torch.allclose(conv(x, torch_adj.t()), out) + assert torch.allclose(conv(x, adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out) def test_my_edge_conv_jittable(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) conv = MyEdgeConv() out = conv(x, edge_index) @@ -310,9 +328,11 @@ def test_my_edge_conv_jittable(): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index), out) - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + t = '(Tensor, SparseTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, adj.t()), out) num_pre_hook_calls = 0 @@ -324,9 +344,8 @@ def test_message_passing_hooks(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.randn(edge_index.size(1)) + adj = to_torch_csc_tensor(edge_index, value, size=(4, 4)) def pre_hook(module, inputs): assert module == conv @@ -446,20 +465,22 @@ def message(self, x_j, zeros: bool = True): def test_my_default_arg_conv(): x = torch.randn(4, 1) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = MyDefaultArgConv() assert conv(x, edge_index).view(-1).tolist() == [0, 0, 0, 0] - assert conv(x, adj.t()).view(-1).tolist() == [0, 0, 0, 0] - assert conv(x, torch_adj.t()).view(-1).tolist() == [0, 0, 0, 0] + assert conv(x, adj1.t()).view(-1).tolist() == [0, 0, 0, 0] + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert conv(x, adj2.t()).view(-1).tolist() == [0, 0, 0, 0] def test_my_default_arg_conv_jittable(): conv = MyDefaultArgConv() - with pytest.raises(RuntimeError): # This should not succeed in JIT mode. + # This should not succeed in JIT mode. 
+ with pytest.raises((RuntimeError, AttributeError)): torch.jit.script(conv.jittable()) @@ -555,13 +576,17 @@ def test_message_passing_with_aggr_module(aggr_module): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = MyAggregatorConv(aggr=aggr_module) assert isinstance(conv.aggr_module, aggr.Aggregation) out = conv(x, edge_index) assert out.size(0) == 4 and out.size(1) in {8, 16} - assert torch.allclose(conv(x, adj.t()), out) + assert torch.allclose(conv(x, adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out) def test_message_passing_int32_edge_index(): diff --git a/torch_geometric/nn/conv/message_passing.jinja b/torch_geometric/nn/conv/message_passing.jinja index 54a8ab0c99e4..13a32aa4c5d9 100644 --- a/torch_geometric/nn/conv/message_passing.jinja +++ b/torch_geometric/nn/conv/message_passing.jinja @@ -4,7 +4,6 @@ from torch_geometric.typing import * import torch from torch import Tensor -from torch_geometric.typing import SparseTensor, torch_sparse from torch_geometric.nn.conv.message_passing import * from {{module}} import * @@ -63,11 +62,10 @@ class {{cls_name}}({{parent_cls_name}}): return src.index_select(self.node_dim, index) elif isinstance(edge_index, SparseTensor): + row, col, _ = edge_index.coo() if dim == 0: - col = edge_index.storage.col() return src.index_select(self.node_dim, col) elif dim == 1: - row = edge_index.storage.row() return src.index_select(self.node_dim, row) raise ValueError( @@ -126,20 +124,20 @@ class {{cls_name}}({{parent_cls_name}}): edge_index_j = edge_def[j] elif isinstance(edge_def, SparseTensor): adj_t = edge_def - edge_index_i = edge_def.storage.row() - edge_index_j = edge_def.storage.col() - ptr = edge_def.storage.rowptr() + edge_index_i, edge_index_j, value = edge_def.coo() + ptr, _, _ = edge_def.csr() + {% if 'edge_weight' in collect_types.keys() %} if edge_weight is None: - edge_weight = edge_def.storage.value() + edge_weight = value {% endif %} {% if 'edge_attr' in collect_types.keys() %} if edge_attr is None: - edge_attr = edge_def.storage.value() + edge_attr = value {% endif %} {% if 'edge_type' in collect_types.keys() %} if edge_type is None: - edge_type = edge_def.storage.value() + edge_type = value {% endif %} {% if collect_types.get('edge_weight', 'Optional')[:8] != 'Optional' %}assert edge_weight is not None{% endif %} @@ -205,20 +203,20 @@ class {{cls_name}}({{parent_cls_name}}): edge_index_j = edge_def[j] elif isinstance(edge_def, SparseTensor): adj_t = edge_def - edge_index_i = edge_def.storage.row() - edge_index_j = edge_def.storage.col() - ptr = edge_def.storage.rowptr() + edge_index_i, edge_index_j, value = edge_def.coo() + ptr, _, _ = edge_def.csr() + {% if 'edge_weight' in edge_collect_types.keys() %} if edge_weight is None: - edge_weight = edge_def.storage.value() + edge_weight = value {% endif %} {% if 'edge_attr' in edge_collect_types.keys() %} if edge_attr is None: - edge_attr = edge_def.storage.value() + edge_attr = value {% endif %} {% if 'edge_type' in edge_collect_types.keys() %} if edge_type is None: - edge_type = edge_def.storage.value() + edge_type = value {% endif %} {% if edge_collect_types.get('edge_weight', 'Optional')[:8] != 'Optional' %}assert edge_weight is not None{% endif %} diff --git 
a/torch_geometric/nn/conv/message_passing.py b/torch_geometric/nn/conv/message_passing.py index 2db5c9893766..95339bf183d7 100644 --- a/torch_geometric/nn/conv/message_passing.py +++ b/torch_geometric/nn/conv/message_passing.py @@ -1,6 +1,7 @@ import inspect import os import os.path as osp +import random import re from collections import OrderedDict from inspect import Parameter @@ -15,7 +16,6 @@ Union, get_type_hints, ) -from uuid import uuid1 import torch from torch import Tensor @@ -302,11 +302,10 @@ def _lift(self, src, edge_index, dim): raise e elif isinstance(edge_index, SparseTensor): + row, col, _ = edge_index.coo() if dim == 0: - col = edge_index.storage.col() return src.index_select(self.node_dim, col) elif dim == 1: - row = edge_index.storage.row() return src.index_select(self.node_dim, row) raise ValueError( @@ -359,17 +358,20 @@ def _collect(self, args, edge_index, size, kwargs): out['ptr'] = None elif isinstance(edge_index, SparseTensor): + row, col, value = edge_index.coo() + rowptr, _, _ = edge_index.csr() + out['adj_t'] = edge_index out['edge_index'] = None - out['edge_index_i'] = edge_index.storage.row() - out['edge_index_j'] = edge_index.storage.col() - out['ptr'] = edge_index.storage.rowptr() + out['edge_index_i'] = row + out['edge_index_j'] = col + out['ptr'] = rowptr if out.get('edge_weight', None) is None: - out['edge_weight'] = edge_index.storage.value() + out['edge_weight'] = value if out.get('edge_attr', None) is None: - out['edge_attr'] = edge_index.storage.value() + out['edge_attr'] = value if out.get('edge_type', None) is None: - out['edge_type'] = edge_index.storage.value() + out['edge_type'] = value out['index'] = out['edge_index_i'] out['size'] = size @@ -867,7 +869,7 @@ def jittable(self, typing: Optional[str] = None) -> 'MessagePassing': with open(osp.join(root, 'message_passing.jinja'), 'r') as f: template = Template(f.read()) - uid = uuid1().hex[:6] + uid = '%06x' % random.randrange(16**6) cls_name = f'{self.__class__.__name__}Jittable_{uid}' jit_module_repr = template.render( uid=uid, diff --git a/torch_geometric/nn/conv/utils/typing.py b/torch_geometric/nn/conv/utils/typing.py index 25fdfdd34447..f4bcb40031d2 100644 --- a/torch_geometric/nn/conv/utils/typing.py +++ b/torch_geometric/nn/conv/utils/typing.py @@ -27,6 +27,8 @@ def sanitize(type_repr: str): type_repr = type_repr.replace('typing.', '') type_repr = type_repr.replace('torch_sparse.tensor.', '') type_repr = type_repr.replace('Adj', 'Union[Tensor, SparseTensor]') + type_repr = type_repr.replace('torch_geometric.SparseTensor', + 'SparseTensor') # Replace `Union[..., NoneType]` by `Optional[...]`. 
sexp = pp.nestedExpr(opener='[', closer=']') diff --git a/torch_geometric/nn/sequential.jinja b/torch_geometric/nn/sequential.jinja index 2e5abc6733bf..013d6f46cb05 100644 --- a/torch_geometric/nn/sequential.jinja +++ b/torch_geometric/nn/sequential.jinja @@ -3,7 +3,6 @@ from torch_geometric.typing import * import torch from torch import Tensor -from torch_geometric.typing import SparseTensor class {{cls_name}}(torch.nn.Module): diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 75af7e5c278c..e7379e7c8466 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -73,6 +73,12 @@ def size(self, dim: int) -> int: def is_cuda(self) -> bool: raise ImportError("'SparseTensor' requires 'torch-sparse'") + def coo(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + + def csr(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + def to_torch_sparse_csr_tensor( self, dtype: Optional[torch.dtype] = None, From 49aa49079d208955710b5852b03c92af79e3c483 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 12:35:59 +0200 Subject: [PATCH 1064/2432] Drop `torch_sparse` dependency in tests (11/n) (#7062) --- .github/workflows/latest_testing.yml | 5 ++--- CHANGELOG.md | 2 +- test/nn/test_sequential.py | 21 ++++++++++++--------- torch_geometric/typing.py | 20 ++++++++++++++++++++ 4 files changed, 35 insertions(+), 13 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 5a28fe1dc0c7..224f68748279 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -62,6 +62,7 @@ jobs: pytest test/utils/ pytest test/visualization/ pytest test/nn/aggr + pytest test/nn/conv/test_message_passing.py # pytest test/nn/conv pytest test/nn/dense pytest test/nn/functional @@ -70,6 +71,4 @@ jobs: pytest test/nn/norm pytest test/nn/pool pytest test/nn/unpool - pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py test/nn/test_model_summary.py test/nn/test_module_dict.py test/nn/test_parameter_dict.py test/nn/test_reshape.py test/nn/test_resolver.py - # pytest test/nn/test_sequential.py - pytest test/nn/test_to_fixed_size_transformer.py test/nn/test_to_hetero_module.py test/nn/test_to_hetero_transformer.py test/nn/test_to_hetero_with_bases_transformer.py + pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py test/nn/test_model_summary.py test/nn/test_module_dict.py test/nn/test_parameter_dict.py test/nn/test_reshape.py test/nn/test_resolver.py test/nn/test_sequential.py test/nn/test_to_fixed_size_transformer.py test/nn/test_to_hetero_module.py test/nn/test_to_hetero_transformer.py test/nn/test_to_hetero_with_bases_transformer.py diff --git a/CHANGELOG.md b/CHANGELOG.md index ab6051e61b61..d8bf4ab8e8dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added support for `jittable` without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061)) +- Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) diff --git a/test/nn/test_sequential.py b/test/nn/test_sequential.py index 4d8db76fdade..086e194c377c 100644 --- a/test/nn/test_sequential.py +++ b/test/nn/test_sequential.py @@ -4,6 +4,7 @@ import torch.fx from torch.nn import Dropout, Linear, ReLU +import torch_geometric.typing from torch_geometric.nn import ( GCNConv, JumpingKnowledge, @@ -67,7 +68,6 @@ def test_sequential(): def test_sequential_jittable(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - adj_t = SparseTensor(row=edge_index[0], col=edge_index[1]).t() model = Sequential('x: Tensor, edge_index: Tensor', [ (GCNConv(16, 64).jittable(), 'x, edge_index -> x'), @@ -78,14 +78,17 @@ def test_sequential_jittable(): ]) torch.jit.script(model)(x, edge_index) - model = Sequential('x: Tensor, edge_index: SparseTensor', [ - (GCNConv(16, 64).jittable(), 'x, edge_index -> x'), - ReLU(inplace=True), - (GCNConv(64, 64).jittable(), 'x, edge_index -> x'), - ReLU(inplace=True), - Linear(64, 7), - ]) - torch.jit.script(model)(x, adj_t) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t = SparseTensor.from_edge_index(edge_index).t() + + model = Sequential('x: Tensor, edge_index: SparseTensor', [ + (GCNConv(16, 64).jittable(), 'x, edge_index -> x'), + ReLU(inplace=True), + (GCNConv(64, 64).jittable(), 'x, edge_index -> x'), + ReLU(inplace=True), + Linear(64, 7), + ]) + torch.jit.script(model)(x, adj_t) def symbolic_trace(module): diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index e7379e7c8466..81c2316c5400 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -73,6 +73,13 @@ def size(self, dim: int) -> int: def is_cuda(self) -> bool: raise ImportError("'SparseTensor' requires 'torch-sparse'") + def has_value(self) -> bool: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + + def fill_value(self, fill_value: float, + dtype: Optional[torch.dtype] = None) -> 'SparseTensor': + raise ImportError("'SparseTensor' requires 'torch-sparse'") + def coo(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]: raise ImportError("'SparseTensor' requires 'torch-sparse'") @@ -91,6 +98,19 @@ def matmul(src: SparseTensor, other: Tensor, reduce: str = "sum") -> Tensor: raise ImportError("'matmul' requires 'torch-sparse'") + @staticmethod + def sum(src: SparseTensor, dim: Optional[int] = None) -> Tensor: + raise ImportError("'sum' requires 'torch-sparse'") + + @staticmethod + def mul(src: SparseTensor, other: Tensor) -> SparseTensor: + raise ImportError("'mul' requires 'torch-sparse'") + + @staticmethod + def fill_diag(src: SparseTensor, fill_value: float, + k: int = 0) -> SparseTensor: + raise ImportError("'fill_diag' requires 'torch-sparse'") + # Types for accessing data #################################################### From 4e03dbeb96ed1fc27867be3b5a256c5a2f9f6132 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 14:34:48 +0200 Subject: [PATCH 
1065/2432] Drop `torch_sparse` dependency in tests (12/n) (#7063) --- test/nn/conv/test_agnn_conv.py | 14 +++-- test/nn/conv/test_antisymmetric_conv.py | 31 ++++++----- test/nn/conv/test_appnp.py | 27 +++++---- test/nn/conv/test_arma_conv.py | 18 ++++-- test/nn/conv/test_cg_conv.py | 73 +++++++++++++------------ test/nn/conv/test_cluster_gcn_conv.py | 16 ++++-- test/nn/conv/test_dna_conv.py | 61 ++++++++++++--------- test/nn/conv/test_edge_conv.py | 64 +++++++++++++--------- test/nn/conv/test_eg_conv.py | 50 +++++++---------- 9 files changed, 198 insertions(+), 156 deletions(-) diff --git a/test/nn/conv/test_agnn_conv.py b/test/nn/conv/test_agnn_conv.py index 27d70070241b..bb8ffda978d6 100644 --- a/test/nn/conv/test_agnn_conv.py +++ b/test/nn/conv/test_agnn_conv.py @@ -1,31 +1,35 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import AGNNConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor @pytest.mark.parametrize('requires_grad', [True, False]) def test_agnn_conv(requires_grad): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = AGNNConv(requires_grad=requires_grad) assert str(conv) == 'AGNNConv()' out = conv(x, edge_index) assert out.size() == (4, 16) assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert jit(x, edge_index).tolist() == out.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x, adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_antisymmetric_conv.py b/test/nn/conv/test_antisymmetric_conv.py index a3b8df56ab90..e444f9a565bd 100644 --- a/test/nn/conv/test_antisymmetric_conv.py +++ b/test/nn/conv/test_antisymmetric_conv.py @@ -1,29 +1,32 @@ import torch +import torch_geometric.typing from torch_geometric.nn import AntiSymmetricConv from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_antisymmetric_conv(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = AntiSymmetricConv(8) assert str(conv) == ('AntiSymmetricConv(8, phi=GCNConv(8, 8), ' 'num_iters=1, epsilon=0.1, gamma=0.1)') - out = conv(x, edge_index) - assert out.size() == (4, 8) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out, atol=1e-6) + out1 = conv(x, edge_index) + assert 
out1.size() == (4, 8) + assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - out = conv(x, edge_index, value) - assert out.size() == (4, 8) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out, atol=1e-6) + out2 = conv(x, edge_index, value) + assert out2.size() == (4, 8) + assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) diff --git a/test/nn/conv/test_appnp.py b/test/nn/conv/test_appnp.py index 15b10ce27f1a..093e94416307 100644 --- a/test/nn/conv/test_appnp.py +++ b/test/nn/conv/test_appnp.py @@ -1,29 +1,32 @@ import torch +import torch_geometric.typing from torch_geometric.nn import APPNP from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_appnp(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = APPNP(K=3, alpha=0.1, cached=True) assert str(conv) == 'APPNP(K=3, alpha=0.1)' out = conv(x, edge_index) assert out.size() == (4, 16) assert torch.allclose(conv(x, adj1.t()), out) - assert torch.allclose(conv(x, adj2.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out) # Run again to test the cached functionality: assert conv._cached_edge_index is not None - assert conv._cached_adj_t is not None assert torch.allclose(conv(x, edge_index), conv(x, adj1.t())) - assert torch.allclose(conv(x, edge_index), conv(x, adj2.t())) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert conv._cached_adj_t is not None + assert torch.allclose(conv(x, edge_index), conv(x, adj2.t())) conv.reset_parameters() assert conv._cached_edge_index is None @@ -34,20 +37,22 @@ def test_appnp(): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out) + assert torch.allclose(jit(x, adj2.t()), out) def test_appnp_dropout(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) # With dropout probability of 1.0, the final output equals to alpha * x: conv = APPNP(K=2, alpha=0.1, dropout=1.0) assert torch.allclose(0.1 * x, conv(x, edge_index)) assert torch.allclose(0.1 * x, conv(x, adj1.t())) - assert torch.allclose(0.1 * x, conv(x, adj2.t())) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(0.1 * x, conv(x, adj2.t())) diff --git a/test/nn/conv/test_arma_conv.py b/test/nn/conv/test_arma_conv.py index f344a42dbac6..9a3ed519eeac 100644 --- a/test/nn/conv/test_arma_conv.py 
+++ b/test/nn/conv/test_arma_conv.py @@ -1,30 +1,38 @@ +import pytest import torch +import torch_geometric.typing from torch_geometric.nn import ARMAConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_arma_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = ARMAConv(16, 32, num_stacks=8, num_layers=4) assert str(conv) == 'ARMAConv(16, 32, num_stacks=8, num_layers=4)' out = conv(x, edge_index) assert out.size() == (4, 32) - assert conv(x, adj.t()).tolist() == out.tolist() + with pytest.raises(RuntimeError): # No 3D feature tensor support. + assert torch.allclose(conv(x, adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) + assert torch.allclose(jit(x, adj2.t()), out, atol=1e-6) def test_lazy_arma_conv(): diff --git a/test/nn/conv/test_cg_conv.py b/test/nn/conv/test_cg_conv.py index d67b8df5ee40..70cfcaa8e950 100644 --- a/test/nn/conv/test_cg_conv.py +++ b/test/nn/conv/test_cg_conv.py @@ -1,103 +1,104 @@ +import pytest import torch +import torch_geometric.typing from torch_geometric.nn import CGConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csr_tensor -def test_cg_conv(): +@pytest.mark.parametrize('batch_norm', [False, True]) +def test_cg_conv(batch_norm): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csr_tensor(edge_index, size=(4, 4)) - conv = CGConv(8) + conv = CGConv(8, batch_norm=batch_norm) assert str(conv) == 'CGConv(8, dim=0)' out = conv(x1, edge_index) assert out.size() == (4, 8) assert torch.allclose(conv(x1, adj1.t()), out) - assert torch.allclose(conv(x1, adj2.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csr_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() conv = CGConv((8, 16)) assert str(conv) == 'CGConv((8, 16), dim=0)' out = conv((x1, x2), edge_index) assert 
out.size() == (2, 16) assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) - - # Test batch_norm true: - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = CGConv(8, batch_norm=True) - assert str(conv) == 'CGConv(8, dim=0)' - out = conv(x1, edge_index) - assert out.size() == (4, 8) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) def test_cg_conv_with_edge_features(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.rand(edge_index.size(1), 3) conv = CGConv(8, dim=3) assert str(conv) == 'CGConv(8, dim=3)' out = conv(x1, edge_index, value) assert out.size() == (4, 8) - assert conv(x1, adj.t()).tolist() == out.tolist() + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index, value).tolist() == out.tolist() + assert torch.allclose(jit(x1, edge_index, value), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() + assert torch.allclose(jit(x1, adj.t()), out) - adj = adj.sparse_resize((4, 2)) + # Test bipartite message passing: conv = CGConv((8, 16), dim=3) assert str(conv) == 'CGConv((8, 16), dim=3)' out = conv((x1, x2), edge_index, value) assert out.size() == (2, 16) - assert conv((x1, x2), adj.t()).tolist() == out.tolist() + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out) if is_full_test(): t = '(PairTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index, value).tolist() == out.tolist() + assert torch.allclose(jit((x1, x2), edge_index, value), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out.tolist() + assert torch.allclose(jit((x1, x2), adj.t()), out) diff --git a/test/nn/conv/test_cluster_gcn_conv.py b/test/nn/conv/test_cluster_gcn_conv.py index 
41373f13afd2..b31db4c14524 100644 --- a/test/nn/conv/test_cluster_gcn_conv.py +++ b/test/nn/conv/test_cluster_gcn_conv.py @@ -1,29 +1,33 @@ import torch +import torch_geometric.typing from torch_geometric.nn import ClusterGCNConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_cluster_gcn_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = ClusterGCNConv(16, 32, diag_lambda=1.) assert str(conv) == 'ClusterGCNConv(16, 32, diag_lambda=1.0)' out = conv(x, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out, atol=1e-5) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-5) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-5) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-5) + assert torch.allclose(jit(x, adj2.t()), out, atol=1e-5) diff --git a/test/nn/conv/test_dna_conv.py b/test/nn/conv/test_dna_conv.py index c7ec5d71fe7b..b2f56e731023 100644 --- a/test/nn/conv/test_dna_conv.py +++ b/test/nn/conv/test_dna_conv.py @@ -1,21 +1,23 @@ +import pytest import torch +import torch_geometric.typing from torch_geometric.nn import DNAConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor -def test_dna_conv1(): - channels = 32 - num_layers = 3 +@pytest.mark.parametrize('channels', [32]) +@pytest.mark.parametrize('num_layers', [3]) +def test_dna_conv(channels, num_layers): + x = torch.randn((4, num_layers, channels)) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - num_nodes = edge_index.max().item() + 1 - x = torch.randn((num_nodes, num_layers, channels)) conv = DNAConv(channels, heads=4, groups=8, dropout=0.0) assert str(conv) == 'DNAConv(32, heads=4, groups=8)' out = conv(x, edge_index) - assert out.size() == (num_nodes, channels) + assert out.size() == (4, channels) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -25,7 +27,7 @@ def test_dna_conv1(): conv = DNAConv(channels, heads=1, groups=1, dropout=0.0) assert str(conv) == 'DNAConv(32, heads=1, groups=1)' out = conv(x, edge_index) - assert out.size() == (num_nodes, channels) + assert out.size() == (4, channels) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -34,8 +36,9 @@ def test_dna_conv1(): conv = DNAConv(channels, heads=1, groups=1, dropout=0.0, cached=True) out = conv(x, edge_index) + assert conv._cached_edge_index is not None out = conv(x, edge_index) - assert out.size() == (num_nodes, channels) + assert out.size() == (4, channels) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -43,26 +46,29 @@ def test_dna_conv1(): assert jit(x, edge_index).tolist() == out.tolist() -def test_dna_conv2(): - x = torch.randn((4, 3, 32)) 
+@pytest.mark.parametrize('channels', [32]) +@pytest.mark.parametrize('num_layers', [3]) +def test_dna_conv_sparse_tensor(channels, num_layers): + x = torch.randn((4, num_layers, channels)) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = DNAConv(32, heads=4, groups=8, dropout=0.0) assert str(conv) == 'DNAConv(32, heads=4, groups=8)' out1 = conv(x, edge_index) assert out1.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) out2 = conv(x, edge_index, value) assert out2.size() == (4, 32) assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -70,14 +76,19 @@ def test_dna_conv2(): assert jit(x, edge_index).tolist() == out1.tolist() assert jit(x, edge_index, value).tolist() == out2.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) + + conv = DNAConv(channels, heads=1, groups=1, dropout=0.0, cached=True) - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() - conv(x, adj1.t()) + out1 = conv(x, adj1.t()) + assert conv._cached_edge_index is not None assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert conv._cached_adj_t is not None + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) diff --git a/test/nn/conv/test_edge_conv.py b/test/nn/conv/test_edge_conv.py index f9b278b0b733..081bf7101ebb 100644 --- a/test/nn/conv/test_edge_conv.py +++ b/test/nn/conv/test_edge_conv.py @@ -3,18 +3,18 @@ from torch.nn import ReLU from torch.nn import Sequential as Seq +import torch_geometric.typing from torch_geometric.nn import DynamicEdgeConv, EdgeConv from torch_geometric.testing import is_full_test, withPackage from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_edge_conv_conv(): x1 = torch.randn(4, 16) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32)) conv = EdgeConv(nn) @@ -24,41 +24,55 @@ def 
test_edge_conv_conv(): ' (1): ReLU()\n' ' (2): Linear(in_features=16, out_features=32, bias=True)\n' '))') - out1 = conv(x1, edge_index) - assert out1.size() == (4, 32) - assert conv((x1, x1), edge_index).tolist() == out1.tolist() - assert torch.allclose(conv(x1, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x1), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x1), adj2.t()), out1, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out2 = conv((x1, x2), edge_index) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj1.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out2, atol=1e-6) + out = conv(x1, edge_index) + assert out.size() == (4, 32) + assert torch.allclose(conv((x1, x1), edge_index), out, atol=1e-6) + assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(conv((x1, x1), adj1.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv((x1, x1), adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out1.tolist() + assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) t = '(PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x1), edge_index).tolist() == out1.tolist() - assert jit((x1, x2), edge_index).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x1), edge_index), out, atol=1e-6) - adj1 = adj1.sparse_resize((4, 4)) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj1.t()).tolist() == out1.tolist() + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) + + t = '(PairTensor, SparseTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit((x1, x1), adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + + out = conv((x1, x2), edge_index) + assert out.size() == (2, 32) + assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) + + if is_full_test(): + t = '(PairTensor, Tensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x1), adj1.t()).tolist() == out1.tolist() - adj1 = adj1.sparse_resize((4, 2)) - assert jit((x1, x2), adj1.t()).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) @withPackage('torch_cluster') diff --git a/test/nn/conv/test_eg_conv.py b/test/nn/conv/test_eg_conv.py index 8fb2f2912049..88f15feddccd 100644 --- a/test/nn/conv/test_eg_conv.py +++ b/test/nn/conv/test_eg_conv.py @@ -1,73 +1,65 @@ import torch +import torch_geometric.typing from torch_geometric.nn import EGConv from torch_geometric.testing import is_full_test from torch_geometric.typing import 
SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_eg_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = EGConv(16, 32) assert str(conv) == "EGConv(16, 32, aggregators=['symnorm'])" out = conv(x, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out.tolist() - conv(x, adj1.t()) + assert torch.allclose(conv(x, edge_index), out, atol=1e-6) + assert conv._cached_edge_index is not None + assert torch.allclose(conv(x, edge_index), out, atol=1e-6) assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + assert conv._cached_adj_t is not None + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert jit(x, edge_index).tolist() == out.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x, adj2.t()), out, atol=1e-6) def test_eg_conv_multiple_aggregators(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) conv = EGConv(16, 32, aggregators=["max", "min"]) assert str(conv) == "EGConv(16, 32, aggregators=['max', 'min'])" out = conv(x, edge_index) assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out.tolist() - conv(x, adj1.t()) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) def test_eg_conv_with_sparse_input_feature(): - x = torch.sparse_coo_tensor(indices=torch.tensor([[0, 0], [0, 1]]), - values=torch.tensor([1., 1.]), - size=torch.Size([4, 16])) + x = torch.randn(4, 16).to_sparse_coo() edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) conv = EGConv(16, 32) From 5a998e68d5c866af9206da7fdb8716e7c906cf13 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 14:35:04 +0200 Subject: [PATCH 1066/2432] Drop `torch_sparse` dependency in tests (13/n) (#7064) --- test/nn/conv/test_fa_conv.py | 41 +++++++++++--------- test/nn/conv/test_feast_conv.py | 27 ++++++++----- 
 test/nn/conv/test_film_conv.py  | 67 +++++++++++++++++++++------------
 3 files changed, 85 insertions(+), 50 deletions(-)

diff --git a/test/nn/conv/test_fa_conv.py b/test/nn/conv/test_fa_conv.py
index a20b0701b77c..d70137fbabb8 100644
--- a/test/nn/conv/test_fa_conv.py
+++ b/test/nn/conv/test_fa_conv.py
@@ -1,37 +1,41 @@
 import torch

+import torch_geometric.typing
 from torch_geometric.nn import FAConv
 from torch_geometric.testing import is_full_test
 from torch_geometric.typing import SparseTensor
+from torch_geometric.utils import to_torch_csc_tensor


 def test_fa_conv():
     x = torch.randn(4, 16)
     x_0 = torch.randn(4, 16)
     edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
-    row, col = edge_index
-    adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))
-    adj2 = adj1.to_torch_sparse_csc_tensor()
+    adj1 = to_torch_csc_tensor(edge_index, size=(4, 4))

     conv = FAConv(16, eps=1.0, cached=True)
     assert str(conv) == 'FAConv(16, eps=1.0)'
     out = conv(x, x_0, edge_index)
+    assert conv._cached_edge_index is not None
     assert out.size() == (4, 16)
-    assert torch.allclose(conv(x, x_0, adj1.t()), out)

-    # Run again to test the cached functionality:
-    assert conv._cached_edge_index is not None
-    assert conv._cached_adj_t is not None
-    assert torch.allclose(conv(x, x_0, edge_index), conv(x, x_0, adj1.t()))
+    if torch_geometric.typing.WITH_TORCH_SPARSE:
+        adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4))
+        assert torch.allclose(conv(x, x_0, adj2.t()), out)
+        assert conv._cached_adj_t is not None
+        assert torch.allclose(conv(x, x_0, adj2.t()), out)

     if is_full_test():
         t = '(Tensor, Tensor, Tensor, OptTensor, NoneType) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
         assert torch.allclose(jit(x, x_0, edge_index), out)

+    if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE:
         t = '(Tensor, Tensor, SparseTensor, OptTensor, NoneType) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
-        assert torch.allclose(jit(x, x_0, adj1.t()), out)
+        assert torch.allclose(jit(x, x_0, adj2.t()), out)

     conv.reset_parameters()
     assert conv._cached_edge_index is None
@@ -41,9 +45,10 @@ def test_fa_conv():
     conv.cached = False
     out = conv(x, x_0, edge_index)
     assert torch.allclose(conv(x, x_0, adj1.t()), out)
-    assert torch.allclose(conv(x, x_0, adj2.t()), out)
+    if torch_geometric.typing.WITH_TORCH_SPARSE:
+        assert torch.allclose(conv(x, x_0, adj2.t()), out)

-    # Test `return_attention_weights`.
+ # Test `return_attention_weights`: result = conv(x, x_0, edge_index, return_attention_weights=True) assert torch.allclose(result[0], out) assert result[1][0].size() == (2, 10) @@ -52,15 +57,16 @@ def test_fa_conv(): result = conv(x, x_0, adj1.t(), return_attention_weights=True) assert torch.allclose(result[0], out) - assert result[1].sizes() == [4, 4] and result[1].nnz() == 10 - assert conv._alpha is None - - result = conv(x, x_0, adj2.t(), return_attention_weights=True) - assert torch.allclose(result[0], out) assert result[1][0].size() == torch.Size([4, 4]) assert result[1][0]._nnz() == 10 assert conv._alpha is None + if torch_geometric.typing.WITH_TORCH_SPARSE: + result = conv(x, x_0, adj2.t(), return_attention_weights=True) + assert torch.allclose(result[0], out) + assert result[1].sizes() == [4, 4] and result[1].nnz() == 10 + assert conv._alpha is None + if is_full_test(): t = ('(Tensor, Tensor, Tensor, OptTensor, bool) ' '-> Tuple[Tensor, Tuple[Tensor, Tensor]]') @@ -71,10 +77,11 @@ def test_fa_conv(): assert result[1][1].size() == (10, ) assert conv._alpha is None + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = ('(Tensor, Tensor, SparseTensor, OptTensor, bool) ' '-> Tuple[Tensor, SparseTensor]') jit = torch.jit.script(conv.jittable(t)) - result = jit(x, x_0, adj1.t(), return_attention_weights=True) + result = jit(x, x_0, adj2.t(), return_attention_weights=True) assert torch.allclose(result[0], out) assert result[1].sizes() == [4, 4] and result[1].nnz() == 10 assert conv._alpha is None diff --git a/test/nn/conv/test_feast_conv.py b/test/nn/conv/test_feast_conv.py index 371e0e382cce..a5c92ad77f9f 100644 --- a/test/nn/conv/test_feast_conv.py +++ b/test/nn/conv/test_feast_conv.py @@ -1,17 +1,17 @@ import torch +import torch_geometric.typing from torch_geometric.nn import FeaStConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_feast_conv(): x1 = torch.randn(4, 16) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = FeaStConv(16, 32, heads=2) assert str(conv) == 'FeaStConv(16, 32, heads=2)' @@ -19,29 +19,38 @@ def test_feast_conv(): out = conv(x1, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert jit(x1, edge_index).tolist() == out.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() out = conv((x1, x2), edge_index) assert out.size() == (2, 32) assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) + + 
if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert jit((x1, x2), edge_index).tolist() == out.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_film_conv.py b/test/nn/conv/test_film_conv.py index f64aa14fc234..9563b8220dc0 100644 --- a/test/nn/conv/test_film_conv.py +++ b/test/nn/conv/test_film_conv.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn import FiLMConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor @@ -10,63 +11,81 @@ def test_film_conv(): x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4)) conv = FiLMConv(4, 32) assert str(conv) == 'FiLMConv(4, 32, num_relations=1)' - out1 = conv(x1, edge_index) - assert out1.size() == (4, 32) - assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist() + out = conv(x1, edge_index) + assert out.size() == (4, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out1.tolist() + assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist() + assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) conv = FiLMConv(4, 32, num_relations=2) assert str(conv) == 'FiLMConv(4, 32, num_relations=2)' - out1 = conv(x1, edge_index, edge_type) - assert out1.size() == (4, 32) - assert conv(x1, adj.t()).tolist() == out1.tolist() + out = conv(x1, edge_index, edge_type) + assert out.size() == (4, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_type, (4, 4)) + assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index, edge_type).tolist() == out1.tolist() + assert torch.allclose(jit(x1, edge_index, edge_type), out, atol=1e-6) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out1.tolist() - - adj = adj.sparse_resize((4, 2)) + assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) + # Test bipartite message passing: conv = FiLMConv((4, 16), 32) assert str(conv) == 'FiLMConv((4, 16), 32, num_relations=1)' - out1 = conv((x1, x2), edge_index) - assert out1.size() == (2, 32) - assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist() + out = conv((x1, x2), 
edge_index) + assert out.size() == (2, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out, atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out1.tolist() + assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist() + assert torch.allclose(jit((x1, x2), adj.t()), out, atol=1e-6) conv = FiLMConv((4, 16), 32, num_relations=2) assert str(conv) == 'FiLMConv((4, 16), 32, num_relations=2)' - out1 = conv((x1, x2), edge_index, edge_type) - assert out1.size() == (2, 32) - assert conv((x1, x2), adj.t()).tolist() == out1.tolist() + out = conv((x1, x2), edge_index, edge_type) + assert out.size() == (2, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_type, (4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out, atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist() + assert torch.allclose(jit((x1, x2), edge_index, edge_type), out, + atol=1e-6) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() + assert torch.allclose(jit((x1, x2), adj.t()), out, atol=1e-6) From 05504cc622f36a8fa98679284d140f9faab4aad7 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 21:54:43 +0200 Subject: [PATCH 1067/2432] Drop `torch_sparse` dependency in tests (14/n) (#7066) --- test/nn/conv/test_gat_conv.py | 72 ++++++++----- test/nn/conv/test_gated_graph_conv.py | 24 +++-- test/nn/conv/test_gatv2_conv.py | 50 +++++---- test/nn/conv/test_gcn2_conv.py | 34 +++--- test/nn/conv/test_gcn_conv.py | 40 +++++--- test/nn/conv/test_gen_conv.py | 142 ++++++++++++++------------ test/nn/conv/test_gin_conv.py | 96 +++++++++-------- test/nn/conv/test_gmm_conv.py | 45 +++++--- test/nn/conv/test_gps_conv.py | 15 ++- test/nn/conv/test_graph_conv.py | 121 ++++++++++++---------- 10 files changed, 373 insertions(+), 266 deletions(-) diff --git a/test/nn/conv/test_gat_conv.py b/test/nn/conv/test_gat_conv.py index 7ae53c04ebee..f17240fd0532 100644 --- a/test/nn/conv/test_gat_conv.py +++ b/test/nn/conv/test_gat_conv.py @@ -1,18 +1,18 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import GATConv from torch_geometric.testing import is_full_test, withCUDA from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_gat_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = GATConv(8, 32, heads=2) assert str(conv) == 'GATConv(8, 32, heads=2)' @@ -20,7 +20,10 @@ def test_gat_conv(): assert out.size() == (4, 64) assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out) 
assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size, NoneType) -> Tensor' @@ -28,9 +31,10 @@ def test_gat_conv(): assert torch.allclose(jit(x1, edge_index), out) assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) # Test `return_attention_weights`. result = conv(x1, edge_index, return_attention_weights=True) @@ -41,13 +45,14 @@ def test_gat_conv(): result = conv(x1, adj1.t(), return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - - result = conv(x1, adj2.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) assert result[1][0].size() == torch.Size([4, 4, 2]) assert result[1][0]._nnz() == 7 + if torch_geometric.typing.WITH_TORCH_SPARSE: + result = conv(x1, adj2.t(), return_attention_weights=True) + assert torch.allclose(result[0], out, atol=1e-6) + assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 + if is_full_test(): t = ('(Tensor, Tensor, OptTensor, Size, bool) -> ' 'Tuple[Tensor, Tuple[Tensor, Tensor]]') @@ -58,27 +63,33 @@ def test_gat_conv(): assert result[1][1].size() == (7, 2) assert result[1][1].min() >= 0 and result[1][1].max() <= 1 + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = ('(Tensor, SparseTensor, OptTensor, Size, bool) -> ' 'Tuple[Tensor, SparseTensor]') jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, adj1.t(), return_attention_weights=True) + result = jit(x1, adj2.t(), return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + conv = GATConv((8, 16), 32, heads=2) assert str(conv) == 'GATConv((8, 16), 32, heads=2)' out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, size=(4, 2)) assert out1.size() == (2, 64) - assert out2.size() == (2, 64) assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out1) assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + + out2 = conv((x1, None), edge_index, size=(4, 2)) + assert out2.size() == (2, 64) assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size, NoneType) -> Tensor' @@ -87,11 +98,12 @@ def test_gat_conv(): assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1) assert torch.allclose(jit((x1, None), edge_index, 
size=(4, 2)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = ('(OptPairTensor, SparseTensor, OptTensor, Size, NoneType) -> ' 'Tensor') jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj1.t()), out2, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj2.t()), out2, atol=1e-6) def test_gat_conv_with_edge_attr(): @@ -99,32 +111,36 @@ def test_gat_conv_with_edge_attr(): edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 1]]) edge_weight = torch.randn(edge_index.size(1)) edge_attr = torch.randn(edge_index.size(1), 4) - adj1 = SparseTensor.from_edge_index(edge_index, edge_weight) - adj2 = SparseTensor.from_edge_index(edge_index, edge_attr) conv = GATConv(8, 32, heads=2, edge_dim=1, fill_value=0.5) out = conv(x, edge_index, edge_weight) assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj1.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj1 = SparseTensor.from_edge_index(edge_index, edge_weight, (4, 4)) + with pytest.raises(NotImplementedError): + assert torch.allclose(conv(x, adj1.t()), out) conv = GATConv(8, 32, heads=2, edge_dim=1, fill_value='mean') out = conv(x, edge_index, edge_weight) assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj1.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + with pytest.raises(NotImplementedError): + assert torch.allclose(conv(x, adj1.t()), out) conv = GATConv(8, 32, heads=2, edge_dim=4, fill_value=0.5) out = conv(x, edge_index, edge_attr) assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj2.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) + with pytest.raises(NotImplementedError): + assert torch.allclose(conv(x, adj2.t()), out) conv = GATConv(8, 32, heads=2, edge_dim=4, fill_value='mean') out = conv(x, edge_index, edge_attr) assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj2.t()), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + with pytest.raises(NotImplementedError): + assert torch.allclose(conv(x, adj2.t()), out) @withCUDA diff --git a/test/nn/conv/test_gated_graph_conv.py b/test/nn/conv/test_gated_graph_conv.py index 08cbed131469..f2d9a7bf8ab7 100644 --- a/test/nn/conv/test_gated_graph_conv.py +++ b/test/nn/conv/test_gated_graph_conv.py @@ -1,30 +1,33 @@ import torch +import torch_geometric.typing from torch_geometric.nn import GatedGraphConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_gated_graph_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = GatedGraphConv(32, num_layers=3) assert str(conv) == 'GatedGraphConv(32, num_layers=3)' out1 = conv(x, edge_index) assert 
out1.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) out2 = conv(x, edge_index, value) assert out2.size() == (4, 32) assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -32,7 +35,8 @@ def test_gated_graph_conv(): assert jit(x, edge_index).tolist() == out1.tolist() assert jit(x, edge_index, value).tolist() == out2.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) diff --git a/test/nn/conv/test_gatv2_conv.py b/test/nn/conv/test_gatv2_conv.py index 4d1308576b9a..f8192ed6a5dd 100644 --- a/test/nn/conv/test_gatv2_conv.py +++ b/test/nn/conv/test_gatv2_conv.py @@ -1,17 +1,17 @@ import torch +import torch_geometric.typing from torch_geometric.nn import GATv2Conv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_gatv2_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = GATv2Conv(8, 32, heads=2) assert str(conv) == 'GATv2Conv(8, 32, heads=2)' @@ -19,16 +19,20 @@ def test_gatv2_conv(): assert out.size() == (4, 64) assert torch.allclose(conv(x1, edge_index), out) assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) # Test `return_attention_weights`. 
result = conv(x1, edge_index, return_attention_weights=True) @@ -40,14 +44,15 @@ def test_gatv2_conv(): result = conv(x1, adj1.t(), return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - assert conv._alpha is None - - result = conv(x1, adj2.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) assert result[1][0].size() == torch.Size([4, 4, 2]) assert result[1][0]._nnz() == 7 + if torch_geometric.typing.WITH_TORCH_SPARSE: + result = conv(x1, adj2.t(), return_attention_weights=True) + assert torch.allclose(result[0], out, atol=1e-6) + assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 + assert conv._alpha is None + if is_full_test(): t = ('(Tensor, Tensor, OptTensor, bool) -> ' 'Tuple[Tensor, Tuple[Tensor, Tensor]]') @@ -59,30 +64,35 @@ def test_gatv2_conv(): assert result[1][1].min() >= 0 and result[1][1].max() <= 1 assert conv._alpha is None + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = ('(Tensor, SparseTensor, OptTensor, bool) -> ' 'Tuple[Tensor, SparseTensor]') jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, adj1.t(), return_attention_weights=True) + result = jit(x1, adj2.t(), return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 assert conv._alpha is None - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out1 = conv((x1, x2), edge_index) - assert out1.size() == (2, 64) - assert torch.allclose(conv((x1, x2), edge_index), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + + out = conv((x1, x2), edge_index) + assert out.size() == (2, 64) + assert torch.allclose(conv((x1, x2), edge_index), out) + assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out1) + assert torch.allclose(jit((x1, x2), edge_index), out) t = '(OptPairTensor, SparseTensor, OptTensor, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) def test_gatv2_conv_with_edge_attr(): diff --git a/test/nn/conv/test_gcn2_conv.py b/test/nn/conv/test_gcn2_conv.py index 393f2445a4e0..c50e18b1febf 100644 --- a/test/nn/conv/test_gcn2_conv.py +++ b/test/nn/conv/test_gcn2_conv.py @@ -1,31 +1,34 @@ import torch +import torch_geometric.typing from torch_geometric.nn import GCN2Conv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_gcn2_conv(): x = torch.randn(4, 16) x_0 = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = 
adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = GCN2Conv(16, alpha=0.2) assert str(conv) == 'GCN2Conv(16, alpha=0.2, beta=1.0)' out1 = conv(x, x_0, edge_index) assert out1.size() == (4, 16) assert torch.allclose(conv(x, x_0, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, x_0, adj3.t()), out1, atol=1e-6) out2 = conv(x, x_0, edge_index, value) assert out2.size() == (4, 16) assert torch.allclose(conv(x, x_0, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, x_0, adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, x_0, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, x_0, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, Tensor, OptTensor) -> Tensor' @@ -33,16 +36,19 @@ def test_gcn2_conv(): assert jit(x, x_0, edge_index).tolist() == out1.tolist() assert jit(x, x_0, edge_index, value).tolist() == out2.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, x_0, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, x_0, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, x_0, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, x_0, adj4.t()), out2, atol=1e-6) conv.cached = True conv(x, x_0, edge_index) + assert conv._cached_edge_index is not None assert torch.allclose(conv(x, x_0, edge_index), out1, atol=1e-6) - conv._cached_edge_index = None - conv(x, x_0, adj3.t()) - assert torch.allclose(conv(x, x_0, adj3.t()), out1, atol=1e-6) - conv(x, x_0, adj1.t()) assert torch.allclose(conv(x, x_0, adj1.t()), out1, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + conv(x, x_0, adj3.t()) + assert conv._cached_adj_t is not None + assert torch.allclose(conv(x, x_0, adj3.t()), out1, atol=1e-6) diff --git a/test/nn/conv/test_gcn_conv.py b/test/nn/conv/test_gcn_conv.py index 247e2937b31a..b43f90080272 100644 --- a/test/nn/conv/test_gcn_conv.py +++ b/test/nn/conv/test_gcn_conv.py @@ -3,51 +3,61 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import GCNConv from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor -from torch_geometric.utils import to_torch_coo_tensor +from torch_geometric.utils import to_torch_coo_tensor, to_torch_csc_tensor def test_gcn_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = GCNConv(16, 32) assert str(conv) == 'GCNConv(16, 32)' + out1 = conv(x, edge_index) assert out1.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + out2 = conv(x, edge_index, value) 
assert out2.size() == (4, 32) assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) conv.cached = True conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() - conv(x, adj1.t()) + assert conv._cached_edge_index is not None + assert torch.allclose(conv(x, edge_index), out1, atol=1e-6) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + conv(x, adj3.t()) + assert conv._cached_adj_t is not None + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + def test_gcn_conv_with_decomposed_layers(): x = torch.randn(4, 16) diff --git a/test/nn/conv/test_gen_conv.py b/test/nn/conv/test_gen_conv.py index d7b4a41eb3ba..558d012a693b 100644 --- a/test/nn/conv/test_gen_conv.py +++ b/test/nn/conv/test_gen_conv.py @@ -1,9 +1,11 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import GENConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_coo_tensor @pytest.mark.parametrize('aggr', [ @@ -15,113 +17,124 @@ def test_gen_conv(aggr): x1 = torch.randn(4, 16) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0), 16) - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj3 = adj1.to_torch_sparse_coo_tensor() - adj4 = adj2.to_torch_sparse_coo_tensor() + value = torch.randn(edge_index.size(1), 16) + adj1 = to_torch_coo_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_coo_tensor(edge_index, value, size=(4, 4)) conv = GENConv(16, 32, aggr, edge_dim=16, msg_norm=True) assert str(conv) == f'GENConv(16, 32, aggr={aggr})' - out11 = conv(x1, edge_index) - assert out11.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out11) - assert torch.allclose(conv(x1, adj1.t()), out11) - assert torch.allclose(conv(x1, adj3.t().coalesce()), out11) - - out12 = conv(x1, edge_index, value) - assert out12.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out12) - assert torch.allclose(conv(x1, adj2.t()), out12) + out1 = conv(x1, edge_index) + assert out1.size() == (4, 32) + assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out1) + assert torch.allclose(conv(x1, adj1.t().coalesce()), out1) + + out2 = conv(x1, 
edge_index, value) + assert out2.size() == (4, 32) + assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out2) # t() expects a tensor with <= 2 sparse and 0 dense dimensions - assert torch.allclose(conv(x1, adj4.transpose(1, 0).coalesce()), out12) + assert torch.allclose(conv(x1, adj2.transpose(1, 0).coalesce()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj3.t()), out1) + assert torch.allclose(conv(x1, adj4.t()), out2) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out11, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11, + assert torch.allclose(jit(x1, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out1, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, value), out12, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12, + assert torch.allclose(jit(x1, edge_index, value), out2, atol=1e-6) + assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out11) - assert torch.allclose(jit(x1, adj2.t()), out12) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj2.sparse_resize((4, 2)) - adj3 = adj1.to_torch_sparse_coo_tensor() - adj4 = adj2.to_torch_sparse_coo_tensor() - - out21 = conv((x1, x2), edge_index) - assert out21.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out21) - assert torch.allclose(conv((x1, x2), adj1.t()), out21) - assert torch.allclose(conv((x1, x2), adj3.t().coalesce()), out21) - - out22 = conv((x1, x2), edge_index, value) - assert out22.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out22) - assert torch.allclose(conv((x1, x2), adj2.t()), out22) + assert torch.allclose(jit(x1, adj3.t()), out1) + assert torch.allclose(jit(x1, adj4.t()), out2) + + # Test bipartite message passing: + adj1 = to_torch_coo_tensor(edge_index, size=(4, 2)) + adj2 = to_torch_coo_tensor(edge_index, value, size=(4, 2)) + + out1 = conv((x1, x2), edge_index) + assert out1.size() == (2, 32) + assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out1) + assert torch.allclose(conv((x1, x2), adj1.t().coalesce()), out1) + + out2 = conv((x1, x2), edge_index, value) + assert out2.size() == (2, 32) + assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out2) assert torch.allclose(conv((x1, x2), - adj4.transpose(1, 0).coalesce()), out22) + adj2.transpose(1, 0).coalesce()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + assert torch.allclose(conv((x1, x2), adj3.t()), out1) + assert torch.allclose(conv((x1, x2), adj4.t()), out2) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out21, atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21, + assert torch.allclose(jit((x1, x2), edge_index), out1, atol=1e-6) + assert 
torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1, atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, value), out22, + assert torch.allclose(jit((x1, x2), edge_index, value), out2, atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22, + assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out21) - assert torch.allclose(jit((x1, x2), adj2.t()), out22) + assert torch.allclose(jit((x1, x2), adj3.t()), out1) + assert torch.allclose(jit((x1, x2), adj4.t()), out2) + # Test bipartite message passing with unequal feature dimensions: conv.reset_parameters() assert float(conv.msg_norm.scale) == 1 x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() + conv = GENConv((8, 16), 32, aggr) assert str(conv) == f'GENConv((8, 16), 32, aggr={aggr})' + out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, size=(4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) - assert torch.allclose(conv((x1, x2), adj2.t().coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) - assert torch.allclose(conv((x1, None), adj2.t().coalesce()), out2) - - value = torch.randn(row.size(0), 4) - adj1 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() + assert torch.allclose(conv((x1, x2), adj1.t().coalesce()), out1) + + out2 = conv((x1, None), edge_index, size=(4, 2)) + assert out2.size() == (2, 32) + assert torch.allclose(conv((x1, None), adj1.t().coalesce()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv((x1, x2), adj3.t()), out1) + assert torch.allclose(conv((x1, None), adj3.t()), out2) + + # Test lazy initialization: conv = GENConv((-1, -1), 32, aggr, edge_dim=-1) assert str(conv) == f'GENConv((-1, -1), 32, aggr={aggr})' out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, size=(4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, value, size=(4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) assert torch.allclose(conv((x1, x2), adj2.transpose(1, 0).coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) + + out2 = conv((x1, None), edge_index, value, size=(4, 2)) + assert out2.size() == (2, 32) assert torch.allclose(conv((x1, None), adj2.transpose(1, 0).coalesce()), out2) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv((x1, x2), adj4.t()), out1) + assert torch.allclose(conv((x1, None), adj4.t()), out2) + if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) @@ -132,7 +145,8 @@ def test_gen_conv(aggr): assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), 
adj1.t()), out2, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj4.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj4.t()), out2, atol=1e-6) diff --git a/test/nn/conv/test_gin_conv.py b/test/nn/conv/test_gin_conv.py index 66930cfe3bc6..bd4429aa0258 100644 --- a/test/nn/conv/test_gin_conv.py +++ b/test/nn/conv/test_gin_conv.py @@ -3,18 +3,18 @@ from torch.nn import ReLU from torch.nn import Sequential as Seq +import torch_geometric.typing from torch_geometric.nn import GINConv, GINEConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_gin_conv(): x1 = torch.randn(4, 16) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) conv = GINConv(nn, train_eps=True) @@ -27,8 +27,11 @@ def test_gin_conv(): out = conv(x1, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out, atol=1e-6) - assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, Size) -> Tensor' @@ -36,43 +39,47 @@ def test_gin_conv(): assert jit(x1, edge_index).tolist() == out.tolist() assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() + assert jit(x1, adj2.t()).tolist() == out.tolist() + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj = adj.sparse_resize((4, 2)) - adj2 = adj.to_torch_sparse_csc_tensor() out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, (4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) + assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) + + out2 = conv((x1, None), edge_index, (4, 2)) + assert out2.size() == (2, 32) + assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) if is_full_test(): t = '(OptPairTensor, Tensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out1.tolist() - assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist() - assert jit((x1, None), edge_index, - size=(4, 2)).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), edge_index), out1) + 
assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1) + assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() - assert jit((x1, None), adj.t()).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), adj2.t()), out1) + assert torch.allclose(jit((x1, None), adj2.t()), out2) def test_gine_conv(): x1 = torch.randn(4, 16) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0), 16) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.randn(edge_index.size(1), 16) nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) conv = GINEConv(nn, train_eps=True) @@ -84,41 +91,50 @@ def test_gine_conv(): '))') out = conv(x1, edge_index, value) assert out.size() == (4, 32) - assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist() - assert conv(x1, adj.t()).tolist() == out.tolist() + assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index, value).tolist() == out.tolist() - assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist() + assert torch.allclose(jit(x1, edge_index, value), out) + assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() + assert torch.allclose(jit(x1, adj.t()), out) - adj = adj.sparse_resize((4, 2)) + # Test bipartite message passing: out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) assert out1.size() == (2, 32) + assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) + + out2 = conv((x1, None), edge_index, value, (4, 2)) assert out2.size() == (2, 32) - assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist() - assert conv((x1, x2), adj.t()).tolist() == out1.tolist() - assert conv((x1, None), adj.t()).tolist() == out2.tolist() + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out1) + assert torch.allclose(conv((x1, None), adj.t()), out2) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index, value).tolist() == out1.tolist() - assert jit((x1, x2), edge_index, value, - size=(4, 2)).tolist() == out1.tolist() - assert jit((x1, None), edge_index, value, - size=(4, 2)).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), edge_index, value), out1) + assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), + out1) + assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), + out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert 
jit((x1, x2), adj.t()).tolist() == out1.tolist() - assert jit((x1, None), adj.t()).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), adj.t()), out1) + assert torch.allclose(jit((x1, None), adj.t()), out2) def test_gine_conv_edge_dim(): diff --git a/test/nn/conv/test_gmm_conv.py b/test/nn/conv/test_gmm_conv.py index f839c67e4ddc..8939171a3c16 100644 --- a/test/nn/conv/test_gmm_conv.py +++ b/test/nn/conv/test_gmm_conv.py @@ -1,9 +1,11 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import GMMConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_coo_tensor @pytest.mark.parametrize('separate_gaussians', [True, False]) @@ -11,10 +13,8 @@ def test_gmm_conv(separate_gaussians): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj1 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_coo_tensor() + value = torch.rand(edge_index.size(1), 3) + adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 4)) conv = GMMConv(8, 32, dim=3, kernel_size=25, separate_gaussians=separate_gaussians) @@ -22,9 +22,12 @@ def test_gmm_conv(separate_gaussians): out = conv(x1, edge_index, value) assert out.size() == (4, 32) assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj1.t()), out) # t() expects a tensor with <= 2 sparse and 0 dense dimensions - assert torch.allclose(conv(x1, adj2.transpose(0, 1).coalesce()), out) + assert torch.allclose(conv(x1, adj1.transpose(0, 1).coalesce()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' @@ -32,26 +35,33 @@ def test_gmm_conv(separate_gaussians): assert torch.allclose(jit(x1, edge_index, value), out) assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out) + assert torch.allclose(jit(x1, adj2.t()), out) + + # Test bipartite message passing: + adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() conv = GMMConv((8, 16), 32, dim=3, kernel_size=5, separate_gaussians=separate_gaussians) assert str(conv) == 'GMMConv((8, 16), 32, dim=3)' + out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) assert torch.allclose(conv((x1, x2), - adj2.transpose(0, 1).coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) + adj1.transpose(0, 1).coalesce()), out1) + + out2 = conv((x1, None), edge_index, value, (4, 2)) + assert out2.size() == (2, 32) assert torch.allclose(conv((x1, None), - adj2.transpose(0, 1).coalesce()), out2) + adj1.transpose(0, 1).coalesce()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + assert torch.allclose(conv((x1, 
x2), adj2.t()), out1) + assert torch.allclose(conv((x1, None), adj2.t()), out2) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' @@ -62,10 +72,11 @@ def test_gmm_conv(separate_gaussians): assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1) - assert torch.allclose(jit((x1, None), adj1.t()), out2) + assert torch.allclose(jit((x1, x2), adj2.t()), out1) + assert torch.allclose(jit((x1, None), adj2.t()), out2) @pytest.mark.parametrize('separate_gaussians', [True, False]) diff --git a/test/nn/conv/test_gps_conv.py b/test/nn/conv/test_gps_conv.py index f38069746bd1..eda1a4889ff7 100644 --- a/test/nn/conv/test_gps_conv.py +++ b/test/nn/conv/test_gps_conv.py @@ -1,18 +1,18 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import GPSConv, SAGEConv from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor @pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm']) def test_gps_conv(norm): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) - row, col = edge_index - adj1 = SparseTensor(row=col, col=row, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() batch = torch.tensor([0, 0, 1, 1]) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = GPSConv(16, conv=SAGEConv(16, 16), heads=4, norm=norm) conv.reset_parameters() @@ -22,9 +22,14 @@ def test_gps_conv(norm): out = conv(x, edge_index) assert out.size() == (4, 16) assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) out = conv(x, edge_index, batch) assert out.size() == (4, 16) assert torch.allclose(conv(x, adj1.t(), batch), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t(), batch), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x, adj2.t(), batch), out, atol=1e-6) diff --git a/test/nn/conv/test_graph_conv.py b/test/nn/conv/test_graph_conv.py index 40cfe683fcda..14bd30481d4a 100644 --- a/test/nn/conv/test_graph_conv.py +++ b/test/nn/conv/test_graph_conv.py @@ -1,86 +1,101 @@ import torch +import torch_geometric.typing from torch_geometric.nn import GraphConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_graph_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index value = torch.randn(edge_index.size(1)) - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = GraphConv(8, 32) assert str(conv) == 'GraphConv(8, 32)' - out11 = conv(x1, edge_index) - assert out11.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out11, atol=1e-6) - assert torch.allclose(conv(x1, 
adj1.t()), out11, atol=1e-6) - assert torch.allclose(conv(x1, adj3.t()), out11, atol=1e-6) - - out12 = conv(x1, edge_index, value) - assert out12.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out12, + out1 = conv(x1, edge_index) + assert out1.size() == (4, 32) + assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out1, atol=1e-6) + assert torch.allclose(conv(x1, adj1.t()), out1, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj3.t()), out1, atol=1e-6) + + out2 = conv(x1, edge_index, value) + assert out2.size() == (4, 32) + assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out2, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out12, atol=1e-6) - assert torch.allclose(conv(x1, adj4.t()), out12, atol=1e-6) + assert torch.allclose(conv(x1, adj2.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out11) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11) - assert torch.allclose(jit(x1, edge_index, value), out12) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12) + assert torch.allclose(jit(x1, edge_index), out1) + assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out1) + assert torch.allclose(jit(x1, edge_index, value), out2) + assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out11) - assert torch.allclose(jit(x1, adj2.t()), out12) + assert torch.allclose(jit(x1, adj3.t()), out1) + assert torch.allclose(jit(x1, adj4.t()), out2) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj2.sparse_resize((4, 2)) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() conv = GraphConv((8, 16), 32) assert str(conv) == 'GraphConv((8, 16), 32)' - out21 = conv((x1, x2), edge_index) - out22 = conv((x1, x2), edge_index, value) - out23 = conv((x1, None), edge_index, size=(4, 2)) - out24 = conv((x1, None), edge_index, value, size=(4, 2)) - assert out21.size() == (2, 32) - assert out22.size() == (2, 32) - assert out23.size() == (2, 32) - assert out24.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out21, - atol=1e-6) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out22, - atol=1e-6) - assert torch.allclose(conv((x1, x2), adj1.t()), out21, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out22, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj3.t()), out21, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj4.t()), out22, atol=1e-6) + out1 = conv((x1, x2), edge_index) + assert out1.size() == (2, 32) + assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out1) + assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) + + out2 = conv((x1, None), edge_index, size=(4, 2)) + assert out2.size() == (2, 32) + assert 
torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6)
+
+    out3 = conv((x1, x2), edge_index, value)
+    assert out3.size() == (2, 32)
+    assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out3)
+    assert torch.allclose(conv((x1, x2), adj2.t()), out3, atol=1e-6)
+
+    out4 = conv((x1, None), edge_index, value, size=(4, 2))
+    assert out4.size() == (2, 32)
+    assert torch.allclose(conv((x1, None), adj2.t()), out4, atol=1e-6)
+
+    if torch_geometric.typing.WITH_TORCH_SPARSE:
+        adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2))
+        adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 2))
+        assert torch.allclose(conv((x1, x2), adj3.t()), out1, atol=1e-6)
+        assert torch.allclose(conv((x1, None), adj3.t()), out2, atol=1e-6)
+        assert torch.allclose(conv((x1, x2), adj4.t()), out3, atol=1e-6)
+        assert torch.allclose(conv((x1, None), adj4.t()), out4, atol=1e-6)
 
     if is_full_test():
         t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
-        assert torch.allclose(jit((x1, x2), edge_index), out21)
-        assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21)
-        assert torch.allclose(jit((x1, x2), edge_index, value), out22)
-        assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22)
-        assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out23)
-        assert torch.allclose(jit((x1, None), edge_index, value, (4, 2)),
-                              out24)
+        assert torch.allclose(jit((x1, x2), edge_index), out1)
+        assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1)
+        assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2)
+        assert torch.allclose(jit((x1, x2), edge_index, value), out3)
+        assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out3)
+        assert torch.allclose(jit((x1, None), edge_index, value, (4, 2)), out4)
+
+    if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE:
         t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
-        assert torch.allclose(jit((x1, x2), adj1.t()), out21, atol=1e-6)
-        assert torch.allclose(jit((x1, x2), adj2.t()), out22, atol=1e-6)
-        assert torch.allclose(jit((x1, None), adj1.t()), out23, atol=1e-6)
-        assert torch.allclose(jit((x1, None), adj2.t()), out24, atol=1e-6)
+        assert torch.allclose(jit((x1, x2), adj3.t()), out1, atol=1e-6)
+        assert torch.allclose(jit((x1, None), adj3.t()), out2, atol=1e-6)
+        assert torch.allclose(jit((x1, x2), adj4.t()), out3, atol=1e-6)
+        assert torch.allclose(jit((x1, None), adj4.t()), out4, atol=1e-6)

From 99d1c04fe1e820a5c13b6935279d28916e949262 Mon Sep 17 00:00:00 2001
From: Matthias Fey 
Date: Tue, 28 Mar 2023 22:30:16 +0200
Subject: [PATCH 1068/2432] Drop `torch_sparse` dependency in tests (15/n)
 (#7067)

---
 test/nn/conv/test_han_conv.py  |  55 +++++++++--------
 test/nn/conv/test_heat_conv.py |  27 ++++-----
 test/nn/conv/test_hgt_conv.py  | 104 +++++++++++++++++----------------
 3 files changed, 98 insertions(+), 88 deletions(-)

diff --git a/test/nn/conv/test_han_conv.py b/test/nn/conv/test_han_conv.py
index 74b150048f5f..299d7e11fc41 100644
--- a/test/nn/conv/test_han_conv.py
+++ b/test/nn/conv/test_han_conv.py
@@ -1,8 +1,9 @@
 import torch
 
+import torch_geometric.typing
 from torch_geometric.nn import HANConv
 from torch_geometric.typing import SparseTensor
-from torch_geometric.utils import coalesce
+from torch_geometric.utils import coalesce, to_torch_csc_tensor
 
 
 def test_han_conv():
@@ -21,15 +22,12 @@ def test_han_conv():
     }
 
     adj_t_dict1 = {}
-    adj_t_dict2 = {}
     for edge_type, edge_index in 
edge_index_dict.items(): src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) in_channels = {'author': 16, 'paper': 12, 'term': 3} @@ -49,10 +47,17 @@ def test_han_conv(): for key in out_dict1.keys(): assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict3.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for key in out_dict3.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) # Test non-zero dropout: conv = HANConv(in_channels, 16, metadata, heads=2, dropout=0.1) @@ -76,15 +81,12 @@ def test_han_conv_lazy(): } adj_t_dict1 = {} - adj_t_dict2 = {} for edge_type, edge_index in edge_index_dict.items(): src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) conv = HANConv(-1, 16, metadata, heads=2) @@ -99,10 +101,17 @@ def test_han_conv_lazy(): for key in out_dict1.keys(): assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for key in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) def test_han_conv_empty_tensor(): diff --git a/test/nn/conv/test_heat_conv.py b/test/nn/conv/test_heat_conv.py index b0cfdf218b1b..99815b0cb854 100644 --- a/test/nn/conv/test_heat_conv.py +++ b/test/nn/conv/test_heat_conv.py @@ -1,26 +1,31 @@ +import pytest import torch +import torch_geometric.typing from torch_geometric.nn import HEATConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor -def test_heat_conv(): +@pytest.mark.parametrize('concat', [True, False]) +def test_heat_conv(concat): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index edge_attr = torch.randn((4, 2)) - adj = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4)) node_type = 
torch.tensor([0, 0, 1, 2]) edge_type = torch.tensor([0, 2, 1, 2]) conv = HEATConv(in_channels=8, out_channels=16, num_node_types=3, num_edge_types=3, edge_type_emb_dim=5, edge_dim=2, - edge_attr_emb_dim=6, heads=2, concat=True) + edge_attr_emb_dim=6, heads=2, concat=concat) assert str(conv) == 'HEATConv(8, 16, heads=2)' + out = conv(x, edge_index, node_type, edge_type, edge_attr) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out) + assert out.size() == (4, 32 if concat else 16) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) + assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out) if is_full_test(): t = '(Tensor, Tensor, Tensor, Tensor, OptTensor) -> Tensor' @@ -28,15 +33,7 @@ def test_heat_conv(): assert torch.allclose( jit(x, edge_index, node_type, edge_type, edge_attr), out) - conv = HEATConv(in_channels=8, out_channels=16, num_node_types=3, - num_edge_types=3, edge_type_emb_dim=5, edge_dim=2, - edge_attr_emb_dim=6, heads=2, concat=False) - assert str(conv) == 'HEATConv(8, 16, heads=2)' - out = conv(x, edge_index, node_type, edge_type, edge_attr) - assert out.size() == (4, 16) - assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out) - - if is_full_test(): + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, adj.t(), node_type, edge_type), out) diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index e226bdd49e65..bb41663d2e4a 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -1,10 +1,12 @@ import torch +import torch_geometric.typing from torch_geometric.data import HeteroData from torch_geometric.nn import HGTConv from torch_geometric.profile import benchmark +from torch_geometric.testing import get_random_edge_index from torch_geometric.typing import SparseTensor -from torch_geometric.utils import coalesce +from torch_geometric.utils import coalesce, to_torch_csc_tensor def test_hgt_conv_same_dimensions(): @@ -12,10 +14,7 @@ def test_hgt_conv_same_dimensions(): 'author': torch.randn(4, 16), 'paper': torch.randn(6, 16), } - - row = torch.randint(0, 4, (20, ), dtype=torch.long) - col = torch.randint(0, 6, (20, ), dtype=torch.long) - edge_index = coalesce(torch.stack([row, col], dim=0)) + edge_index = coalesce(get_random_edge_index(4, 6, num_edges=20)) edge_index_dict = { ('author', 'writes', 'paper'): edge_index, @@ -23,15 +22,12 @@ def test_hgt_conv_same_dimensions(): } adj_t_dict1 = {} - adj_t_dict2 = {} for edge_type, edge_index in edge_index_dict.items(): src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) @@ -47,10 +43,17 @@ def test_hgt_conv_same_dimensions(): for key in out_dict1.keys(): assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + if 
torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for key in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) # TODO: Test JIT functionality. We need to wait on this one until PyTorch # allows indexing `ParameterDict` mappings :( @@ -61,10 +64,7 @@ def test_hgt_conv_different_dimensions(): 'author': torch.randn(4, 16), 'paper': torch.randn(6, 32), } - - row = torch.randint(0, 4, (20, ), dtype=torch.long) - col = torch.randint(0, 6, (20, ), dtype=torch.long) - edge_index = coalesce(torch.stack([row, col], dim=0)) + edge_index = coalesce(get_random_edge_index(4, 6, num_edges=20)) edge_index_dict = { ('author', 'writes', 'paper'): edge_index, @@ -72,15 +72,12 @@ def test_hgt_conv_different_dimensions(): } adj_t_dict1 = {} - adj_t_dict2 = {} for edge_type, edge_index in edge_index_dict.items(): src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) @@ -99,10 +96,17 @@ def test_hgt_conv_different_dimensions(): for key in out_dict1.keys(): assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for node_type in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for node_type in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) def test_hgt_conv_lazy(): @@ -110,10 +114,7 @@ def test_hgt_conv_lazy(): 'author': torch.randn(4, 16), 'paper': torch.randn(6, 32), } - - row = torch.randint(0, 4, (20, ), dtype=torch.long) - col = torch.randint(0, 6, (20, ), dtype=torch.long) - edge_index = coalesce(torch.stack([row, col], dim=0)) + edge_index = coalesce(get_random_edge_index(4, 6, num_edges=20)) edge_index_dict = { ('author', 'writes', 'paper'): edge_index, @@ -121,15 +122,12 @@ def test_hgt_conv_lazy(): } adj_t_dict1 = {} - adj_t_dict2 = {} for edge_type, edge_index in edge_index_dict.items(): src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) @@ -145,10 +143,17 @@ def test_hgt_conv_lazy(): for key in out_dict1.keys(): assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - 
out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for key in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) def test_hgt_conv_out_of_place(): @@ -156,11 +161,10 @@ def test_hgt_conv_out_of_place(): data['author'].x = torch.randn(4, 16) data['paper'].x = torch.randn(6, 32) - index1 = torch.randint(0, 4, (20, ), dtype=torch.long) - index2 = torch.randint(0, 6, (20, ), dtype=torch.long) + edge_index = coalesce(get_random_edge_index(4, 6, num_edges=20)) - data['author', 'paper'].edge_index = torch.stack([index1, index2], dim=0) - data['paper', 'author'].edge_index = torch.stack([index2, index1], dim=0) + data['author', 'paper'].edge_index = edge_index + data['paper', 'author'].edge_index = edge_index.flip([0]) conv = HGTConv(-1, 64, data.metadata(), heads=1) From 4416a77df8cb5610cdaef11fb2f770d9d97f6a0d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 28 Mar 2023 22:38:11 +0200 Subject: [PATCH 1069/2432] Drop `torch_sparse` dependency in tests (16/n) (#7068) --- test/nn/conv/test_le_conv.py | 21 +++++++++------- test/nn/conv/test_lg_conv.py | 25 ++++++++++++-------- test/nn/conv/test_mf_conv.py | 44 ++++++++++++++++++++-------------- test/nn/conv/test_nn_conv.py | 46 +++++++++++++++++++++++------------- 4 files changed, 82 insertions(+), 54 deletions(-) diff --git a/test/nn/conv/test_le_conv.py b/test/nn/conv/test_le_conv.py index 4a60638be603..35c6c5d6fedb 100644 --- a/test/nn/conv/test_le_conv.py +++ b/test/nn/conv/test_le_conv.py @@ -1,30 +1,33 @@ import torch +import torch_geometric.typing from torch_geometric.nn import LEConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_le_conv(): - in_channels, out_channels = (16, 32) + x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - num_nodes = edge_index.max().item() + 1 - x = torch.randn((num_nodes, in_channels)) - adj1 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) - conv = LEConv(in_channels, out_channels) + conv = LEConv(16, 32) assert str(conv) == 'LEConv(16, 32)' out = conv(x, edge_index) - assert out.size() == (num_nodes, out_channels) + assert out.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out) - assert torch.allclose(conv(x, adj2.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) torch.allclose(jit(x, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out) + assert torch.allclose(jit(x, adj2.t()), out) diff --git a/test/nn/conv/test_lg_conv.py 
b/test/nn/conv/test_lg_conv.py index bea2152e0875..3f82769f6cd1 100644 --- a/test/nn/conv/test_lg_conv.py +++ b/test/nn/conv/test_lg_conv.py @@ -1,30 +1,34 @@ import torch +import torch_geometric.typing from torch_geometric.nn import LGConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_lg_conv(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = LGConv() assert str(conv) == 'LGConv()' out1 = conv(x, edge_index) assert out1.size() == (4, 8) assert torch.allclose(conv(x, adj1.t()), out1) - assert torch.allclose(conv(x, adj3.t()), out1) + out2 = conv(x, edge_index, value) assert out2.size() == (4, 8) assert torch.allclose(conv(x, adj2.t()), out2) - assert torch.allclose(conv(x, adj4.t()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1) + assert torch.allclose(conv(x, adj4.t()), out2) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -32,7 +36,8 @@ def test_lg_conv(): assert torch.allclose(jit(x, edge_index), out1) assert torch.allclose(jit(x, edge_index, value), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1) - assert torch.allclose(jit(x, adj2.t()), out2) + assert torch.allclose(jit(x, adj3.t()), out1) + assert torch.allclose(jit(x, adj4.t()), out2) diff --git a/test/nn/conv/test_mf_conv.py b/test/nn/conv/test_mf_conv.py index acde3614e688..9473c5d87457 100644 --- a/test/nn/conv/test_mf_conv.py +++ b/test/nn/conv/test_mf_conv.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn import MFConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor @@ -9,46 +10,53 @@ def test_mf_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) conv = MFConv(8, 32) assert str(conv) == 'MFConv(8, 32)' out = conv(x1, edge_index) assert out.size() == (4, 32) - assert conv(x1, edge_index, size=(4, 4)).tolist() == out.tolist() - assert conv(x1, adj.t()).tolist() == out.tolist() + assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj.t()), out) if is_full_test(): t = '(Tensor, Tensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out.tolist() - assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist() + assert torch.allclose(jit(x1, edge_index), out) + assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out) + if is_full_test() and 
torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() + assert torch.allclose(jit(x1, adj.t()), out) - adj = adj.sparse_resize((4, 2)) + # Test bipartite message passing: conv = MFConv((8, 16), 32) assert str(conv) == 'MFConv((8, 16), 32)' + out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, (4, 2)) assert out1.size() == (2, 32) + assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1) + + out2 = conv((x1, None), edge_index, (4, 2)) assert out2.size() == (2, 32) - assert conv((x1, x2), edge_index, (4, 2)).tolist() == out1.tolist() - assert conv((x1, x2), adj.t()).tolist() == out1.tolist() - assert conv((x1, None), adj.t()).tolist() == out2.tolist() + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out1) + assert torch.allclose(conv((x1, None), adj.t()), out2) if is_full_test(): t = '(OptPairTensor, Tensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out1.tolist() - assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist() - assert jit((x1, None), edge_index, - size=(4, 2)).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), edge_index), out1) + assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1) + assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() - assert jit((x1, None), adj.t()).tolist() == out2.tolist() + assert torch.allclose(jit((x1, x2), adj.t()), out1) + assert torch.allclose(jit((x1, None), adj.t()), out2) diff --git a/test/nn/conv/test_nn_conv.py b/test/nn/conv/test_nn_conv.py index 32bc58047347..e0db2f9575d1 100644 --- a/test/nn/conv/test_nn_conv.py +++ b/test/nn/conv/test_nn_conv.py @@ -3,19 +3,19 @@ from torch.nn import ReLU from torch.nn import Sequential as Seq +import torch_geometric.typing from torch_geometric.nn import NNConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_coo_tensor def test_nn_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj1 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_coo_tensor() + value = torch.rand(edge_index.size(1), 3) + adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 4)) nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32)) conv = NNConv(8, 32, nn=nn) @@ -25,11 +25,15 @@ def test_nn_conv(): ' (1): ReLU()\n' ' (2): Linear(in_features=32, out_features=256, bias=True)\n' '))') + out = conv(x1, edge_index, value) assert out.size() == (4, 32) assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj1.t()), out) - assert torch.allclose(conv(x1, adj2.transpose(0, 1).coalesce()), out) + assert torch.allclose(conv(x1, adj1.transpose(0, 1).coalesce()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out) if is_full_test(): t = 
'(Tensor, Tensor, OptTensor, Size) -> Tensor' @@ -37,12 +41,14 @@ def test_nn_conv(): assert torch.allclose(jit(x1, edge_index, value), out) assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out) + assert torch.allclose(jit(x1, adj2.t()), out) + + # Test bipartite message passing: + adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() conv = NNConv((8, 16), 32, nn=nn) assert str(conv) == ( 'NNConv((8, 16), 32, aggr=add, nn=Sequential(\n' @@ -50,17 +56,22 @@ def test_nn_conv(): ' (1): ReLU()\n' ' (2): Linear(in_features=32, out_features=256, bias=True)\n' '))') + out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) assert torch.allclose(conv((x1, x2), - adj2.transpose(0, 1).coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) + adj1.transpose(0, 1).coalesce()), out1) + + out2 = conv((x1, None), edge_index, value, (4, 2)) + assert out2.size() == (2, 32) assert torch.allclose(conv((x1, None), - adj2.transpose(0, 1).coalesce()), out2) + adj1.transpose(0, 1).coalesce()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out1) + assert torch.allclose(conv((x1, None), adj2.t()), out2) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' @@ -71,7 +82,8 @@ def test_nn_conv(): assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1) - assert torch.allclose(jit((x1, None), adj1.t()), out2) + assert torch.allclose(jit((x1, x2), adj2.t()), out1) + assert torch.allclose(jit((x1, None), adj2.t()), out2) From 152ab41e3f6211a780b19b7a5850c1b60e21767e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 29 Mar 2023 05:43:22 +0200 Subject: [PATCH 1070/2432] Drop `torch_sparse` dependency in tests (17/n) (#7069) --- test/nn/conv/test_pan_conv.py | 17 +++++--- test/nn/conv/test_pdn_conv.py | 14 ++++--- test/nn/conv/test_pna_conv.py | 28 +++++++------- test/nn/conv/test_point_conv.py | 41 +++++++++++--------- test/nn/conv/test_point_gnn_conv.py | 14 ++++--- test/nn/conv/test_point_transformer_conv.py | 39 +++++++++++-------- test/nn/conv/test_ppf_conv.py | 43 +++++++++++---------- torch_geometric/nn/conv/pan_conv.py | 7 +++- 8 files changed, 118 insertions(+), 85 deletions(-) diff --git a/test/nn/conv/test_pan_conv.py b/test/nn/conv/test_pan_conv.py index 6dfdc86e6de6..41a53ae8f139 100644 --- a/test/nn/conv/test_pan_conv.py +++ b/test/nn/conv/test_pan_conv.py @@ -1,15 +1,19 @@ import torch +import torch_geometric.typing from torch_geometric.nn import PANConv +from torch_geometric.testing import withPackage from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor +@withPackage('torch_sparse') # TODO `PANConv` returns a 
`SparseTensor`. def test_pan_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) conv = PANConv(16, 32, filter_size=2) assert str(conv) == 'PANConv(16, 32, filter_size=2)' @@ -20,6 +24,7 @@ def test_pan_conv(): assert torch.allclose(out1, out2, atol=1e-6) assert torch.allclose(M1.to_dense(), M2.to_dense()) - out3, M3 = conv(x, adj2.t()) - assert torch.allclose(out1, out3, atol=1e-6) - assert torch.allclose(M1.to_dense(), M3.to_dense()) + if torch_geometric.typing.WITH_TORCH_SPARSE: + out3, M3 = conv(x, adj2.t()) + assert torch.allclose(out1, out3, atol=1e-6) + assert torch.allclose(M1.to_dense(), M3.to_dense()) diff --git a/test/nn/conv/test_pdn_conv.py b/test/nn/conv/test_pdn_conv.py index 988175955b20..86cdf4121b16 100644 --- a/test/nn/conv/test_pdn_conv.py +++ b/test/nn/conv/test_pdn_conv.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn import PDNConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor @@ -8,21 +9,24 @@ def test_pdn_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - edge_attr = torch.randn(6, 8) - adj = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4)) + edge_attr = torch.randn(edge_index.size(1), 8) conv = PDNConv(16, 32, edge_dim=8, hidden_channels=128) assert str(conv) == "PDNConv(16, 32)" + out = conv(x, edge_index, edge_attr) assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) + assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index, edge_attr), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) @@ -35,7 +39,7 @@ def test_pdn_conv_with_sparse_node_input_feature(): size=torch.Size([4, 16]), ) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - edge_attr = torch.randn(6, 8) + edge_attr = torch.randn(edge_index.size(1), 8) conv = PDNConv(16, 32, edge_dim=8, hidden_channels=128) diff --git a/test/nn/conv/test_pna_conv.py b/test/nn/conv/test_pna_conv.py index 242b1b5646d0..989997e61e8b 100644 --- a/test/nn/conv/test_pna_conv.py +++ b/test/nn/conv/test_pna_conv.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.data import Data from torch_geometric.loader import DataLoader, NeighborLoader from torch_geometric.nn import PNAConv @@ -18,21 +19,25 @@ def test_pna_conv(divide_input): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) deg = torch.tensor([0, 3, 0, 1]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.rand(edge_index.size(1), 3) + conv = PNAConv(16, 32, aggregators, scalers, deg=deg, edge_dim=3, towers=4, pre_layers=2, post_layers=2, 
divide_input=divide_input) assert str(conv) == 'PNAConv(16, 32, towers=4, edge_dim=3)' + out = conv(x, edge_index, value) assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index, value), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) @@ -49,19 +54,17 @@ def test_pna_conv_get_degree_histogram(): shuffle=False, ) deg_hist = PNAConv.get_degree_histogram(loader) - deg_hist_ref = torch.tensor([1, 2, 1, 1]) - assert torch.equal(deg_hist_ref, deg_hist) + assert torch.equal(deg_hist, torch.tensor([1, 2, 1, 1])) edge_index_1 = torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]]) edge_index_2 = torch.tensor([[1, 1, 2, 2, 0, 3, 3], [2, 3, 3, 1, 1, 0, 2]]) edge_index_3 = torch.tensor([[1, 3, 2, 0, 0, 4, 2], [2, 0, 4, 1, 1, 0, 3]]) edge_index_4 = torch.tensor([[0, 1, 2, 4, 0, 1, 3], [2, 3, 3, 1, 1, 0, 2]]) - data_1 = Data(num_nodes=5, - edge_index=edge_index_1) # deg_hist = [1, 2 ,1 ,1] - data_2 = Data(num_nodes=5, edge_index=edge_index_2) # deg_hist = [1, 1, 3] - data_3 = Data(num_nodes=5, edge_index=edge_index_3) # deg_hist = [0, 3, 2] - data_4 = Data(num_nodes=5, edge_index=edge_index_4) # deg_hist = [1, 1, 3] + data_1 = Data(num_nodes=5, edge_index=edge_index_1) # hist = [1, 2 ,1 ,1] + data_2 = Data(num_nodes=5, edge_index=edge_index_2) # hist = [1, 1, 3] + data_3 = Data(num_nodes=5, edge_index=edge_index_3) # hist = [0, 3, 2] + data_4 = Data(num_nodes=5, edge_index=edge_index_4) # hist = [1, 1, 3] loader = DataLoader( [data_1, data_2, data_3, data_4], @@ -69,5 +72,4 @@ def test_pna_conv_get_degree_histogram(): shuffle=False, ) deg_hist = PNAConv.get_degree_histogram(loader) - deg_hist_ref = torch.tensor([3, 7, 9, 1]) - assert torch.equal(deg_hist_ref, deg_hist) + assert torch.equal(deg_hist, torch.tensor([3, 7, 9, 1])) diff --git a/test/nn/conv/test_point_conv.py b/test/nn/conv/test_point_conv.py index 25a65d6c0f38..bb8820f463c3 100644 --- a/test/nn/conv/test_point_conv.py +++ b/test/nn/conv/test_point_conv.py @@ -3,9 +3,11 @@ from torch.nn import ReLU from torch.nn import Sequential as Seq +import torch_geometric.typing from torch_geometric.nn import PointNetConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_point_net_conv(): @@ -13,9 +15,7 @@ def test_point_net_conv(): pos1 = torch.randn(4, 3) pos2 = torch.randn(2, 3) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) local_nn = Seq(Lin(16 + 3, 32), ReLU(), Lin(32, 32)) global_nn = Seq(Lin(32, 32)) @@ -31,36 +31,41 @@ def test_point_net_conv(): out = conv(x1, pos1, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x1, pos1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = 
SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(OptTensor, Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, pos1, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x1, pos1, edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptTensor, Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, pos1, adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() out = conv(x1, (pos1, pos2), edge_index) assert out.size() == (2, 32) - assert conv((x1, None), (pos1, pos2), edge_index).tolist() == out.tolist() + assert torch.allclose(conv((x1, None), (pos1, pos2), edge_index), out) assert torch.allclose(conv(x1, (pos1, pos2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), adj2.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), adj1.t()), out, - atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), adj2.t()), out, - atol=1e-6) + assert torch.allclose(conv((x1, None), (pos1, pos2), adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv(x1, (pos1, pos2), adj2.t()), out, atol=1e-6) + assert torch.allclose(conv((x1, None), (pos1, pos2), adj2.t()), out) if is_full_test(): t = '(PairOptTensor, PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, None), (pos1, pos2), - edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x1, (pos1, pos2), edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairOptTensor, PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, None), (pos1, pos2), adj1.t()), out, - atol=1e-6) + assert torch.allclose(jit(x1, (pos1, pos2), adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_point_gnn_conv.py b/test/nn/conv/test_point_gnn_conv.py index d6ddcaa5ef14..ac25e1a93a71 100644 --- a/test/nn/conv/test_point_gnn_conv.py +++ b/test/nn/conv/test_point_gnn_conv.py @@ -1,17 +1,17 @@ import torch +import torch_geometric.typing from torch_geometric.nn import MLP, PointGNNConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_point_gnn_conv(): x = torch.randn(6, 8) pos = torch.randn(6, 3) edge_index = torch.tensor([[0, 1, 1, 1, 2, 5], [1, 2, 3, 4, 3, 4]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(6, 6)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(6, 6)) conv = PointGNNConv( mlp_h=MLP([8, 16, 3]), @@ -27,13 +27,17 @@ def test_point_gnn_conv(): out = conv(x, pos, edge_index) assert out.size() == (6, 8) assert torch.allclose(conv(x, pos, adj1.t()), out) - assert torch.allclose(conv(x, pos, adj2.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) + assert torch.allclose(conv(x, pos, adj2.t()), out) if is_full_test(): t = '(Tensor, Tensor, Tensor) -> Tensor' jit = 
torch.jit.script(conv.jittable(t))
         assert torch.allclose(jit(x, pos, edge_index), out)
 
+    if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE:
         t = '(Tensor, Tensor, SparseTensor) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
-        assert torch.allclose(jit(x, pos, adj1.t()), out)
+        assert torch.allclose(jit(x, pos, adj2.t()), out)
diff --git a/test/nn/conv/test_point_transformer_conv.py b/test/nn/conv/test_point_transformer_conv.py
index bdb05c8e093b..868b461d518f 100644
--- a/test/nn/conv/test_point_transformer_conv.py
+++ b/test/nn/conv/test_point_transformer_conv.py
@@ -1,9 +1,11 @@
 import torch
 from torch.nn import Linear, ReLU, Sequential
 
+import torch_geometric.typing
 from torch_geometric.nn import PointTransformerConv
 from torch_geometric.testing import is_full_test
 from torch_geometric.typing import SparseTensor
+from torch_geometric.utils import to_torch_csc_tensor
 
 
 def test_point_transformer_conv():
@@ -12,9 +14,7 @@ def test_point_transformer_conv():
     pos1 = torch.rand(4, 3)
     pos2 = torch.randn(2, 3)
     edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
-    row, col = edge_index
-    adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))
-    adj2 = adj1.to_torch_sparse_csc_tensor()
+    adj1 = to_torch_csc_tensor(edge_index, size=(4, 4))
 
     conv = PointTransformerConv(in_channels=16, out_channels=32)
     assert str(conv) == 'PointTransformerConv(16, 32)'
@@ -22,16 +22,20 @@ def test_point_transformer_conv():
     out = conv(x1, pos1, edge_index)
     assert out.size() == (4, 32)
     assert torch.allclose(conv(x1, pos1, adj1.t()), out, atol=1e-6)
-    assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6)
+
+    if torch_geometric.typing.WITH_TORCH_SPARSE:
+        adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4))
+        assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6)
 
     if is_full_test():
         t = '(Tensor, Tensor, Tensor) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
         assert torch.allclose(jit(x1, pos1, edge_index), out, atol=1e-6)
 
+    if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE:
         t = '(Tensor, Tensor, SparseTensor) -> Tensor'
         jit = torch.jit.script(conv.jittable(t))
-        assert torch.allclose(jit(x1, pos1, adj1.t()), out, atol=1e-6)
+        assert torch.allclose(jit(x1, pos1, adj2.t()), out, atol=1e-6)
 
     pos_nn = Sequential(Linear(3, 16), ReLU(), Linear(16, 32))
     attn_nn = Sequential(Linear(32, 32), ReLU(), Linear(32, 32))
@@ -40,26 +44,29 @@ def test_point_transformer_conv():
     out = conv(x1, pos1, edge_index)
     assert out.size() == (4, 32)
     assert torch.allclose(conv(x1, pos1, adj1.t()), out, atol=1e-6)
-    assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6)
+    if torch_geometric.typing.WITH_TORCH_SPARSE:
+        assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6)
+
+    # Test bipartite message passing:
+    adj1 = to_torch_csc_tensor(edge_index, size=(4, 2))
 
     conv = PointTransformerConv((16, 8), 32)
-    adj1 = adj1.sparse_resize((4, 2))
-    adj2 = adj1.to_torch_sparse_csc_tensor()
+    assert str(conv) == 'PointTransformerConv((16, 8), 32)'
 
     out = conv((x1, x2), (pos1, pos2), edge_index)
     assert out.size() == (2, 32)
-    assert torch.allclose(conv((x1, x2), (pos1, pos2), adj1.t()), out,
-                          atol=1e-6)
-    assert torch.allclose(conv((x1, x2), (pos1, pos2), adj2.t()), out,
-                          atol=1e-6)
+    assert torch.allclose(conv((x1, x2), (pos1, pos2), adj1.t()), out)
+
+    if torch_geometric.typing.WITH_TORCH_SPARSE:
+        adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2))
+        assert torch.allclose(conv((x1, x2), (pos1, pos2), adj2.t()), out)
 
     if is_full_test():
         t = '(PairTensor, 
PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), (pos1, pos2), edge_index), out, - atol=1e-6) + assert torch.allclose(jit((x1, x2), (pos1, pos2), edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), (pos1, pos2), adj1.t()), out, - atol=1e-6) + assert torch.allclose(jit((x1, x2), (pos1, pos2), adj2.t()), out) diff --git a/test/nn/conv/test_ppf_conv.py b/test/nn/conv/test_ppf_conv.py index 747b8bd208ca..5b36b55ca672 100644 --- a/test/nn/conv/test_ppf_conv.py +++ b/test/nn/conv/test_ppf_conv.py @@ -4,9 +4,11 @@ from torch.nn import ReLU from torch.nn import Sequential as Seq +import torch_geometric.typing from torch_geometric.nn import PPFConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_ppf_conv(): @@ -16,9 +18,7 @@ def test_ppf_conv(): n1 = F.normalize(torch.rand(4, 3), dim=-1) n2 = F.normalize(torch.rand(2, 3), dim=-1) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) local_nn = Seq(Lin(16 + 4, 32), ReLU(), Lin(32, 32)) global_nn = Seq(Lin(32, 32)) @@ -31,45 +31,48 @@ def test_ppf_conv(): '), global_nn=Sequential(\n' ' (0): Linear(in_features=32, out_features=32, bias=True)\n' '))') + out = conv(x1, pos1, n1, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x1, pos1, n1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, pos1, n1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, pos1, n1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(OptTensor, Tensor, Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, pos1, n1, edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptTensor, Tensor, Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, n1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, pos1, n1, adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() out = conv(x1, (pos1, pos2), (n1, n2), edge_index) assert out.size() == (2, 32) assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), edge_index), out, atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj1.t()), out, - atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj2.t()), out, - atol=1e-6) + assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj1.t()), out) assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), adj2.t()), - out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj2.t()), out) + assert torch.allclose( + conv((x1, None), (pos1, pos2), (n1, n2), adj2.t()), 
out, atol=1e-6) if is_full_test(): t = '(PairOptTensor, PairTensor, PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose( - jit((x1, None), (pos1, pos2), (n1, n2), edge_index), - out, - atol=1e-6, - ) + assert torch.allclose(jit(x1, (pos1, pos2), (n1, n2), edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairOptTensor, PairTensor, PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose( - jit((x1, None), (pos1, pos2), (n1, n2), adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, (pos1, pos2), (n1, n2), adj2.t()), out) diff --git a/torch_geometric/nn/conv/pan_conv.py b/torch_geometric/nn/conv/pan_conv.py index 704b2a6ea529..6bf4fb10417a 100644 --- a/torch_geometric/nn/conv/pan_conv.py +++ b/torch_geometric/nn/conv/pan_conv.py @@ -61,8 +61,11 @@ def reset_parameters(self): self.lin.reset_parameters() self.weight.data.fill_(0.5) - def forward(self, x: Tensor, - edge_index: Adj) -> Tuple[Tensor, SparseTensor]: + def forward( + self, + x: Tensor, + edge_index: Adj, + ) -> Tuple[Tensor, SparseTensor]: adj_t: Optional[SparseTensor] = None if isinstance(edge_index, Tensor): From 431d07fddd2dd464628fe039fbf362a800941324 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 29 Mar 2023 06:02:15 +0200 Subject: [PATCH 1071/2432] Drop `torch_sparse` dependency in tests (18/n) (#7070) --- test/nn/conv/test_res_gated_graph_conv.py | 29 +++++++++++++------- test/nn/conv/test_rgat_conv.py | 20 +++++++------- test/nn/conv/test_rgcn_conv.py | 33 ++++++++++++++++------- 3 files changed, 54 insertions(+), 28 deletions(-) diff --git a/test/nn/conv/test_res_gated_graph_conv.py b/test/nn/conv/test_res_gated_graph_conv.py index 32fb38758e04..193cd53814d6 100644 --- a/test/nn/conv/test_res_gated_graph_conv.py +++ b/test/nn/conv/test_res_gated_graph_conv.py @@ -1,48 +1,59 @@ import torch +import torch_geometric.typing from torch_geometric.nn import ResGatedGraphConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_res_gated_graph_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 32) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = ResGatedGraphConv(8, 32) assert str(conv) == 'ResGatedGraphConv(8, 32)' + out = conv(x1, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() conv = ResGatedGraphConv((8, 32), 32) assert 
str(conv) == 'ResGatedGraphConv((8, 32), 32)' + out = conv((x1, x2), edge_index) assert out.size() == (2, 32) assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_rgat_conv.py b/test/nn/conv/test_rgat_conv.py index 2e8a0a054916..6efd7391c2ef 100644 --- a/test/nn/conv/test_rgat_conv.py +++ b/test/nn/conv/test_rgat_conv.py @@ -1,9 +1,11 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import RGATConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_coo_tensor @pytest.mark.parametrize('mod', [ @@ -38,11 +40,9 @@ def test_rgat_conv(mod, attention_mechanism, attention_mode): def test_rgat_conv_jittable(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - edge_attr = torch.randn((4, 8)) - adj1 = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_coo_tensor() + edge_attr = torch.randn((edge_index.size(1), 8)) edge_type = torch.tensor([0, 2, 1, 2]) + adj1 = to_torch_coo_tensor(edge_index, edge_attr, size=(4, 4)) conv = RGATConv(8, 20, num_relations=4, num_bases=4, mod='additive', attention_mechanism='across-relation', @@ -51,13 +51,15 @@ def test_rgat_conv_jittable(): out = conv(x, edge_index, edge_type, edge_attr) assert out.size() == (4, 40) - assert torch.allclose(conv(x, adj1.t(), edge_type), out) # t() expects a tensor with <= 2 sparse and 0 dense dimensions - adj2_t = adj2.transpose(0, 1).coalesce() - assert torch.allclose(conv(x, adj2_t, edge_type), out) + adj1_t = adj1.transpose(0, 1).coalesce() + assert torch.allclose(conv(x, adj1_t, edge_type), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) + assert torch.allclose(conv(x, adj2.t(), edge_type), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor, OptTensor, Size, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index, edge_type), - conv(x, edge_index, edge_type)) + assert torch.allclose(jit(x, edge_index, edge_type), out) diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index 9ce78a805b26..80f9e9d6b5f3 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import FastRGCNConv, RGCNConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor @@ -50,19 +51,24 @@ def test_rgcn_conv(cls, conf): idx2 = torch.arange(2) edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) - row, col = edge_index - adj = 
SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4)) conv = cls(4, 32, 2, num_bases, num_blocks, aggr='sum') assert str(conv) == f'{cls.__name__}(4, 32, num_relations=2)' + out1 = conv(x1, edge_index, edge_type) assert out1.size() == (4, 32) - assert torch.allclose(conv(x1, adj.t()), out1, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_type, (4, 4)) + assert torch.allclose(conv(x1, adj.t()), out1, atol=1e-6) if num_blocks is None: out2 = conv(None, edge_index, edge_type) + assert torch.allclose(conv(idx1, edge_index, edge_type), out2) assert out2.size() == (4, 32) - assert torch.allclose(conv(None, adj.t()), out2, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(None, adj.t()), out2, atol=1e-6) + assert torch.allclose(conv(idx1, adj.t()), out2, atol=1e-6) if is_full_test(): t = '(OptTensor, Tensor, OptTensor) -> Tensor' @@ -72,6 +78,7 @@ def test_rgcn_conv(cls, conf): assert torch.allclose(jit(idx1, edge_index, edge_type), out2) assert torch.allclose(jit(None, edge_index, edge_type), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, adj.t()), out1) @@ -79,19 +86,24 @@ def test_rgcn_conv(cls, conf): assert torch.allclose(jit(idx1, adj.t()), out2, atol=1e-6) assert torch.allclose(jit(None, adj.t()), out2, atol=1e-6) - adj = adj.sparse_resize((4, 2)) + # Test bipartite message passing: conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum') assert str(conv) == f'{cls.__name__}((4, 16), 32, num_relations=2)' + out1 = conv((x1, x2), edge_index, edge_type) assert out1.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_type, (4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) if num_blocks is None: out2 = conv((None, idx2), edge_index, edge_type) assert out2.size() == (2, 32) assert torch.allclose(conv((idx1, idx2), edge_index, edge_type), out2) - assert torch.allclose(conv((None, idx2), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((idx1, idx2), adj.t()), out2, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv((None, idx2), adj.t()), out2, atol=1e-6) + assert torch.allclose(conv((idx1, idx2), adj.t()), out2, atol=1e-6) if is_full_test(): t = '(Tuple[OptTensor, Tensor], Tensor, OptTensor) -> Tensor' @@ -99,10 +111,11 @@ def test_rgcn_conv(cls, conf): assert torch.allclose(jit((x1, x2), edge_index, edge_type), out1) if num_blocks is None: assert torch.allclose(jit((None, idx2), edge_index, edge_type), - out2) + out2, atol=1e-6) assert torch.allclose(jit((idx1, idx2), edge_index, edge_type), - out2) + out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tuple[OptTensor, Tensor], SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) From 948852763a8d7d13e3813c39954ffd84052904c8 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 29 Mar 2023 06:21:56 +0200 Subject: [PATCH 1072/2432] Drop `torch_sparse` dependency in tests (19/n) (#7071) --- test/nn/conv/test_sg_conv.py | 36 ++++++++++++++++----------- test/nn/conv/test_signed_conv.py | 39 +++++++++++++++++++----------- test/nn/conv/test_simple_conv.py | 23 
+++++++++++++----- test/nn/conv/test_spline_conv.py | 39 ++++++++++++++++++------------ test/nn/conv/test_ssg_conv.py | 36 ++++++++++++++++----------- test/nn/conv/test_supergat_conv.py | 12 +++++---- 6 files changed, 117 insertions(+), 68 deletions(-) diff --git a/test/nn/conv/test_sg_conv.py b/test/nn/conv/test_sg_conv.py index 6f449c90f5a1..3eba82b853cd 100644 --- a/test/nn/conv/test_sg_conv.py +++ b/test/nn/conv/test_sg_conv.py @@ -1,44 +1,52 @@ import torch +import torch_geometric.typing from torch_geometric.nn import SGConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_sg_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = SGConv(16, 32, K=10) assert str(conv) == 'SGConv(16, 32, K=10)' + out1 = conv(x, edge_index) assert out1.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + out2 = conv(x, edge_index, value) assert out2.size() == (4, 32) assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) conv.cached = True conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() + assert conv._cached_x is not None + assert torch.allclose(conv(x, edge_index), out1, atol=1e-6) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) diff --git a/test/nn/conv/test_signed_conv.py b/test/nn/conv/test_signed_conv.py index df5c625ca04d..a4f306a9beda 100644 --- a/test/nn/conv/test_signed_conv.py +++ b/test/nn/conv/test_signed_conv.py @@ -1,16 +1,16 @@ import torch +import torch_geometric.typing from torch_geometric.nn import SignedConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor 
def test_signed_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv1 = SignedConv(16, 32, first_aggr=True) assert str(conv1) == 'SignedConv(16, 32, first_aggr=True)' @@ -21,12 +21,17 @@ def test_signed_conv(): out1 = conv1(x, edge_index, edge_index) assert out1.size() == (4, 64) assert torch.allclose(conv1(x, adj1.t(), adj1.t()), out1) - assert torch.allclose(conv1(x, adj2.t(), adj2.t()), out1) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv1(x, adj2.t(), adj2.t()), out1) out2 = conv2(out1, edge_index, edge_index) assert out2.size() == (4, 96) assert torch.allclose(conv2(out1, adj1.t(), adj1.t()), out2) - assert torch.allclose(conv2(out1, adj2.t(), adj2.t()), out2) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv2(out1, adj2.t(), adj2.t()), out2) if is_full_test(): t = '(Tensor, Tensor, Tensor) -> Tensor' @@ -35,23 +40,28 @@ def test_signed_conv(): assert torch.allclose(jit1(x, edge_index, edge_index), out1) assert torch.allclose(jit2(out1, edge_index, edge_index), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, SparseTensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1(x, adj1.t(), adj1.t()), out1) - assert torch.allclose(jit2(out1, adj1.t(), adj1.t()), out2) + assert torch.allclose(jit1(x, adj2.t(), adj1.t()), out1) + assert torch.allclose(jit2(out1, adj2.t(), adj1.t()), out2) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() assert torch.allclose(conv1((x, x[:2]), edge_index, edge_index), out1[:2]) assert torch.allclose(conv1((x, x[:2]), adj1.t(), adj1.t()), out1[:2]) - assert torch.allclose(conv1((x, x[:2]), adj2.t(), adj2.t()), out1[:2]) assert torch.allclose(conv2((out1, out1[:2]), edge_index, edge_index), out2[:2], atol=1e-6) assert torch.allclose(conv2((out1, out1[:2]), adj1.t(), adj1.t()), out2[:2], atol=1e-6) - assert torch.allclose(conv2((out1, out1[:2]), adj2.t(), adj2.t()), - out2[:2], atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv1((x, x[:2]), adj2.t(), adj2.t()), out1[:2]) + assert torch.allclose(conv2((out1, out1[:2]), adj2.t(), adj2.t()), + out2[:2], atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor, Tensor) -> Tensor' @@ -62,9 +72,10 @@ def test_signed_conv(): assert torch.allclose(jit2((out1, out1[:2]), edge_index, edge_index), out2[:2], atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor, SparseTensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1((x, x[:2]), adj1.t(), adj1.t()), out1[:2]) - assert torch.allclose(jit2((out1, out1[:2]), adj1.t(), adj1.t()), + assert torch.allclose(jit1((x, x[:2]), adj2.t(), adj1.t()), out1[:2]) + assert torch.allclose(jit2((out1, out1[:2]), adj2.t(), adj1.t()), out2[:2], atol=1e-6) diff --git a/test/nn/conv/test_simple_conv.py b/test/nn/conv/test_simple_conv.py index 28e1aa8c6e59..ed251cd15ced 
100644 --- a/test/nn/conv/test_simple_conv.py +++ b/test/nn/conv/test_simple_conv.py @@ -1,9 +1,11 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import SimpleConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor @pytest.mark.parametrize('aggr, combine_root', [ @@ -15,8 +17,7 @@ def test_simple_conv(aggr, combine_root): x1 = torch.randn(4, 8) x2 = torch.randn(2, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = SimpleConv(aggr, combine_root) assert str(conv) == 'SimpleConv()' @@ -27,7 +28,11 @@ def test_simple_conv(aggr, combine_root): out = conv(x1, edge_index) assert out.size() == (4, output_size) assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj.t()), out) + assert torch.allclose(conv(x1, adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' @@ -35,13 +40,19 @@ def test_simple_conv(aggr, combine_root): assert torch.allclose(jit(x1, edge_index), out) assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out) + assert torch.allclose(jit(x1, adj2.t()), out) - adj = adj.sparse_resize((4, 2)) + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) out = conv((x1, x2), edge_index) assert out.size() == (2, output_size) assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out) - assert torch.allclose(conv((x1, x2), adj.t()), out) + assert torch.allclose(conv((x1, x2), adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out) diff --git a/test/nn/conv/test_spline_conv.py b/test/nn/conv/test_spline_conv.py index 7bd99baa5867..5958aa4cb85d 100644 --- a/test/nn/conv/test_spline_conv.py +++ b/test/nn/conv/test_spline_conv.py @@ -2,6 +2,7 @@ import torch +import torch_geometric.typing from torch_geometric.nn import SplineConv from torch_geometric.testing import is_full_test, withPackage from torch_geometric.typing import SparseTensor @@ -14,51 +15,59 @@ def test_spline_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) + value = torch.rand(edge_index.size(1), 3) conv = SplineConv(8, 32, dim=3, kernel_size=5) assert str(conv) == 'SplineConv(8, 32, dim=3)' out = conv(x1, edge_index, value) assert out.size() == (4, 32) assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = 
torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, value), out) + assert torch.allclose(jit(x1, edge_index, value), out, atol=1e-6) assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out) + assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) - adj = adj.sparse_resize((4, 2)) + # Test bipartite message passing: conv = SplineConv((8, 16), 32, dim=3, kernel_size=5) assert str(conv) == 'SplineConv((8, 16), 32, dim=3)' + out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj.t()), out1) - assert torch.allclose(conv((x1, None), adj.t()), out2) + + out2 = conv((x1, None), edge_index, value, (4, 2)) + assert out2.size() == (2, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, value, (4, 2)) + assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index, value), out1) assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1) + out1, atol=1e-6) assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2) + out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj.t()), out1) - assert torch.allclose(jit((x1, None), adj.t()), out2) + assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) @withPackage('torch_spline_conv') diff --git a/test/nn/conv/test_ssg_conv.py b/test/nn/conv/test_ssg_conv.py index 4afa4c226b50..bd5fde7ad95e 100644 --- a/test/nn/conv/test_ssg_conv.py +++ b/test/nn/conv/test_ssg_conv.py @@ -1,44 +1,52 @@ import torch +import torch_geometric.typing from torch_geometric.nn import SSGConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_ssg_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = SSGConv(16, 32, alpha=0.1, K=10) assert str(conv) == 'SSGConv(16, 32, K=10, alpha=0.1)' + out1 = conv(x, edge_index) assert out1.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + out2 = conv(x, edge_index, value) assert out2.size() == (4, 32) assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, 
adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) conv.cached = True conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() + assert conv._cached_h is not None + assert torch.allclose(conv(x, edge_index), out1, atol=1e-6) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + if torch_geometric.typing.WITH_TORCH_SPARSE: + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) diff --git a/test/nn/conv/test_supergat_conv.py b/test/nn/conv/test_supergat_conv.py index ab588a523639..3f2d4cc57f13 100644 --- a/test/nn/conv/test_supergat_conv.py +++ b/test/nn/conv/test_supergat_conv.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import SuperGATConv from torch_geometric.typing import SparseTensor @@ -17,16 +18,17 @@ def test_supergat_conv(att_type): out = conv(x, edge_index) assert out.size() == (4, 64) - adj_t = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)).t() - assert torch.allclose(conv(x, adj_t), out) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) - # Negative samples are given. + # Negative samples are given: neg_edge_index = conv.negative_sampling(edge_index, x.size(0)) assert torch.allclose(conv(x, edge_index, neg_edge_index), out) att_loss = conv.get_attention_loss() assert isinstance(att_loss, torch.Tensor) and att_loss > 0 - # Batch of graphs. + # Batch of graphs: x = torch.randn(8, 8) edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7], [0, 0, 1, 1, 4, 4, 5, 5]]) @@ -34,7 +36,7 @@ def test_supergat_conv(att_type): out = conv(x, edge_index, batch=batch) assert out.size() == (8, 64) - # Batch of graphs and negative samples are given. 
+ # Batch of graphs and negative samples are given: neg_edge_index = conv.negative_sampling(edge_index, x.size(0), batch) assert torch.allclose(conv(x, edge_index, neg_edge_index), out) att_loss = conv.get_attention_loss() From 2472dafc46db310d2d5d4e7817652132ef7dd2c1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 29 Mar 2023 07:32:06 +0200 Subject: [PATCH 1073/2432] Drop `torch_sparse` dependency in tests (20/n) (#7072) --- .github/workflows/latest_testing.yml | 26 +--------- test/nn/conv/test_eg_conv.py | 2 +- test/nn/conv/test_gatv2_conv.py | 1 + test/nn/conv/test_pna_conv.py | 7 ++- test/nn/conv/test_point_conv.py | 4 +- test/nn/conv/test_ppf_conv.py | 6 ++- test/nn/conv/test_rgat_conv.py | 7 +-- test/nn/conv/test_sage_conv.py | 65 ++++++++++++++++--------- test/nn/conv/test_signed_conv.py | 8 +-- test/nn/conv/test_tag_conv.py | 26 ++++++---- test/nn/conv/test_transformer_conv.py | 40 +++++++++------ test/nn/conv/test_wl_conv.py | 13 +++-- test/nn/conv/test_wl_conv_continuous.py | 1 + torch_geometric/typing.py | 28 ++++++++--- 14 files changed, 136 insertions(+), 98 deletions(-) diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index 224f68748279..cc8c02f6855a 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -47,28 +47,4 @@ jobs: - name: Run tests if: steps.changed-files-specific.outputs.only_changed != 'true' run: | - pytest test/test_debug.py test/test_experimental.py test/test_home.py test/test_seed.py test/test_typing.py - pytest test/contrib/ - pytest test/data/ - pytest test/datasets/ - pytest test/explain/ - pytest test/graphgym/ - pytest test/io/ - pytest test/loader/ - pytest test/profile/ - pytest test/sampler/ - pytest test/testing/ - pytest test/transforms/ - pytest test/utils/ - pytest test/visualization/ - pytest test/nn/aggr - pytest test/nn/conv/test_message_passing.py - # pytest test/nn/conv - pytest test/nn/dense - pytest test/nn/functional - pytest test/nn/kge - pytest test/nn/models - pytest test/nn/norm - pytest test/nn/pool - pytest test/nn/unpool - pytest test/nn/test_compile_basic.py test/nn/test_compile_conv.py test/nn/test_compile_dynamic.py test/nn/test_data_parallel.py test/nn/test_encoding.py test/nn/test_inits.py test/nn/test_model_hub.py test/nn/test_model_summary.py test/nn/test_module_dict.py test/nn/test_parameter_dict.py test/nn/test_reshape.py test/nn/test_resolver.py test/nn/test_sequential.py test/nn/test_to_fixed_size_transformer.py test/nn/test_to_hetero_module.py test/nn/test_to_hetero_transformer.py test/nn/test_to_hetero_with_bases_transformer.py + pytest diff --git a/test/nn/conv/test_eg_conv.py b/test/nn/conv/test_eg_conv.py index 88f15feddccd..665e1bcf05c0 100644 --- a/test/nn/conv/test_eg_conv.py +++ b/test/nn/conv/test_eg_conv.py @@ -36,7 +36,7 @@ def test_eg_conv(): if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' diff --git a/test/nn/conv/test_gatv2_conv.py b/test/nn/conv/test_gatv2_conv.py index f8192ed6a5dd..56e330b84db1 100644 --- a/test/nn/conv/test_gatv2_conv.py +++ b/test/nn/conv/test_gatv2_conv.py @@ -90,6 +90,7 @@ def test_gatv2_conv(): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index), out) + if is_full_test() and 
torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_pna_conv.py b/test/nn/conv/test_pna_conv.py index 989997e61e8b..3dc166157e0f 100644 --- a/test/nn/conv/test_pna_conv.py +++ b/test/nn/conv/test_pna_conv.py @@ -5,7 +5,7 @@ from torch_geometric.data import Data from torch_geometric.loader import DataLoader, NeighborLoader from torch_geometric.nn import PNAConv -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, onlyNeighborSampler from torch_geometric.typing import SparseTensor aggregators = ['sum', 'mean', 'min', 'max', 'var', 'std'] @@ -43,7 +43,8 @@ def test_pna_conv(divide_input): assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) -def test_pna_conv_get_degree_histogram(): +@onlyNeighborSampler +def test_pna_conv_get_degree_histogram_neighbor_loader(): edge_index = torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]]) data = Data(num_nodes=5, edge_index=edge_index) loader = NeighborLoader( @@ -56,6 +57,8 @@ def test_pna_conv_get_degree_histogram(): deg_hist = PNAConv.get_degree_histogram(loader) assert torch.equal(deg_hist, torch.tensor([1, 2, 1, 1])) + +def test_pna_conv_get_degree_histogram_dataloader(): edge_index_1 = torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]]) edge_index_2 = torch.tensor([[1, 1, 2, 2, 0, 3, 3], [2, 3, 3, 1, 1, 0, 2]]) edge_index_3 = torch.tensor([[1, 3, 2, 0, 0, 4, 2], [2, 0, 4, 1, 1, 0, 3]]) diff --git a/test/nn/conv/test_point_conv.py b/test/nn/conv/test_point_conv.py index bb8820f463c3..84c0069ed0d5 100644 --- a/test/nn/conv/test_point_conv.py +++ b/test/nn/conv/test_point_conv.py @@ -63,9 +63,9 @@ def test_point_net_conv(): if is_full_test(): t = '(PairOptTensor, PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, (pos1, pos2), edge_index), out) + assert torch.allclose(jit((x1, None), (pos1, pos2), edge_index), out) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairOptTensor, PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, (pos1, pos2), adj2.t()), out, atol=1e-6) + assert torch.allclose(jit((x1, None), (pos1, pos2), adj2.t()), out) diff --git a/test/nn/conv/test_ppf_conv.py b/test/nn/conv/test_ppf_conv.py index 5b36b55ca672..c7f8875687d8 100644 --- a/test/nn/conv/test_ppf_conv.py +++ b/test/nn/conv/test_ppf_conv.py @@ -70,9 +70,11 @@ def test_ppf_conv(): if is_full_test(): t = '(PairOptTensor, PairTensor, PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, (pos1, pos2), (n1, n2), edge_index), out) + assert torch.allclose( + jit((x1, None), (pos1, pos2), (n1, n2), edge_index), out) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairOptTensor, PairTensor, PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, (pos1, pos2), (n1, n2), adj2.t()), out) + assert torch.allclose( + jit((x1, None), (pos1, pos2), (n1, n2), adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_rgat_conv.py b/test/nn/conv/test_rgat_conv.py index 6efd7391c2ef..630c9f27787c 100644 --- a/test/nn/conv/test_rgat_conv.py +++ b/test/nn/conv/test_rgat_conv.py @@ -53,13 +53,14 @@ def test_rgat_conv_jittable(): assert out.size() == (4, 40) # t() expects a tensor with <= 
2 sparse and 0 dense dimensions adj1_t = adj1.transpose(0, 1).coalesce() - assert torch.allclose(conv(x, adj1_t, edge_type), out) + assert torch.allclose(conv(x, adj1_t, edge_type), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) - assert torch.allclose(conv(x, adj2.t(), edge_type), out) + assert torch.allclose(conv(x, adj2.t(), edge_type), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor, OptTensor, Size, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index, edge_type), out) + assert torch.allclose(jit(x, edge_index, edge_type), + conv(x, edge_index, edge_type)) diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index 6785982f83ef..a5b293f85e38 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -1,9 +1,11 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import SAGEConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor @pytest.mark.parametrize('project', [False, True]) @@ -12,17 +14,19 @@ def test_sage_conv(project, aggr): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = SAGEConv(8, 32, project=project, aggr=aggr) assert str(conv) == f'SAGEConv(8, 32, aggr={aggr})' + out = conv(x1, edge_index) assert out.size() == (4, 32) assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out, atol=1e-6) assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, Size) -> Tensor' @@ -30,50 +34,60 @@ def test_sage_conv(project, aggr): assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out, atol=1e-6) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) + + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() conv = SAGEConv((8, 16), 32, project=project, aggr=aggr) assert str(conv) == f'SAGEConv((8, 16), 32, aggr={aggr})' + out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, (4, 2)) assert out1.size() == (2, 32) - assert out2.size() == (2, 32) assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1, atol=1e-6) assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) + + out2 = conv((x1, None), edge_index, (4, 2)) + assert out2.size() == (2, 32) assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = 
SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) if is_full_test(): t = '(OptPairTensor, Tensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index), out1, atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1, - atol=1e-6) - assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2, - atol=1e-6) + assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1) + assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj1.t()), out2, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj2.t()), out2, atol=1e-6) def test_lstm_aggr_sage_conv(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = SAGEConv(8, 32, aggr='lstm') assert str(conv) == 'SAGEConv(8, 32, aggr=lstm)' + out = conv(x, edge_index) assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out) + assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 0]]) with pytest.raises(ValueError, match="'index' tensor is not sorted"): @@ -90,11 +104,16 @@ def test_lstm_aggr_sage_conv(): def test_multi_aggr_sage_conv(aggr_kwargs): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + aggr_kwargs['aggrs_kwargs'] = [{}, {}, {}, dict(learn=True, t=1)] conv = SAGEConv(8, 32, aggr=['mean', 'max', 'sum', 'softmax'], aggr_kwargs=aggr_kwargs) + out = conv(x, edge_index) assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out) + assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_signed_conv.py b/test/nn/conv/test_signed_conv.py index a4f306a9beda..6e72e29f9230 100644 --- a/test/nn/conv/test_signed_conv.py +++ b/test/nn/conv/test_signed_conv.py @@ -44,8 +44,8 @@ def test_signed_conv(): t = '(Tensor, SparseTensor, SparseTensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1(x, adj2.t(), adj1.t()), out1) - assert torch.allclose(jit2(out1, adj2.t(), adj1.t()), out2) + assert torch.allclose(jit1(x, adj2.t(), adj2.t()), out1) + assert torch.allclose(jit2(out1, adj2.t(), adj2.t()), out2) # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) @@ -76,6 +76,6 @@ def test_signed_conv(): t = '(PairTensor, SparseTensor, SparseTensor) -> Tensor' jit1 = 
torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1((x, x[:2]), adj2.t(), adj1.t()), out1[:2]) - assert torch.allclose(jit2((out1, out1[:2]), adj2.t(), adj1.t()), + assert torch.allclose(jit1((x, x[:2]), adj2.t(), adj2.t()), out1[:2]) + assert torch.allclose(jit2((out1, out1[:2]), adj2.t(), adj2.t()), out2[:2], atol=1e-6) diff --git a/test/nn/conv/test_tag_conv.py b/test/nn/conv/test_tag_conv.py index af18a2c57f81..9caa2e38af94 100644 --- a/test/nn/conv/test_tag_conv.py +++ b/test/nn/conv/test_tag_conv.py @@ -1,30 +1,35 @@ import torch +import torch_geometric.typing from torch_geometric.nn import TAGConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_tag_conv(): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) conv = TAGConv(16, 32) assert str(conv) == 'TAGConv(16, 32, K=3)' + out1 = conv(x, edge_index) assert out1.size() == (4, 32) assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + out2 = conv(x, edge_index, value) assert out2.size() == (4, 32) assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' @@ -32,10 +37,11 @@ def test_tag_conv(): assert jit(x, edge_index).tolist() == out1.tolist() assert jit(x, edge_index, value).tolist() == out2.tolist() + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) def test_static_tag_conv(): diff --git a/test/nn/conv/test_transformer_conv.py b/test/nn/conv/test_transformer_conv.py index d30b2c878cc6..41efdbfc3788 100644 --- a/test/nn/conv/test_transformer_conv.py +++ b/test/nn/conv/test_transformer_conv.py @@ -1,33 +1,38 @@ import torch +import torch_geometric.typing from torch_geometric.nn import TransformerConv from torch_geometric.testing import is_full_test from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor def test_transformer_conv(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = TransformerConv(8, 32, heads=2, beta=True) assert 
str(conv) == 'TransformerConv(8, 32, heads=2)' + out = conv(x1, edge_index) assert out.size() == (4, 64) assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, NoneType, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, NoneType, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) # Test `return_attention_weights`. result = conv(x1, edge_index, return_attention_weights=True) @@ -37,10 +42,11 @@ def test_transformer_conv(): assert result[1][1].min() >= 0 and result[1][1].max() <= 1 assert conv._alpha is None - result = conv(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 - assert conv._alpha is None + if torch_geometric.typing.WITH_TORCH_SPARSE: + result = conv(x1, adj2.t(), return_attention_weights=True) + assert torch.allclose(result[0], out, atol=1e-6) + assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 + assert conv._alpha is None if is_full_test(): t = ('(Tensor, Tensor, NoneType, bool) -> ' @@ -53,29 +59,35 @@ def test_transformer_conv(): assert result[1][1].min() >= 0 and result[1][1].max() <= 1 assert conv._alpha is None + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = ('(Tensor, SparseTensor, NoneType, bool) -> ' 'Tuple[Tensor, SparseTensor]') jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, adj1.t(), return_attention_weights=True) + result = jit(x1, adj2.t(), return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 assert conv._alpha is None - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() + # Test bipartite message passing: + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + conv = TransformerConv((8, 16), 32, heads=2, beta=True) assert str(conv) == 'TransformerConv((8, 16), 32, heads=2)' out = conv((x1, x2), edge_index) assert out.size() == (2, 64) assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): t = '(PairTensor, Tensor, NoneType, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor, NoneType, NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_wl_conv.py b/test/nn/conv/test_wl_conv.py index 675de8570fb3..2c132ca0059b 100644 --- a/test/nn/conv/test_wl_conv.py +++ b/test/nn/conv/test_wl_conv.py @@ -1,16 +1,16 @@ import torch +import 
torch_geometric.typing from torch_geometric.nn import WLConv from torch_geometric.typing import SparseTensor -from torch_geometric.utils import one_hot +from torch_geometric.utils import one_hot, to_torch_csc_tensor def test_wl_conv(): x1 = torch.tensor([1, 0, 0, 1]) x2 = one_hot(x1) edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) - adj1 = SparseTensor.from_edge_index(edge_index) - adj2 = adj1.to_torch_sparse_csc_tensor() + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = WLConv() assert str(conv) == 'WLConv()' @@ -19,9 +19,12 @@ def test_wl_conv(): assert out.tolist() == [0, 1, 1, 0] assert torch.equal(conv(x2, edge_index), out) assert torch.equal(conv(x1, adj1.t()), out) - assert torch.equal(conv(x1, adj2.t()), out) assert torch.equal(conv(x2, adj1.t()), out) - assert torch.equal(conv(x2, adj2.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.equal(conv(x1, adj2.t()), out) + assert torch.equal(conv(x2, adj2.t()), out) assert conv.histogram(out).tolist() == [[2, 2]] assert torch.allclose(conv.histogram(out, norm=True), diff --git a/test/nn/conv/test_wl_conv_continuous.py b/test/nn/conv/test_wl_conv_continuous.py index 4080da8a6ca4..733b7fbfd8a7 100644 --- a/test/nn/conv/test_wl_conv_continuous.py +++ b/test/nn/conv/test_wl_conv_continuous.py @@ -19,6 +19,7 @@ def test_wl_conv(): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index), out) + # Test bipartite message passing: x1 = torch.randn(4, 8) x2 = torch.randn(2, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 81c2316c5400..797c58ff7515 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -46,10 +46,10 @@ class SparseTensor: def __init__( self, - row: Optional[torch.Tensor] = None, - rowptr: Optional[torch.Tensor] = None, - col: Optional[torch.Tensor] = None, - value: Optional[torch.Tensor] = None, + row: Optional[Tensor] = None, + rowptr: Optional[Tensor] = None, + col: Optional[Tensor] = None, + value: Optional[Tensor] = None, sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None, is_sorted: bool = False, trust_data: bool = False, @@ -59,8 +59,8 @@ def __init__( @classmethod def from_edge_index( self, - edge_index: torch.Tensor, - edge_attr: Optional[torch.Tensor] = None, + edge_index: Tensor, + edge_attr: Optional[Tensor] = None, sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None, is_sorted: bool = False, trust_data: bool = False, @@ -76,6 +76,10 @@ def is_cuda(self) -> bool: def has_value(self) -> bool: raise ImportError("'SparseTensor' requires 'torch-sparse'") + def set_value(self, value: Optional[Tensor], + layout: Optional[str] = None) -> 'SparseTensor': + raise ImportError("'SparseTensor' requires 'torch-sparse'") + def fill_value(self, fill_value: float, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': raise ImportError("'SparseTensor' requires 'torch-sparse'") @@ -89,7 +93,7 @@ def csr(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]: def to_torch_sparse_csr_tensor( self, dtype: Optional[torch.dtype] = None, - ) -> torch.Tensor: + ) -> Tensor: raise ImportError("'SparseTensor' requires 'torch-sparse'") class torch_sparse: @@ -106,11 +110,21 @@ def sum(src: SparseTensor, dim: Optional[int] = None) -> Tensor: def mul(src: SparseTensor, other: Tensor) -> SparseTensor: raise ImportError("'mul' requires 'torch-sparse'") + @staticmethod + def 
set_diag(src: SparseTensor, values: Optional[Tensor] = None, + k: int = 0) -> SparseTensor: + raise ImportError("'set_diag' requires 'torch-sparse'") + @staticmethod def fill_diag(src: SparseTensor, fill_value: float, k: int = 0) -> SparseTensor: raise ImportError("'fill_diag' requires 'torch-sparse'") + @staticmethod + def masked_select_nnz(src: SparseTensor, mask: Tensor, + layout: Optional[str] = None) -> SparseTensor: + raise ImportError("'masked_select_nnz' requires 'torch-sparse'") + # Types for accessing data #################################################### From 306e7900ac61f0b2f45b19ba7b5fcc5cc0b845b8 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 29 Mar 2023 08:08:33 +0200 Subject: [PATCH 1074/2432] Replace `tolist()` equality checks with `torch.allclose()` (#7074) --- test/io/test_off.py | 6 ++++-- test/nn/aggr/test_sort.py | 14 ++++---------- test/nn/conv/test_agnn_conv.py | 2 +- test/nn/conv/test_cheb_conv.py | 15 ++++++++------- test/nn/conv/test_dna_conv.py | 10 +++++----- test/nn/conv/test_edge_conv.py | 8 ++++---- test/nn/conv/test_feast_conv.py | 4 ++-- test/nn/conv/test_gated_graph_conv.py | 4 ++-- test/nn/conv/test_gcn2_conv.py | 4 ++-- test/nn/conv/test_gcn_conv.py | 2 +- test/nn/conv/test_gin_conv.py | 6 +++--- test/nn/conv/test_gravnet_conv.py | 8 ++++---- test/nn/conv/test_message_passing.py | 8 ++++---- test/nn/conv/test_tag_conv.py | 4 ++-- test/nn/conv/test_x_conv.py | 4 ++-- test/nn/dense/test_linear.py | 6 +++--- test/nn/models/test_autoencoder.py | 7 ++++--- test/nn/norm/test_diff_group_norm.py | 6 +++--- test/nn/pool/test_avg_pool.py | 2 +- test/nn/pool/test_glob.py | 18 +++++++++--------- test/nn/pool/test_max_pool.py | 2 +- test/nn/test_inits.py | 18 +++++++++--------- test/nn/test_reshape.py | 2 +- 23 files changed, 79 insertions(+), 81 deletions(-) diff --git a/test/io/test_off.py b/test/io/test_off.py index 2c2abde4edd7..78d998bcc78f 100644 --- a/test/io/test_off.py +++ b/test/io/test_off.py @@ -10,12 +10,14 @@ def test_read_off(): - data = read_off(osp.join('test', 'io', 'example1.off')) + root_dir = osp.join(osp.dirname(osp.realpath(__file__))) + + data = read_off(osp.join(root_dir, 'example1.off')) assert len(data) == 2 assert data.pos.tolist() == [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]] assert data.face.tolist() == [[0, 1], [1, 2], [2, 3]] - data = read_off(osp.join('test', 'io', 'example2.off')) + data = read_off(osp.join(root_dir, 'example2.off')) assert len(data) == 2 assert data.pos.tolist() == [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]] assert data.face.tolist() == [[0, 0], [1, 2], [2, 3]] diff --git a/test/nn/aggr/test_sort.py b/test/nn/aggr/test_sort.py index caf4e4e1963a..94b74695147d 100644 --- a/test/nn/aggr/test_sort.py +++ b/test/nn/aggr/test_sort.py @@ -23,11 +23,8 @@ def test_sort_aggregation(): assert out[0, -1].tolist() == [0, 0, 0, 0] # Nodes are sorted. - expected = 3 - torch.arange(4) - assert out[0, :4, -1].argsort().tolist() == expected.tolist() - - expected = 4 - torch.arange(5) - assert out[1, :, -1].argsort().tolist() == expected.tolist() + assert torch.equal(out[0, :4, -1].argsort(), 3 - torch.arange(4)) + assert torch.equal(out[1, :, -1].argsort(), 4 - torch.arange(5)) def test_sort_aggregation_smaller_than_k(): @@ -52,11 +49,8 @@ def test_sort_aggregation_smaller_than_k(): assert out[1, -1].tolist() == [0, 0, 0, 0] # Nodes are sorted. 
- expected = 3 - torch.arange(4) - assert out[0, :4, -1].argsort().tolist() == expected.tolist() - - expected = 5 - torch.arange(6) - assert out[1, :6, -1].argsort().tolist() == expected.tolist() + assert torch.equal(out[0, :4, -1].argsort(), 3 - torch.arange(4)) + assert torch.equal(out[1, :6, -1].argsort(), 5 - torch.arange(6)) def test_sort_aggregation_dim_size(): diff --git a/test/nn/conv/test_agnn_conv.py b/test/nn/conv/test_agnn_conv.py index bb8ffda978d6..b5ee8ba343bd 100644 --- a/test/nn/conv/test_agnn_conv.py +++ b/test/nn/conv/test_agnn_conv.py @@ -27,7 +27,7 @@ def test_agnn_conv(requires_grad): if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' diff --git a/test/nn/conv/test_cheb_conv.py b/test/nn/conv/test_cheb_conv.py index 18546fa29e2a..b2e38e223b0b 100644 --- a/test/nn/conv/test_cheb_conv.py +++ b/test/nn/conv/test_cheb_conv.py @@ -22,10 +22,11 @@ def test_cheb_conv(): if is_full_test(): jit = torch.jit.script(conv.jittable()) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, edge_weight).tolist() == out2.tolist() - assert jit(x, edge_index, edge_weight, - lambda_max=torch.tensor(3.0)).tolist() == out3.tolist() + assert torch.allclose(jit(x, edge_index), out1) + assert torch.allclose(jit(x, edge_index, edge_weight), out2) + assert torch.allclose( + jit(x, edge_index, edge_weight, lambda_max=torch.tensor(3.0)), + out3) batch = torch.tensor([0, 0, 1, 1]) edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) @@ -40,6 +41,6 @@ def test_cheb_conv(): assert out5.size() == (num_nodes, out_channels) if is_full_test(): - assert jit(x, edge_index, edge_weight, batch).tolist() == out4.tolist() - assert jit(x, edge_index, edge_weight, batch, - lambda_max).tolist() == out5.tolist() + assert torch.allclose(jit(x, edge_index, edge_weight, batch), out4) + assert torch.allclose( + jit(x, edge_index, edge_weight, batch, lambda_max), out5) diff --git a/test/nn/conv/test_dna_conv.py b/test/nn/conv/test_dna_conv.py index b2f56e731023..332decb99b4c 100644 --- a/test/nn/conv/test_dna_conv.py +++ b/test/nn/conv/test_dna_conv.py @@ -22,7 +22,7 @@ def test_dna_conv(channels, num_layers): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) conv = DNAConv(channels, heads=1, groups=1, dropout=0.0) assert str(conv) == 'DNAConv(32, heads=1, groups=1)' @@ -32,7 +32,7 @@ def test_dna_conv(channels, num_layers): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) conv = DNAConv(channels, heads=1, groups=1, dropout=0.0, cached=True) out = conv(x, edge_index) @@ -43,7 +43,7 @@ def test_dna_conv(channels, num_layers): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) @pytest.mark.parametrize('channels', [32]) @@ -73,8 +73,8 @@ def test_dna_conv_sparse_tensor(channels, num_layers): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> 
Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' diff --git a/test/nn/conv/test_edge_conv.py b/test/nn/conv/test_edge_conv.py index 081bf7101ebb..c8d547b9d86e 100644 --- a/test/nn/conv/test_edge_conv.py +++ b/test/nn/conv/test_edge_conv.py @@ -105,12 +105,12 @@ def test_dynamic_edge_conv_conv(): if is_full_test(): t = '(Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1).tolist() == out11.tolist() - assert jit(x1, batch1).tolist() == out12.tolist() + assert torch.allclose(jit(x1), out11) + assert torch.allclose(jit(x1, batch1), out12) t = '(PairTensor, Optional[PairTensor]) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2)).tolist() == out21.tolist() - assert jit((x1, x2), (batch1, batch2)).tolist() == out22.tolist() + assert torch.allclose(jit((x1, x2)), out21) + assert torch.allclose(jit((x1, x2), (batch1, batch2)), out22) torch.jit.script(conv.jittable()) # Test without explicit typing. diff --git a/test/nn/conv/test_feast_conv.py b/test/nn/conv/test_feast_conv.py index a5c92ad77f9f..e6a728dea3a8 100644 --- a/test/nn/conv/test_feast_conv.py +++ b/test/nn/conv/test_feast_conv.py @@ -27,7 +27,7 @@ def test_feast_conv(): if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out.tolist() + assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' @@ -48,7 +48,7 @@ def test_feast_conv(): if is_full_test(): t = '(PairTensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out.tolist() + assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(PairTensor, SparseTensor) -> Tensor' diff --git a/test/nn/conv/test_gated_graph_conv.py b/test/nn/conv/test_gated_graph_conv.py index f2d9a7bf8ab7..ab1301c9c35d 100644 --- a/test/nn/conv/test_gated_graph_conv.py +++ b/test/nn/conv/test_gated_graph_conv.py @@ -32,8 +32,8 @@ def test_gated_graph_conv(): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' diff --git a/test/nn/conv/test_gcn2_conv.py b/test/nn/conv/test_gcn2_conv.py index c50e18b1febf..770f60615f95 100644 --- a/test/nn/conv/test_gcn2_conv.py +++ b/test/nn/conv/test_gcn2_conv.py @@ -33,8 +33,8 @@ def test_gcn2_conv(): if is_full_test(): t = '(Tensor, Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, x_0, edge_index).tolist() == out1.tolist() - assert jit(x, x_0, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, x_0, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, x_0, edge_index, 
value), out2, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, Tensor, SparseTensor, OptTensor) -> Tensor' diff --git a/test/nn/conv/test_gcn_conv.py b/test/nn/conv/test_gcn_conv.py index b43f90080272..2efc52e27af9 100644 --- a/test/nn/conv/test_gcn_conv.py +++ b/test/nn/conv/test_gcn_conv.py @@ -75,7 +75,7 @@ def test_gcn_conv_with_decomposed_layers(): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(decomposed_conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() + assert torch.allclose(jit(x, edge_index), out1) def test_gcn_conv_with_sparse_input_feature(): diff --git a/test/nn/conv/test_gin_conv.py b/test/nn/conv/test_gin_conv.py index bd4429aa0258..c14f1922c7b2 100644 --- a/test/nn/conv/test_gin_conv.py +++ b/test/nn/conv/test_gin_conv.py @@ -36,13 +36,13 @@ def test_gin_conv(): if is_full_test(): t = '(Tensor, Tensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out.tolist() - assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist() + assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) + assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj2.t()).tolist() == out.tolist() + assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) diff --git a/test/nn/conv/test_gravnet_conv.py b/test/nn/conv/test_gravnet_conv.py index f963597406a2..a587d376fd04 100644 --- a/test/nn/conv/test_gravnet_conv.py +++ b/test/nn/conv/test_gravnet_conv.py @@ -29,12 +29,12 @@ def test_gravnet_conv(): if is_full_test(): t = '(Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x1).tolist() == out11.tolist() - assert jit(x1, batch1).tolist() == out12.tolist() + assert torch.allclose(jit(x1), out11) + assert torch.allclose(jit(x1, batch1), out12) t = '(PairTensor, Optional[PairTensor]) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2)).tolist() == out21.tolist() - assert jit((x1, x2), (batch1, batch2)).tolist() == out22.tolist() + assert torch.allclose(jit((x1, x2)), out21) + assert torch.allclose(jit((x1, x2), (batch1, batch2)), out22) torch.jit.script(conv.jittable()) # Test without explicit typing. 
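A rough aside on the motivation for replacing exact `tolist()` equality with `torch.allclose` in these tests: list equality compares floats bit-for-bit, so two numerically equivalent code paths (for example, dense versus sparse message passing) can fail the check on harmless rounding noise, while `torch.allclose` compares within a tolerance. The toy tensors and the `1e-6` tolerance below are made up for illustration and are not taken from the test suite:

    import torch

    # Toy values for illustration only.
    out1 = torch.tensor([0.1, 0.2, 0.3]) + 1e-7  # result of one code path
    out2 = torch.tensor([0.1, 0.2, 0.3])         # numerically equivalent result

    # Bit-exact comparison trips over benign floating-point noise:
    assert out1.tolist() != out2.tolist()

    # Tolerance-based comparison treats the two results as equal:
    assert torch.allclose(out1, out2, atol=1e-6)
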
diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index 46533b089410..0454fa093d30 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -266,15 +266,15 @@ def test_copy(): conv2 = copy.copy(conv) assert conv != conv2 - assert conv.lin_l.weight.tolist() == conv2.lin_l.weight.tolist() - assert conv.lin_r.weight.tolist() == conv2.lin_r.weight.tolist() + assert torch.equal(conv.lin_l.weight, conv2.lin_l.weight) + assert torch.equal(conv.lin_r.weight, conv2.lin_r.weight) assert conv.lin_l.weight.data_ptr == conv2.lin_l.weight.data_ptr assert conv.lin_r.weight.data_ptr == conv2.lin_r.weight.data_ptr conv = copy.deepcopy(conv) assert conv != conv2 - assert conv.lin_l.weight.tolist() == conv2.lin_l.weight.tolist() - assert conv.lin_r.weight.tolist() == conv2.lin_r.weight.tolist() + assert torch.equal(conv.lin_l.weight, conv2.lin_l.weight) + assert torch.equal(conv.lin_r.weight, conv2.lin_r.weight) assert conv.lin_l.weight.data_ptr != conv2.lin_l.weight.data_ptr assert conv.lin_r.weight.data_ptr != conv2.lin_r.weight.data_ptr diff --git a/test/nn/conv/test_tag_conv.py b/test/nn/conv/test_tag_conv.py index 9caa2e38af94..16c2ec4d5698 100644 --- a/test/nn/conv/test_tag_conv.py +++ b/test/nn/conv/test_tag_conv.py @@ -34,8 +34,8 @@ def test_tag_conv(): if is_full_test(): t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor) -> Tensor' diff --git a/test/nn/conv/test_x_conv.py b/test/nn/conv/test_x_conv.py index 46f0e864dae0..0edf12aa3b59 100644 --- a/test/nn/conv/test_x_conv.py +++ b/test/nn/conv/test_x_conv.py @@ -25,7 +25,7 @@ def test_x_conv(): jit = torch.jit.script(conv) torch.manual_seed(12345) - assert jit(x, pos).tolist() == out1.tolist() + assert torch.allclose(jit(x, pos), out1, atol=1e-6) torch.manual_seed(12345) - assert jit(x, pos, batch).tolist() == out2.tolist() + assert torch.allclose(jit(x, pos, batch), out2, atol=1e-6) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index e3b04cd46590..c74c13d272ac 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -65,9 +65,9 @@ def test_identical_linear_default_initialization(lazy): torch.manual_seed(12345) lin2 = PTLinear(16, 32) - assert lin1.weight.tolist() == lin2.weight.tolist() - assert lin1.bias.tolist() == lin2.bias.tolist() - assert lin1(x).tolist() == lin2(x).tolist() + assert torch.equal(lin1.weight, lin2.weight) + assert torch.equal(lin1.bias, lin2.bias) + assert torch.allclose(lin1(x), lin2(x)) @withPackage('torch<=1.12') diff --git a/test/nn/models/test_autoencoder.py b/test/nn/models/test_autoencoder.py index 5ff753705004..0d783531208c 100644 --- a/test/nn/models/test_autoencoder.py +++ b/test/nn/models/test_autoencoder.py @@ -13,11 +13,12 @@ def test_gae(): x = torch.Tensor([[1, -1], [1, 2], [2, 1]]) z = model.encode(x) - assert z.tolist() == x.tolist() + assert torch.allclose(z, x) adj = model.decoder.forward_all(z) - assert adj.tolist() == torch.sigmoid( - torch.Tensor([[+2, -1, +1], [-1, +5, +4], [+1, +4, +5]])).tolist() + assert torch.allclose( + adj, + torch.Tensor([[+2, -1, +1], [-1, +5, +4], [+1, +4, +5]]).sigmoid()) 
edge_index = torch.tensor([[0, 1], [1, 2]]) value = model.decode(z, edge_index) diff --git a/test/nn/norm/test_diff_group_norm.py b/test/nn/norm/test_diff_group_norm.py index 516e9b9dbf59..feb15188ea86 100644 --- a/test/nn/norm/test_diff_group_norm.py +++ b/test/nn/norm/test_diff_group_norm.py @@ -10,11 +10,11 @@ def test_diff_group_norm(): norm = DiffGroupNorm(16, groups=4, lamda=0) assert str(norm) == 'DiffGroupNorm(16, groups=4)' - assert norm(x).tolist() == x.tolist() + assert torch.allclose(norm(x), x) if is_full_test(): jit = torch.jit.script(norm) - assert jit(x).tolist() == x.tolist() + assert torch.allclose(jit(x), x) norm = DiffGroupNorm(16, groups=4, lamda=0.01) assert str(norm) == 'DiffGroupNorm(16, groups=4)' @@ -24,7 +24,7 @@ def test_diff_group_norm(): if is_full_test(): jit = torch.jit.script(norm) - assert jit(x).tolist() == out.tolist() + assert torch.alllclose(jit(x), out) def test_group_distance_ratio(): diff --git a/test/nn/pool/test_avg_pool.py b/test/nn/pool/test_avg_pool.py index af6cbe58cbd4..c1e0f6dd8962 100644 --- a/test/nn/pool/test_avg_pool.py +++ b/test/nn/pool/test_avg_pool.py @@ -67,4 +67,4 @@ def test_avg_pool_neighbor_x(): [10, 11], [10, 11], ] - assert data.edge_index.tolist() == edge_index.tolist() + assert torch.equal(data.edge_index, edge_index) diff --git a/test/nn/pool/test_glob.py b/test/nn/pool/test_glob.py index 4ff08c363c96..b22b1746d8f7 100644 --- a/test/nn/pool/test_glob.py +++ b/test/nn/pool/test_glob.py @@ -14,30 +14,30 @@ def test_global_pool(): out = global_add_pool(x, batch) assert out.size() == (2, 4) - assert out[0].tolist() == x[:4].sum(dim=0).tolist() - assert out[1].tolist() == x[4:].sum(dim=0).tolist() + assert torch.allclose(out[0], x[:4].sum(dim=0)) + assert torch.allclose(out[1], x[4:].sum(dim=0)) out = global_add_pool(x, None) assert out.size() == (1, 4) - assert out.tolist() == x.sum(dim=0, keepdim=True).tolist() + assert torch.allclose(out, x.sum(dim=0, keepdim=True)) out = global_mean_pool(x, batch) assert out.size() == (2, 4) - assert out[0].tolist() == x[:4].mean(dim=0).tolist() - assert out[1].tolist() == x[4:].mean(dim=0).tolist() + assert torch.allclose(out[0], x[:4].mean(dim=0)) + assert torch.allclose(out[1], x[4:].mean(dim=0)) out = global_mean_pool(x, None) assert out.size() == (1, 4) - assert out.tolist() == x.mean(dim=0, keepdim=True).tolist() + assert torch.allclose(out, x.mean(dim=0, keepdim=True)) out = global_max_pool(x, batch) assert out.size() == (2, 4) - assert out[0].tolist() == x[:4].max(dim=0)[0].tolist() - assert out[1].tolist() == x[4:].max(dim=0)[0].tolist() + assert torch.allclose(out[0], x[:4].max(dim=0)[0]) + assert torch.allclose(out[1], x[4:].max(dim=0)[0]) out = global_max_pool(x, None) assert out.size() == (1, 4) - assert out.tolist() == x.max(dim=0, keepdim=True)[0].tolist() + assert torch.allclose(out, x.max(dim=0, keepdim=True)[0]) def test_permuted_global_pool(): diff --git a/test/nn/pool/test_max_pool.py b/test/nn/pool/test_max_pool.py index 506e6f0a62b3..a6c4921fbaff 100644 --- a/test/nn/pool/test_max_pool.py +++ b/test/nn/pool/test_max_pool.py @@ -67,4 +67,4 @@ def test_max_pool_neighbor_x(): [11, 12], [11, 12], ] - assert data.edge_index.tolist() == edge_index.tolist() + assert torch.equal(data.edge_index, edge_index) diff --git a/test/nn/test_inits.py b/test/nn/test_inits.py index 6d402f8f5e58..1c04394f0f5a 100644 --- a/test/nn/test_inits.py +++ b/test/nn/test_inits.py @@ -36,26 +36,26 @@ def test_inits(): nn = Lin(16, 16) uniform(size=4, value=nn.weight) - assert 
min(nn.weight.tolist()[0]) >= -0.5 - assert max(nn.weight.tolist()[0]) <= 0.5 + assert nn.weight[0].min() >= -0.5 + assert nn.weight[0].max() <= 0.5 glorot(nn.weight) - assert min(nn.weight.tolist()[0]) >= -1.25 - assert max(nn.weight.tolist()[0]) <= 1.25 + assert nn.weight[0].min() >= -1.25 + assert nn.weight[0].max() <= 1.25 glorot_orthogonal(nn.weight, scale=1.0) - assert min(nn.weight.tolist()[0]) >= -1.25 - assert max(nn.weight.tolist()[0]) <= 1.25 + assert nn.weight[0].min() >= -1.25 + assert nn.weight[0].max() <= 1.25 def test_reset(): nn = Lin(16, 16) w = nn.weight.clone() reset(nn) - assert not nn.weight.tolist() == w.tolist() + assert not torch.allclose(nn.weight, w) nn = Seq(Lin(16, 16), ReLU(), Lin(16, 16)) w_1, w_2 = nn[0].weight.clone(), nn[2].weight.clone() reset(nn) - assert not nn[0].weight.tolist() == w_1.tolist() - assert not nn[2].weight.tolist() == w_2.tolist() + assert not torch.allclose(nn[0].weight, w_1) + assert not torch.allclose(nn[2].weight, w_2) diff --git a/test/nn/test_reshape.py b/test/nn/test_reshape.py index a0c7232b1303..6229c85f8140 100644 --- a/test/nn/test_reshape.py +++ b/test/nn/test_reshape.py @@ -10,4 +10,4 @@ def test_reshape(): assert str(op) == 'Reshape(5, 2, 4)' assert op(x).size() == (5, 2, 4) - assert op(x).view(10, 4).tolist() == x.tolist() + assert torch.equal(op(x).view(10, 4), x) From 692a8ce4498e5f1349d80eaad44488d63c2fb4ea Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 29 Mar 2023 09:16:34 +0200 Subject: [PATCH 1075/2432] Extend nightly tests to include different PyTorch versions (#7073) --- .github/actions/setup/action.yml | 4 ++-- .github/workflows/full_testing.yml | 12 +++++++++++- .github/workflows/latest_testing.yml | 1 - test/nn/models/test_graph_unet.py | 3 ++- test/nn/norm/test_diff_group_norm.py | 2 +- test/nn/pool/test_asap.py | 3 ++- test/nn/pool/test_pool.py | 3 ++- test/transforms/test_add_positional_encoding.py | 2 ++ test/transforms/test_two_hop.py | 2 ++ torch_geometric/nn/pool/asap.py | 5 ++++- 10 files changed, 28 insertions(+), 9 deletions(-) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 4bb57fa8f493..bf97c4d60963 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -47,14 +47,14 @@ runs: shell: bash - name: Install extension packages - if: ${{ inputs.full_install == 'true' }} + if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' }} run: | pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} pip install torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash - name: Install pyg-lib # pyg-lib is currently only available on Linux. 
- if: ${{ inputs.full_install == 'true' && runner.os == 'Linux' }} + if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' && runner.os == 'Linux' }} run: | pip install pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 7d051123637d..94f06799d05d 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -15,7 +15,15 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest] - python-version: ['3.8', '3.9', '3.10'] + python-version: ['3.8', '3.10'] + torch-version: [1.13.0, 2.0.0, nightly] + include: + - torch-version: 1.13.0 + torchvision-version: 0.14.0 + - torch-version: 2.0.0 + torchvision-version: 0.15.0 + - torch-version: nightly + torchvision-version: nightly steps: - name: Checkout repository @@ -25,6 +33,8 @@ jobs: uses: ./.github/actions/setup with: python-version: ${{ matrix.python-version }} + torch-version: ${{ matrix.torch-version }} + torchvision-version: ${{ matrix.torchvision-version }} - name: Install graphviz if: ${{ runner.os == 'Linux' }} diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index cc8c02f6855a..df301bd13874 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -37,7 +37,6 @@ jobs: uses: ./.github/actions/setup with: torch-version: nightly - full_install: false - name: Install main package if: steps.changed-files-specific.outputs.only_changed != 'true' diff --git a/test/nn/models/test_graph_unet.py b/test/nn/models/test_graph_unet.py index 80987bed71a7..cb15a2f80637 100644 --- a/test/nn/models/test_graph_unet.py +++ b/test/nn/models/test_graph_unet.py @@ -1,9 +1,10 @@ import torch from torch_geometric.nn import GraphUNet -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, onlyLinux +@onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. def test_graph_unet(): model = GraphUNet(16, 32, 8, depth=3) out = 'GraphUNet(16, 32, 8, depth=3, pool_ratios=[0.5, 0.5, 0.5])' diff --git a/test/nn/norm/test_diff_group_norm.py b/test/nn/norm/test_diff_group_norm.py index feb15188ea86..76279ec0590f 100644 --- a/test/nn/norm/test_diff_group_norm.py +++ b/test/nn/norm/test_diff_group_norm.py @@ -24,7 +24,7 @@ def test_diff_group_norm(): if is_full_test(): jit = torch.jit.script(norm) - assert torch.alllclose(jit(x), out) + assert torch.allclose(jit(x), out) def test_group_distance_ratio(): diff --git a/test/nn/pool/test_asap.py b/test/nn/pool/test_asap.py index e31276dc5539..b77c91ef7f99 100644 --- a/test/nn/pool/test_asap.py +++ b/test/nn/pool/test_asap.py @@ -4,9 +4,10 @@ import torch from torch_geometric.nn import ASAPooling, GCNConv, GraphConv -from torch_geometric.testing import is_full_test, onlyFullTest +from torch_geometric.testing import is_full_test, onlyFullTest, onlyLinux +@onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. 
def test_asap(): in_channels = 16 edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], diff --git a/test/nn/pool/test_pool.py b/test/nn/pool/test_pool.py index ced5436dd2a4..96f3a2e3fc33 100644 --- a/test/nn/pool/test_pool.py +++ b/test/nn/pool/test_pool.py @@ -4,10 +4,11 @@ from torch import Tensor from torch_geometric.nn import radius_graph -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, withPackage @onlyFullTest +@withPackage('torch_cluster') def test_radius_graph_jit(): class Net(torch.nn.Module): def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor: diff --git a/test/transforms/test_add_positional_encoding.py b/test/transforms/test_add_positional_encoding.py index ab64a3490c54..414e615526a5 100644 --- a/test/transforms/test_add_positional_encoding.py +++ b/test/transforms/test_add_positional_encoding.py @@ -3,6 +3,7 @@ import torch from torch_geometric.data import Data +from torch_geometric.testing import onlyLinux from torch_geometric.transforms import ( AddLaplacianEigenvectorPE, AddRandomWalkPE, @@ -52,6 +53,7 @@ def test_add_laplacian_eigenvector_pe(): assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) +@onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. def test_add_random_walk_pe(): x = torch.randn(6, 4) edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5], diff --git a/test/transforms/test_two_hop.py b/test/transforms/test_two_hop.py index ac63e6e69e8c..934a10056513 100644 --- a/test/transforms/test_two_hop.py +++ b/test/transforms/test_two_hop.py @@ -1,9 +1,11 @@ import torch from torch_geometric.data import Data +from torch_geometric.testing import onlyLinux from torch_geometric.transforms import TwoHop +@onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. def test_two_hop(): transform = TwoHop() assert str(transform) == 'TwoHop()' diff --git a/torch_geometric/nn/pool/asap.py b/torch_geometric/nn/pool/asap.py index d5cf75653969..4fa07277a144 100644 --- a/torch_geometric/nn/pool/asap.py +++ b/torch_geometric/nn/pool/asap.py @@ -145,7 +145,10 @@ def forward( S = S.index_select(1, perm).to_sparse_csr() A = S.t().to_sparse_csr() @ (A @ S) - edge_index, edge_weight = to_edge_index(A) + if edge_weight is None: + edge_index, _ = to_edge_index(A) + else: + edge_index, edge_weight = to_edge_index(A) if self.add_self_loops: edge_index, edge_weight = add_remaining_self_loops( From f749baa18d61a510f714bd710eb24119ca059696 Mon Sep 17 00:00:00 2001 From: OlegPlatonov <32016523+OlegPlatonov@users.noreply.github.com> Date: Wed, 29 Mar 2023 10:26:18 +0300 Subject: [PATCH 1076/2432] Make edges in `HeterohilousGraphDataset` undirected (#7065) Hi! The graphs [here](https://github.com/yandex-research/heterophilous-graphs) are meant to be undirected. Since PyG treats all graphs as directed, I've added a call to transforms.ToUndirected to double the edges. --------- Co-authored-by: Matthias Fey Co-authored-by: Jintang Li --- CHANGELOG.md | 1 + torch_geometric/datasets/heterophilous_graph_dataset.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d8bf4ab8e8dd..86e8766ac03e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Edges in `HeterophilousGraphDataset` are now undirected by default ([#7065](https://github.com/pyg-team/pytorch_geometric/pull/7065)) - Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050)) - Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) diff --git a/torch_geometric/datasets/heterophilous_graph_dataset.py b/torch_geometric/datasets/heterophilous_graph_dataset.py index c3063fe97e8c..d63fb806536d 100644 --- a/torch_geometric/datasets/heterophilous_graph_dataset.py +++ b/torch_geometric/datasets/heterophilous_graph_dataset.py @@ -5,6 +5,7 @@ import torch from torch_geometric.data import Data, InMemoryDataset, download_url +from torch_geometric.utils import to_undirected class HeterophilousGraphDataset(InMemoryDataset): @@ -111,6 +112,7 @@ def process(self): x = torch.from_numpy(raw['node_features']) y = torch.from_numpy(raw['node_labels']) edge_index = torch.from_numpy(raw['edges']).t().contiguous() + edge_index = to_undirected(edge_index, num_nodes=x.size(0)) train_mask = torch.from_numpy(raw['train_masks']).t().contiguous() val_mask = torch.from_numpy(raw['val_masks']).t().contiguous() test_mask = torch.from_numpy(raw['test_masks']).t().contiguous() From 9ab1b05108ca233d5edb3265fde069b6a9ed3e48 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 29 Mar 2023 01:04:54 -0700 Subject: [PATCH 1077/2432] `FastHGT`: Minor additions (#7030) should be ready to merge --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/conv/test_hgt_conv.py | 35 +++++++- test/utils/test_hetero.py | 38 +++++++++ torch_geometric/nn/conv/fast_hgt_conv.py | 100 ++++++----------------- torch_geometric/utils/hetero.py | 75 ++++++++++++++++- 4 files changed, 169 insertions(+), 79 deletions(-) create mode 100644 test/utils/test_hetero.py diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index bb41663d2e4a..65e07d5ded03 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -2,7 +2,7 @@ import torch_geometric.typing from torch_geometric.data import HeteroData -from torch_geometric.nn import HGTConv +from torch_geometric.nn import FastHGTConv, HGTConv from torch_geometric.profile import benchmark from torch_geometric.testing import get_random_edge_index from torch_geometric.typing import SparseTensor @@ -178,6 +178,39 @@ def test_hgt_conv_out_of_place(): assert x_dict['paper'].size() == (6, 32) +def test_fast_hgt_conv(): + x_dict = { + 'v0': torch.randn(5, 4), + 'v1': torch.randn(5, 4), + 'v2': torch.randn(5, 4), + } + + edge_index_dict = { + ('v0', 'e1', 'v0'): torch.randint(0, 5, size=(2, 10)), + ('v0', 'e2', 'v1'): torch.randint(0, 5, size=(2, 10)), + } + + metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) + conv1 = HGTConv(4, 2, metadata) + conv2 = FastHGTConv(4, 2, metadata) + + # Make parameters match: + for my_param in conv1.parameters(): + my_param.data.fill_(1) + for og_param in conv2.parameters(): + og_param.data.fill_(1) + + out_dict1 = conv1(x_dict, edge_index_dict) + out_dict2 = conv2(x_dict, edge_index_dict) + + assert len(out_dict1) == len(out_dict2) + for key, out1 in 
out_dict1.items(): + out2 = out_dict2[key] + if out1 is None and out2 is None: + continue + assert torch.allclose(out1, out2) + + if __name__ == '__main__': import argparse diff --git a/test/utils/test_hetero.py b/test/utils/test_hetero.py new file mode 100644 index 000000000000..ccf4c90ab19f --- /dev/null +++ b/test/utils/test_hetero.py @@ -0,0 +1,38 @@ +import torch + +from torch_geometric.testing import get_random_edge_index +from torch_geometric.utils.hetero import construct_bipartite_edge_index + + +def test_construct_bipartite_edge_index(): + edge_index = get_random_edge_index(4, 6, num_edges=20) + + edge_index_dict = { + ('author', 'paper'): edge_index, + ('paper', 'author'): edge_index.flip([0]), + } + edge_attr_dict = { + ('author', 'paper'): torch.randn(edge_index.size(1), 16), + ('paper', 'author'): torch.randn(edge_index.size(1), 16) + } + + edge_index, edge_attr = construct_bipartite_edge_index( + edge_index_dict, + src_offset_dict={ + ('author', 'paper'): 0, + ('paper', 'author'): 4 + }, + dst_offset_dict={ + 'author': 0, + 'paper': 4 + }, + edge_attr_dict=edge_attr_dict, + ) + + assert edge_index.size() == (2, 40) + assert edge_index.min() >= 0 + assert edge_index[0].max() > 4 and edge_index[1].max() > 6 + assert edge_index.max() <= 10 + assert edge_attr.size() == (40, 16) + assert torch.equal(edge_attr[:20], edge_attr_dict['author', 'paper']) + assert torch.equal(edge_attr[20:], edge_attr_dict['paper', 'author']) diff --git a/torch_geometric/nn/conv/fast_hgt_conv.py b/torch_geometric/nn/conv/fast_hgt_conv.py index 4a4304a80e66..5c0d772a897f 100644 --- a/torch_geometric/nn/conv/fast_hgt_conv.py +++ b/torch_geometric/nn/conv/fast_hgt_conv.py @@ -1,9 +1,7 @@ import math -from collections import defaultdict from typing import Dict, List, Optional, Tuple, Union import torch -import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter @@ -11,14 +9,9 @@ from torch_geometric.nn.dense import HeteroDictLinear, HeteroLinear from torch_geometric.nn.inits import ones from torch_geometric.nn.parameter_dict import ParameterDict -from torch_geometric.typing import ( - Adj, - EdgeType, - Metadata, - NodeType, - SparseTensor, -) +from torch_geometric.typing import Adj, EdgeType, Metadata, NodeType from torch_geometric.utils import softmax +from torch_geometric.utils.hetero import construct_bipartite_edge_index class FastHGTConv(MessagePassing): @@ -43,14 +36,13 @@ def __init__( self.in_channels = in_channels self.out_channels = out_channels self.heads = heads - self.node_types = metadata[0] self.edge_types = metadata[1] + self.dst_node_types = list(set(metadata[1][1])) self.src_types = [edge_type[0] for edge_type in self.edge_types] - self.k_lin = HeteroDictLinear(self.in_channels, self.out_channels) - self.q_lin = HeteroDictLinear(self.in_channels, self.out_channels) - self.v_lin = HeteroDictLinear(self.in_channels, self.out_channels) + self.kqv_lin = HeteroDictLinear(self.in_channels, + self.out_channels * 3) self.out_lin = HeteroDictLinear(self.out_channels, self.out_channels, types=self.node_types) @@ -77,9 +69,7 @@ def __init__( def reset_parameters(self): super().reset_parameters() - self.k_lin.reset_parameters() - self.q_lin.reset_parameters() - self.v_lin.reset_parameters() + self.kqv_lin.reset_parameters() self.out_lin.reset_parameters() self.k_rel.reset_parameters() self.v_rel.reset_parameters() @@ -115,8 +105,8 @@ def _construct_src_node_feat( for edge_type in self.edge_types: src, _, _ = edge_type - ks.append(k_dict[src].view(-1, D)) - 
vs.append(v_dict[src].view(-1, D)) + ks.append(k_dict[src].reshape(-1, D)) + vs.append(v_dict[src].reshape(-1, D)) N = k_dict[src].size(0) for _ in range(H): @@ -131,49 +121,6 @@ def _construct_src_node_feat( return k, v, offset - def _construct_edge_index( - self, - edge_index_dict: Dict[EdgeType, Adj], - src_offset: Dict[EdgeType, int], - dst_offset: [NodeType, int], - ) -> Tuple[Adj, Tensor]: - """Constructs a tensor of edge indices by concatenating edge indices - for each edge type. The edge indices are increased by the offset of the - source and destination nodes.""" - edge_indices: List[Tensor] = [] - ps: List[Tensor] = [] - - for edge_type in self.edge_types: - _, _, dst_type = edge_type - - edge_index = edge_index_dict[edge_type] - - # (TODO) Add support for SparseTensor w/o converting. - is_sparse = isinstance(edge_index, SparseTensor) - if is_sparse: # Convert to COO - dst, src, _ = edge_index.coo() - edge_index = torch.stack([src, dst], dim=0) - else: - edge_index = edge_index.clone() - - p = self.p_rel['__'.join(edge_type)].expand(edge_index.size(1), -1) - ps.append(p) - - # Add offset to edge indices: - edge_index[0] += src_offset[edge_type] - edge_index[1] += dst_offset[dst_type] - edge_indices.append(edge_index) - - # Concatenate all edges and edge tensors: - p = torch.cat(ps, dim=0) - edge_index = torch.cat(edge_indices, dim=1) - - if is_sparse: - edge_index = SparseTensor(row=edge_index[1], col=edge_index[0], - value=p) - - return edge_index, p - def forward( self, x_dict: Dict[NodeType, Tensor], @@ -195,26 +142,24 @@ def forward( In case a node type does not receive any message, its output will be set to :obj:`None`. """ - H, D = self.heads, self.out_channels // self.heads + F = self.out_channels + H = self.heads + D = F // H - k_dict, q_dict, v_dict = {}, {}, {} - out_dict = defaultdict(list) + k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {} # Compute K, Q, V over node types: - k_dict = self.k_lin(x_dict) - k_dict = {k: v.view(-1, H, D) for k, v in k_dict.items()} - - q_dict = self.q_lin(x_dict) - q_dict = {k: v.view(-1, H, D) for k, v in q_dict.items()} - - v_dict = self.v_lin(x_dict) - v_dict = {k: v.view(-1, H, D) for k, v in v_dict.items()} + kqv_dict = self.kqv_lin(x_dict) + for key, val in kqv_dict.items(): + k_dict[key] = val[:, :F].view(-1, H, D) + q_dict[key] = val[:, F:2 * F].view(-1, H, D) + v_dict[key] = val[:, 2 * F:].view(-1, H, D) q, dst_offset = self._cat(q_dict) k, v, src_offset = self._construct_src_node_feat(k_dict, v_dict) - edge_index, edge_attr = self._construct_edge_index( - edge_index_dict, src_offset, dst_offset) + edge_index, edge_attr = construct_bipartite_edge_index( + edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel) out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr, size=None) @@ -225,11 +170,14 @@ def forward( out_dict[node_type] = out[start_offset:end_offset] # Transform output node embeddings: - a_dict = self.out_lin({k: F.gelu(v) for k, v in out_dict.items()}) + a_dict = self.out_lin({ + k: torch.nn.functional.gelu(v) if v is not None else v + for k, v in out_dict.items() + }) # Iterate over node types: for node_type, out in out_dict.items(): - if out is None: + if out is None or node_type not in self.dst_node_types: out_dict[node_type] = None continue else: diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index 103bc6299f18..d1920271b7d3 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -1,8 +1,10 @@ -from typing import List, Set 
+from typing import Dict, List, Optional, Set, Tuple import torch +from torch import Tensor +from torch.nn import ParameterDict -from torch_geometric.typing import EdgeType, NodeType +from torch_geometric.typing import Adj, EdgeType, NodeType, SparseTensor from torch_geometric.utils.num_nodes import maybe_num_nodes_dict @@ -55,3 +57,72 @@ def check_add_self_loops(module: torch.nn.Module, edge_types: List[EdgeType]): f"'add_self_loops' attribute set to 'True' on module '{module}' " f"for use with edge type(s) '{edge_types}'. This will lead to " f"incorrect message passing results.") + + +def construct_bipartite_edge_index( + edge_index_dict: Dict[EdgeType, Adj], + src_offset_dict: Dict[EdgeType, int], + dst_offset_dict: Dict[NodeType, int], + edge_attr_dict: Optional[Dict[EdgeType, Tensor]] = None, +) -> Tuple[Adj, Optional[Tensor]]: + """Constructs a tensor of edge indices by concatenating edge indices + for each edge type. The edge indices are increased by the offset of the + source and destination nodes. + + Args: + edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A + dictionary holding graph connectivity information for each + individual edge type, either as a :class:`torch.Tensor` of + shape :obj:`[2, num_edges]` or a + :class:`torch_sparse.SparseTensor`. + src_offset_dict (Dict[Tuple[str, str, str], int]): A dictionary of + offsets to apply to the source node type for each edge type. + src_offset_dict (Dict[str, int]): A dictionary of offsets to apply for + destination node types. + edge_attr_dict (Dict[Tuple[str, str, str], torch.Tensor]): A + dictionary holding edge features for each individual edge type. + (default: :obj:`None`) + """ + is_sparse = False + edge_indices: List[Tensor] = [] + edge_attrs: List[Tensor] = [] + for edge_type, src_offset in src_offset_dict.items(): + edge_index = edge_index_dict[edge_type] + dst_offset = dst_offset_dict[edge_type[-1]] + + # TODO Add support for SparseTensor w/o converting. + is_sparse = isinstance(edge_index, SparseTensor) + if is_sparse: + col, row, _ = edge_index.coo() + edge_index = torch.stack([row, col], dim=0) + else: + edge_index = edge_index.clone() + + edge_index[0] += src_offset + edge_index[1] += dst_offset + edge_indices.append(edge_index) + + if edge_attr_dict is not None: + if isinstance(edge_attr_dict, ParameterDict): + edge_attr = edge_attr_dict['__'.join(edge_type)] + else: + edge_attr = edge_attr_dict[edge_type] + if edge_attr.size(0) != edge_index.size(1): + edge_attr = edge_attr.expand(edge_index.size(1), -1) + edge_attrs.append(edge_attr) + + edge_index = torch.cat(edge_indices, dim=1) + + edge_attr: Optional[Tensor] = None + if edge_attr_dict is not None: + edge_attr = torch.cat(edge_attrs, dim=0) + + if is_sparse: + # TODO Add support for `SparseTensor.sparse_sizes()`. + edge_index = SparseTensor( + row=edge_index[1], + col=edge_index[0], + value=edge_attr, + ) + + return edge_index, edge_attr From 47ccf2a688fd56527a0fb0b63f8460ac197cc9de Mon Sep 17 00:00:00 2001 From: YanbingJiang Date: Wed, 29 Mar 2023 18:23:45 +0800 Subject: [PATCH 1078/2432] Fix hang issue of import pandas (#7075) This PR is to change the import location of pandas in `benchmark/utils/utils.py` to fix the hang issue, which is found when using ipex launcher. 
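The fix that follows is the standard deferred-import pattern: the heavy dependency is imported inside the one function that needs it, so importing the benchmark utilities themselves stays cheap and sidesteps the launcher-specific hang. A rough sketch of the pattern (the function body below is illustrative, not the exact benchmark code):

```python
import os


def write_to_csv(rows, path='results/benchmark.csv'):
    # Deferred import: pandas is only loaded when results are actually
    # written, so importing this module never touches pandas.
    import pandas as pd

    os.makedirs(os.path.dirname(path), exist_ok=True)
    pd.DataFrame(rows).to_csv(path, index=False)
```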
--- benchmark/utils/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/utils/utils.py b/benchmark/utils/utils.py index c8b8f0901ebf..c8d32b9e68ef 100644 --- a/benchmark/utils/utils.py +++ b/benchmark/utils/utils.py @@ -2,7 +2,6 @@ import os.path as osp from datetime import datetime -import pandas as pd import torch from ogb.nodeproppred import PygNodePropPredDataset from tqdm import tqdm @@ -143,6 +142,7 @@ def save_benchmark_data(csv_data, batch_size, layers, num_neighbors, def write_to_csv(csv_data, training=False): + import pandas as pd results_path = osp.join(osp.dirname(osp.realpath(__file__)), '../results/') os.makedirs(results_path, exist_ok=True) From 98a4d72e3666cf70d8ada47a46c940e2d2d7f43c Mon Sep 17 00:00:00 2001 From: Benedek Harsanyi <80836204+hbenedek@users.noreply.github.com> Date: Wed, 29 Mar 2023 15:58:39 +0200 Subject: [PATCH 1079/2432] Add `to_dgl` and `from_dgl` conversions (#7053) Issue https://github.com/pyg-team/pytorch_geometric/issues/6979 ## Description I add two functions, `from_dgl(dgl.Graph) -> data: Union[Data, HeteroData]` and `to_dgl(Union[Data, HeteroData]) -> dgl.Graph`, in `utils/convert.py`. In both directions, each function performs the same three operations: it sets the node attributes, sets the edge attributes, and copies the edges of the underlying graph(s). --------- Co-authored-by: Jintang Li Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/utils/test_convert.py | 98 +++++++++++++++++++- torch_geometric/utils/__init__.py | 3 + torch_geometric/utils/convert.py | 148 ++++++++++++++++++++++++++++++ 4 files changed, 249 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86e8766ac03e..4561d58d663a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Added +- Added `to_dgl` and `from_dgl` conversion functions ([#7053](https://github.com/pyg-team/pytorch_geometric/pull/7053)) - Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index a00ae6160a09..cf9762f499a9 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -2,10 +2,11 @@ import scipy.sparse import torch -from torch_geometric.data import Data +from torch_geometric.data import Data, HeteroData from torch_geometric.testing import withPackage from torch_geometric.utils import ( from_cugraph, + from_dgl, from_networkit, from_networkx, from_scipy_sparse_matrix, @@ -13,6 +14,7 @@ sort_edge_index, subgraph, to_cugraph, + to_dgl, to_networkit, to_networkx, to_scipy_sparse_matrix, @@ -481,3 +483,97 @@ def test_from_cugraph(edge_weight, directed, relabel_nodes): assert torch.allclose(edge_weight, cu_edge_weight.cpu()) else: assert cu_edge_weight is None + + +@withPackage('dgl') +def test_to_dgl_graph(): + x = torch.randn(5, 3) + edge_index = torch.tensor([[0, 1, 1, 2, 3, 0], [1, 0, 2, 1, 4, 4]]) + edge_attr = torch.randn(edge_index.size(1), 2) + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + + g = to_dgl(data) + + assert torch.equal(data.x, g.ndata['x']) + row, col = g.edges() + assert torch.equal(row, edge_index[0]) + assert torch.equal(col, edge_index[1]) + assert torch.equal(data.edge_attr, g.edata['edge_attr']) + + +@withPackage('dgl') +def test_to_dgl_hetero_graph(): + data = HeteroData() + data['v1'].x = torch.randn(4, 3) + data['v2'].x = torch.randn(4, 3) + data['v1', 'v2'].edge_index = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 3]]) + data['v1', 'v2'].edge_attr = torch.randn(4, 2) + + g = to_dgl(data) + + assert data['v1', 'v2'].num_edges == g.num_edges(('v1', 'to', 'v2')) + assert data['v1'].num_nodes == g.num_nodes('v1') + assert data['v2'].num_nodes == g.num_nodes('v2') + assert torch.equal(data['v1'].x, g.nodes['v1'].data['x']) + assert torch.equal(data['v2'].x, g.nodes['v2'].data['x']) + row, col = g.edges() + assert torch.equal(row, data['v1', 'v2'].edge_index[0]) + assert torch.equal(col, data['v1', 'v2'].edge_index[1]) + assert torch.equal(g.edata['edge_attr'], data['v1', 'v2'].edge_attr) + + +@withPackage('dgl') +@withPackage('torch_sparse') +def test_to_dgl_sparse(): + from torch_geometric.transforms import ToSparseTensor + x = torch.randn(5, 3) + edge_index = torch.tensor([[0, 1, 1, 2, 3, 0], [1, 0, 2, 1, 4, 4]]) + edge_attr = torch.randn(edge_index.size(1), 2) + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + data = ToSparseTensor()(data) + + g = to_dgl(data) + + assert torch.equal(data.x, g.ndata["x"]) + pyg_row, pyg_col, _ = data.adj_t.t().coo() + dgl_row, dgl_col = g.edges() + assert torch.equal(pyg_row, dgl_row) + assert torch.equal(pyg_col, dgl_col) + assert torch.equal(data.edge_attr, g.edata['edge_attr']) + + +@withPackage('dgl') +def test_from_dgl_graph(): + import dgl + g = dgl.graph(([0, 0, 1, 5], [1, 2, 2, 0])) + g.ndata['x'] = torch.randn(g.num_nodes(), 3) + g.edata['edge_attr'] = torch.randn(g.num_edges()) + + data = from_dgl(g) + + assert 
torch.equal(data.x, g.ndata['x']) + row, col = g.edges() + assert torch.equal(data.edge_index[0], row) + assert torch.equal(data.edge_index[1], col) + assert torch.equal(data.edge_attr, g.edata['edge_attr']) + + +@withPackage('dgl') +def test_from_dgl_hetero_graph(): + import dgl + g = dgl.heterograph({ + ('v1', 'to', 'v2'): ( + [0, 1, 1, 2, 3, 3, 4], + [0, 0, 1, 1, 1, 2, 2], + ) + }) + g.nodes['v1'].data['x'] = torch.randn(5, 3) + g.nodes['v2'].data['x'] = torch.randn(3, 3) + + data = from_dgl(g) + + assert data['v1', 'v2'].num_edges == g.num_edges(('v1', 'to', 'v2')) + assert data['v1'].num_nodes == g.num_nodes('v1') + assert data['v2'].num_nodes == g.num_nodes('v2') + assert torch.equal(data['v1'].x, g.nodes['v1'].data['x']) + assert torch.equal(data['v2'].x, g.nodes['v2'].data['x']) diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index d8d771999fc8..f1a602a652f2 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -39,6 +39,7 @@ from .convert import to_networkit, from_networkit from .convert import to_trimesh, from_trimesh from .convert import to_cugraph, from_cugraph +from .convert import to_dgl, from_dgl from .smiles import from_smiles, to_smiles from .random import (erdos_renyi_graph, stochastic_blockmodel_graph, barabasi_albert_graph) @@ -116,6 +117,8 @@ 'from_trimesh', 'to_cugraph', 'from_cugraph', + 'to_dgl', + 'from_dgl', 'from_smiles', 'to_smiles', 'erdos_renyi_graph', diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index 78b1b64de71e..e603e88975c1 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -455,3 +455,151 @@ def from_cugraph(g: Any) -> Tuple[Tensor, Optional[Tensor]]: edge_weight = from_dlpack(df['weights'].to_dlpack()) return edge_index, edge_weight + + +def to_dgl( + data: Union['torch_geometric.data.Data', 'torch_geometric.data.HeteroData'] +) -> Any: + r"""Converts a :class:`torch_geometric.data.Data` or + :class:`torch_geometric.data.HeteroData` instance to a :obj:`dgl` graph + object. + + Args: + data (torch_geometric.data.Data or torch_geometric.data.HeteroData): + The data object. 
+ + Example: + + >>> edge_index = torch.tensor([[0, 1, 1, 2, 3, 0], [1, 0, 2, 1, 4, 4]]) + >>> x = torch.randn(5, 3) + >>> edge_attr = torch.randn(6, 2) + >>> data = Data(x=x, edge_index=edge_index, edge_attr=y) + >>> g = to_dgl(data) + >>> g + Graph(num_nodes=5, num_edges=6, + ndata_schemes={'x': Scheme(shape=(3,))} + edata_schemes={'edge_attr': Scheme(shape=(2, ))}) + + >>> data = HeteroData() + >>> data['paper'].x = torch.randn(5, 3) + >>> data['author'].x = torch.ones(5, 3) + >>> edge_index = torch.tensor([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) + >>> data['author', 'cites', 'paper'].edge_index = edge_index + >>> g = to_dgl(data) + >>> g + Graph(num_nodes={'author': 5, 'paper': 5}, + num_edges={('author', 'cites', 'paper'): 5}, + metagraph=[('author', 'paper', 'cites')]) + """ + import dgl + + from torch_geometric.data import Data, HeteroData + + if isinstance(data, Data): + if data.edge_index is not None: + row, col = data.edge_index + else: + row, col, _ = data.adj_t.t().coo() + + g = dgl.graph((row, col)) + + for attr in data.node_attrs(): + g.ndata[attr] = data[attr] + for attr in data.edge_attrs(): + if attr in ['edge_index', 'adj_t']: + continue + g.edata[attr] = data[attr] + + return g + + if isinstance(data, HeteroData): + data_dict = {} + for edge_type, store in data.edge_items(): + if store.get('edge_index') is not None: + row, col = store.edge_index + else: + row, col, _ = store['adj_t'].t().coo() + + data_dict[edge_type] = (row, col) + + g = dgl.heterograph(data_dict) + + for node_type, store in data.node_items(): + for attr, value in store.items(): + g.nodes[node_type].data[attr] = value + + for edge_type, store in data.edge_items(): + for attr, value in store.items(): + if attr in ['edge_index', 'adj_t']: + continue + g.edges[edge_type].data[attr] = value + + return g + + raise ValueError(f"Invalid data type (got '{type(data)}')") + + +def from_dgl( + g: Any, +) -> Union['torch_geometric.data.Data', 'torch_geometric.data.HeteroData']: + r"""Converts a :obj:`dgl` graph object to a + :class:`torch_geometric.data.Data` or + :class:`torch_geometric.data.HeteroData` instance. + + Args: + g (dgl.DGLGraph): The :obj:`dgl` graph object. + + Example: + + >>> g = dgl.graph(([0, 0, 1, 5], [1, 2, 2, 0])) + >>> g.ndata['x'] = torch.randn(g.num_nodes(), 3) + >>> g.edata['edge_attr'] = torch.randn(g.num_edges(), 2) + >>> data = from_dgl(g) + >>> data + Data(x=[6, 3], edge_attr=[4, 2], edge_index=[2, 4]) + + >>> g = dgl.heterograph({ + >>> g = dgl.heterograph({ + ... ('author', 'writes', 'paper'): ([0, 1, 1, 2, 3, 3, 4], + ... 
[0, 0, 1, 1, 1, 2, 2])}) + >>> g.nodes['author'].data['x'] = torch.randn(5, 3) + >>> g.nodes['paper'].data['x'] = torch.randn(5, 3) + >>> data = from_dgl(g) + >>> data + HeteroData( + author={ x=[5, 3] }, + paper={ x=[3, 3] }, + (author, writes, paper)={ edge_index=[2, 7] } + ) + """ + import dgl + + from torch_geometric.data import Data, HeteroData + + if not isinstance(g, dgl.DGLGraph): + raise ValueError(f"Invalid data type (got '{type(g)}')") + + if g.is_homogeneous: + data = Data() + data.edge_index = torch.stack(g.edges(), dim=0) + + for attr, value in g.ndata.items(): + data[attr] = value + for attr, value in g.edata.items(): + data[attr] = value + + return data + + data = HeteroData() + + for node_type in g.ntypes: + for attr, value in g.nodes[node_type].data.items(): + data[node_type][attr] = value + + for edge_type in g.canonical_etypes: + row, col = g.edges(form="uv", etype=edge_type) + data[edge_type].edge_index = torch.stack([row, col], dim=0) + for attr, value in g.edge_attr_schemes(edge_type).items(): + data[edge_type][attr] = value + + return data From 80d4647daf5daac4d8d6733fdddf2eae9cd9ec7a Mon Sep 17 00:00:00 2001 From: Saurav Maheshkar Date: Wed, 29 Mar 2023 16:55:28 +0100 Subject: [PATCH 1080/2432] feat: migrate to `pyproject.toml` for building package (#6880) Changes proposed by this PR can be summarized as follows :- * Delete `setup.py` in favour of `pyproject.toml` for packaging. * Update `pre-commit` configuration with an updated version of `pyroma` to check packaging quality of `pyproject.toml`. * Update workflows to build using `python -m build`. --- `pyproject.toml` is the new standard for packaging python packages, `setup.py` is now deprecated (first introduced in [PEP 518](https://peps.python.org/pep-0518/) and later expanded in [PEP 517](https://peps.python.org/pep-0517/), [PEP 621](https://peps.python.org/pep-0621/) and [PEP 660](https://peps.python.org/pep-0660/)). 
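With the metadata now declared under the `[project]` table per PEP 621, any tool can read the package name, version, and dependencies without executing `setup.py`. A small sketch of inspecting that metadata (assumes Python 3.11+ for the standard-library `tomllib`; older interpreters can use the third-party `tomli` package, which offers the same API):

```python
import tomllib  # standard library since Python 3.11

with open('pyproject.toml', 'rb') as f:
    meta = tomllib.load(f)

print(meta['project']['name'], meta['project']['version'])
print(meta['build-system']['build-backend'])  # e.g. 'flit_core.buildapi'
```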
--------- Co-authored-by: rusty1s --- .github/actions/setup/action.yml | 2 +- .github/workflows/linting.yml | 4 +- .github/workflows/nightly.yml | 6 +-- .pre-commit-config.yaml | 12 ++--- CHANGELOG.md | 1 + pyproject.toml | 63 ++++++++++++++++++++++++- readthedocs.yml | 2 +- setup.py | 81 -------------------------------- 8 files changed, 75 insertions(+), 96 deletions(-) delete mode 100644 setup.py diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index bf97c4d60963..185a2bc62736 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -28,7 +28,7 @@ runs: check-latest: true cache: pip cache-dependency-path: | - setup.py + pyproject.toml - name: Install PyTorch ${{ inputs.torch-version }}+${{ inputs.cuda-version }} if: ${{ inputs.torch-version != 'nightly' }} diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 37404d81337a..a48363f1ac88 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -43,7 +43,7 @@ jobs: check-latest: true cache: pip cache-dependency-path: | - setup.py + pyproject.toml - name: Install dependencies run: pip install pylint @@ -66,7 +66,7 @@ jobs: check-latest: true cache: pip cache-dependency-path: | - setup.py + pyproject.toml - name: Install dependencies run: pip install mypy diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 27c0b64c7ddc..14798ae5499d 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -22,7 +22,7 @@ jobs: check-latest: true cache: pip cache-dependency-path: | - setup.py + pyproject.toml - name: Set version run: echo "VERSION=$(sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_geometric/__init__.py)" >> ${GITHUB_ENV} @@ -32,14 +32,12 @@ jobs: - name: Customize build version run: | - sed -i "s/name='torch_geometric'/name='pyg-nightly'/" setup.py - sed -i "s/$VERSION/$VERSION.dev$TODAY/" setup.py sed -i "s/$VERSION/$VERSION.dev$TODAY/" torch_geometric/__init__.py sed -i 's/name="torch_geometric"/name="pyg-nightly"/' pyproject.toml sed -i "s/version=\"$VERSION\"/version=\"$VERSION.dev$TODAY\"/" pyproject.toml - name: Build package - run: python setup.py sdist + run: python -m build - name: Publish package uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 554d33c1e8b3..7bbe65b466fb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,12 +28,12 @@ repos: name: Lint yaml args: [-d, '{extends: default, rules: {line-length: disable, document-start: disable, truthy: {level: error}, braces: {max-spaces-inside: 1}}}'] - - repo: https://github.com/regebro/pyroma - rev: "4.1" - hooks: - - id: pyroma - name: Check packaging - args: [--min=10, .] + # - repo: https://github.com/regebro/pyroma + # rev: "4.2" + # hooks: + # - id: pyroma + # name: Check packaging + # args: [--min=10, .] - repo: https://github.com/google/yapf rev: v0.32.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4561d58d663a..65f00b0123fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -126,6 +126,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Migrate to `pyproject.toml` for packaging ([#6880](https://github.com/pyg-team/pytorch_geometric/pull/6880)) - Drop internal usage of `__dunder__` names ([#6999](https://github.com/pyg-team/pytorch_geometric/issues/6999)) - Changed the interface of `sort_edge_index`, `coalesce` and `to_undirected` to only return single `edge_index` information in case the `edge_attr` argument is not specified ([#6875](https://github.com/pyg-team/pytorch_geometric/issues/6875), [#6879](https://github.com/pyg-team/pytorch_geometric/issues/6879), [#6893](https://github.com/pyg-team/pytorch_geometric/issues/6893)) - Fixed a bug in `to_hetero` when using an uninitialized submodule without implementing `reset_parameters` ([#6863](https://github.com/pyg-team/pytorch_geometric/issues/6790)) diff --git a/pyproject.toml b/pyproject.toml index 18fe5e790ba0..032379b13e97 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,7 @@ +[build-system] +requires = ["flit_core >=3.2,<4"] +build-backend = "flit_core.buildapi" + [project] name="torch_geometric" version="2.4.0" @@ -25,8 +29,65 @@ classifiers=[ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3 :: Only", ] +dependencies=[ + "tqdm", + "numpy", + "scipy", + "jinja2", + "requests", + "pyparsing", + "scikit-learn", + "psutil>=5.8.0", +] -dynamic=["dependencies", "optional-dependencies"] +[project.optional-dependencies] +graphgym=[ + "yacs", + "hydra-core", + "protobuf<4.21", + "pytorch-lightning", +] +modelhub=[ + "huggingface_hub" +] +benchmark=[ + "protobuf<4.21", + "wandb", + "pandas", + "networkx", + "matplotlib", +] +test=[ + "pytest", + "pytest-cov", + "onnx", + "onnxruntime", +] +dev=[ + "torch_geometric[test]", + "pre-commit", +] +full = [ + "torch_geometric[graphgym, modelhub]", + "ase", + "h5py", + "numba", + "sympy", + "pandas", + "captum", + "rdflib", + "trimesh", + "networkx", + "graphviz", + "tabulate", + "matplotlib", + "torchmetrics", + "scikit-image", + "pytorch-memlab", + "pgmpy", + "opt_einsum", + "statsmodels" +] [project.urls] homepage="https://pyg.org/" diff --git a/readthedocs.yml b/readthedocs.yml index e4ca6eaa17fe..aae9ffd433e1 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -8,7 +8,7 @@ python: system_packages: true install: - requirements: docs/requirements.txt - - method: setuptools + - method: pip path: . 
formats: [] diff --git a/setup.py b/setup.py deleted file mode 100644 index 5a43a5b95ec1..000000000000 --- a/setup.py +++ /dev/null @@ -1,81 +0,0 @@ -from setuptools import find_packages, setup - -__version__ = '2.4.0' - -install_requires = [ - 'tqdm', - 'numpy', - 'scipy', - 'jinja2', - 'requests', - 'pyparsing', - 'scikit-learn', - 'psutil>=5.8.0', -] - -graphgym_requires = [ - 'yacs', - 'hydra-core', - 'protobuf<4.21', - 'pytorch-lightning', -] - -modelhub_requires = [ - 'huggingface_hub', -] - -full_requires = graphgym_requires + modelhub_requires + [ - 'ase', - 'h5py', - 'numba', - 'sympy', - 'pandas', - 'captum', - 'rdflib', - 'trimesh', - 'networkx', - 'graphviz', - 'tabulate', - 'matplotlib', - 'torchmetrics', - 'scikit-image', - 'pytorch-memlab', - 'pgmpy', - 'opt_einsum', # required for pgmpy - 'statsmodels', -] - -benchmark_requires = [ - 'protobuf<4.21', - 'wandb', - 'pandas', - 'networkx', - 'matplotlib', -] - -test_requires = [ - 'pytest', - 'pytest-cov', - 'onnx', - 'onnxruntime', -] - -dev_requires = test_requires + [ - 'pre-commit', -] - -setup( - name='torch_geometric', - version=__version__, - install_requires=install_requires, - extras_require={ - 'graphgym': graphgym_requires, - 'modelhub': modelhub_requires, - 'full': full_requires, - 'benchmark': benchmark_requires, - 'test': test_requires, - 'dev': dev_requires, - }, - packages=find_packages(), - include_package_data=True, # Ensure that `*.jinja` files are found. -) From 60538fff211b6e1d69281aa1320c6ba55cefe05f Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Thu, 30 Mar 2023 00:15:41 -0700 Subject: [PATCH 1081/2432] `MultiAggr` w/ `HeteroDictLinear` (#7077) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/aggr/test_multi.py | 9 +++++++-- test/nn/conv/test_graph_conv.py | 4 ++-- test/nn/conv/test_signed_conv.py | 12 ++++++++---- test/nn/dense/test_linear.py | 29 ++++++++++++++++++++--------- torch_geometric/nn/aggr/multi.py | 18 ++++++++---------- torch_geometric/nn/dense/linear.py | 17 +++++++---------- 7 files changed, 53 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65f00b0123fe..3894f5f54d0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Accelerated attention-based `MultiAggregation` ([#7077](https://github.com/pyg-team/pytorch_geometric/pull/7077)) - Edges in `HeterophilousGraphDataset` are now undirected by default ([#7065](https://github.com/pyg-team/pytorch_geometric/pull/7065)) - Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050)) - Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) diff --git a/test/nn/aggr/test_multi.py b/test/nn/aggr/test_multi.py index 222696b62ec9..e37eb3923fa4 100644 --- a/test/nn/aggr/test_multi.py +++ b/test/nn/aggr/test_multi.py @@ -45,5 +45,10 @@ def test_multi_aggr(multi_aggr_tuple): else: assert torch.allclose(out, aggr(x, ptr=ptr)) - jit = torch.jit.script(aggr) - assert torch.allclose(out, jit(x, index)) + if aggr_kwargs['mode'] == 'attn' and torch_geometric.typing.WITH_GMM: + # See: https://github.com/pytorch/pytorch/pull/97960 + with pytest.raises(RuntimeError, match="Unknown builtin op"): + jit = torch.jit.script(aggr) + else: + jit = torch.jit.script(aggr) + assert torch.allclose(out, jit(x, index)) diff --git a/test/nn/conv/test_graph_conv.py b/test/nn/conv/test_graph_conv.py index 14bd30481d4a..082e2954fa9d 100644 --- a/test/nn/conv/test_graph_conv.py +++ b/test/nn/conv/test_graph_conv.py @@ -47,8 +47,8 @@ def test_graph_conv(): if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj3.t()), out1) - assert torch.allclose(jit(x1, adj4.t()), out2) + assert torch.allclose(jit(x1, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x1, adj4.t()), out2, atol=1e-6) # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) diff --git a/test/nn/conv/test_signed_conv.py b/test/nn/conv/test_signed_conv.py index 6e72e29f9230..8ee73daf033a 100644 --- a/test/nn/conv/test_signed_conv.py +++ b/test/nn/conv/test_signed_conv.py @@ -50,8 +50,10 @@ def test_signed_conv(): # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - assert torch.allclose(conv1((x, x[:2]), edge_index, edge_index), out1[:2]) - assert torch.allclose(conv1((x, x[:2]), adj1.t(), adj1.t()), out1[:2]) + assert torch.allclose(conv1((x, x[:2]), edge_index, edge_index), out1[:2], + atol=1e-6) + assert torch.allclose(conv1((x, x[:2]), adj1.t(), adj1.t()), out1[:2], + atol=1e-6) assert torch.allclose(conv2((out1, out1[:2]), edge_index, edge_index), out2[:2], atol=1e-6) assert torch.allclose(conv2((out1, out1[:2]), adj1.t(), adj1.t()), @@ -59,7 +61,8 @@ def test_signed_conv(): if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) - assert torch.allclose(conv1((x, x[:2]), adj2.t(), adj2.t()), out1[:2]) + assert torch.allclose(conv1((x, x[:2]), adj2.t(), adj2.t()), out1[:2], + atol=1e-6) assert torch.allclose(conv2((out1, out1[:2]), adj2.t(), adj2.t()), out2[:2], atol=1e-6) @@ -76,6 +79,7 @@ def test_signed_conv(): t = '(PairTensor, SparseTensor, SparseTensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1((x, x[:2]), adj2.t(), adj2.t()), out1[:2]) + assert torch.allclose(jit1((x, x[:2]), adj2.t(), adj2.t()), out1[:2], + atol=1e-6) assert 
torch.allclose(jit2((out1, out1[:2]), adj2.t(), adj2.t()), out2[:2], atol=1e-6) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index c74c13d272ac..6450d198e499 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -5,8 +5,9 @@ from torch.nn import Linear as PTLinear from torch.nn.parameter import UninitializedParameter +import torch_geometric.typing from torch_geometric.nn import HeteroDictLinear, HeteroLinear, Linear -from torch_geometric.testing import is_full_test, withPackage +from torch_geometric.testing import withPackage weight_inits = ['glorot', 'kaiming_uniform', None] bias_inits = ['zeros', None] @@ -111,9 +112,8 @@ def test_hetero_linear(): out = lin(x, type_vec) assert out.size() == (3, 32) - if is_full_test(): - jit = torch.jit.script(lin) - assert torch.allclose(jit(x, type_vec), out) + jit = torch.jit.script(lin) + assert torch.allclose(jit(x, type_vec), out) def test_lazy_hetero_linear(): @@ -127,11 +127,13 @@ def test_lazy_hetero_linear(): assert out.size() == (3, 32) -def test_hetero_dict_linear(): +@pytest.mark.parametrize('bias', [True, False]) +def test_hetero_dict_linear(bias): x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)} - lin = HeteroDictLinear({'v': 16, 'w': 8}, 32) - assert str(lin) == "HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=True)" + lin = HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=bias) + assert str(lin) == (f"HeteroDictLinear({{'v': 16, 'w': 8}}, 32, " + f"bias={bias})") out_dict = lin(x_dict) assert len(out_dict) == 2 @@ -140,14 +142,23 @@ def test_hetero_dict_linear(): x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 16)} - lin = HeteroDictLinear(16, 32, types=['v', 'w']) - assert str(lin) == "HeteroDictLinear({'v': 16, 'w': 16}, 32, bias=True)" + lin = HeteroDictLinear(16, 32, types=['v', 'w'], bias=bias) + assert str(lin) == (f"HeteroDictLinear({{'v': 16, 'w': 16}}, 32, " + f"bias={bias})") out_dict = lin(x_dict) assert len(out_dict) == 2 assert out_dict['v'].size() == (3, 32) assert out_dict['w'].size() == (2, 32) + if torch_geometric.typing.WITH_GMM: + # See: https://github.com/pytorch/pytorch/pull/97960 + with pytest.raises(RuntimeError, match="Unknown builtin op"): + jit = torch.jit.script(lin) + else: + jit = torch.jit.script(lin) + assert len(jit(x_dict)) == 2 + def test_lazy_hetero_dict_linear(): x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)} diff --git a/torch_geometric/nn/aggr/multi.py b/torch_geometric/nn/aggr/multi.py index 6883f935d4a3..72b941ceda2c 100644 --- a/torch_geometric/nn/aggr/multi.py +++ b/torch_geometric/nn/aggr/multi.py @@ -7,6 +7,7 @@ from torch_geometric.nn.aggr import Aggregation from torch_geometric.nn.aggr.fused import FusedAggregation +from torch_geometric.nn.dense import HeteroDictLinear from torch_geometric.nn.resolver import aggregation_resolver @@ -114,10 +115,8 @@ def __init__( ) elif mode == 'attn': - self.lin_heads = torch.nn.ModuleList([ - Linear(channels, self.out_channels) - for channels in self.in_channels - ]) + channels = {str(k): v for k, v, in enumerate(self.in_channels)} + self.lin_heads = HeteroDictLinear(channels, self.out_channels) num_heads = mode_kwargs.pop('num_heads', 1) self.multihead_attn = MultiheadAttention( self.out_channels, @@ -137,8 +136,7 @@ def reset_parameters(self): if self.mode == 'proj': self.lin.reset_parameters() if self.mode == 'attn': - for lin in self.lin_heads: - lin.reset_parameters() + self.lin_heads.reset_parameters() self.multihead_attn._reset_parameters() def get_out_channels(self, 
in_channels: int) -> int: @@ -181,10 +179,10 @@ def combine(self, inputs: List[Tensor]) -> Tensor: return self.lin(torch.cat(inputs, dim=-1)) if hasattr(self, 'multihead_attn'): - x = torch.stack( - [head(inputs[i]) for i, head in enumerate(self.lin_heads)], - dim=0, - ) + x_dict = {str(k): v for k, v, in enumerate(inputs)} + x_dict = self.lin_heads(x_dict) + xs = [x_dict[str(key)] for key in range(len(inputs))] + x = torch.stack(xs, dim=0) attn_out, _ = self.multihead_attn(x, x, x) return torch.mean(attn_out, dim=0) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index c8971e1c918b..4567bc909a29 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -369,25 +369,22 @@ def reset_parameters(self): def forward( self, - x_dict: Dict[Any, Tensor], - ) -> Dict[Any, Tensor]: + x_dict: Dict[str, Tensor], + ) -> Dict[str, Tensor]: r""" Args: x_dict (Dict[Any, torch.Tensor]): A dictionary holding input features for each individual type. """ if torch_geometric.typing.WITH_GMM: - xs = [x_dict[key] for key in x_dict.keys()] - weights = [self.lins[key].weight.t() for key in x_dict.keys()] - if self.kwargs.get('bias', True): - biases = [self.lins[key].bias for key in x_dict.keys()] - else: - biases = None - + xs = [x_dict[key] for key in self.lins.keys()] + weights = [lin.weight.t() for lin in self.lins.values()] + biases = [lin.bias for lin in self.lins.values()] + biases = None if biases[0] is None else biases outs = pyg_lib.ops.grouped_matmul(xs, weights, biases) return {key: out for key, out in zip(x_dict.keys(), outs)} - return {key: self.lins[key](x) for key, x in x_dict.items()} + return {key: lin(x_dict[key]) for key, lin in self.lins.items()} @torch.no_grad() def initialize_parameters(self, module, input): From 22ff02712d99780846b77eb3d15b534482503a37 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 30 Mar 2023 13:21:10 +0200 Subject: [PATCH 1082/2432] Fix nightly builds (#7082) --- .github/workflows/linting.yml | 8 -------- .github/workflows/nightly.yml | 12 +++++------- MANIFEST.in | 9 --------- pyproject.toml | 7 +++++-- 4 files changed, 10 insertions(+), 26 deletions(-) delete mode 100644 MANIFEST.in diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index a48363f1ac88..eae028b9dd18 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -40,10 +40,6 @@ jobs: uses: actions/setup-python@v4.3.0 with: python-version: 3.8 - check-latest: true - cache: pip - cache-dependency-path: | - pyproject.toml - name: Install dependencies run: pip install pylint @@ -63,10 +59,6 @@ jobs: uses: actions/setup-python@v4.3.0 with: python-version: 3.8 - check-latest: true - cache: pip - cache-dependency-path: | - pyproject.toml - name: Install dependencies run: pip install mypy diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 14798ae5499d..9566ab7e08e1 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -7,7 +7,7 @@ on: # yamllint disable-line rule:truthy jobs: - sdist: + build: if: github.repository == 'pyg-team/pytorch_geometric' runs-on: ubuntu-latest @@ -19,10 +19,6 @@ jobs: uses: actions/setup-python@v4.3.0 with: python-version: 3.8 - check-latest: true - cache: pip - cache-dependency-path: | - pyproject.toml - name: Set version run: echo "VERSION=$(sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_geometric/__init__.py)" >> ${GITHUB_ENV} @@ -33,11 +29,13 @@ jobs: - name: Customize build version run: | sed -i 
"s/$VERSION/$VERSION.dev$TODAY/" torch_geometric/__init__.py - sed -i 's/name="torch_geometric"/name="pyg-nightly"/' pyproject.toml + sed -i '0,/name="torch_geometric"/s//name="pyg-nightly"/' pyproject.toml # Only change first occurence sed -i "s/version=\"$VERSION\"/version=\"$VERSION.dev$TODAY\"/" pyproject.toml - name: Build package - run: python -m build + run: | + pip install --upgrade build + python -m build - name: Publish package uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 34584b3605d1..000000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,9 +0,0 @@ -include README.md -include LICENSE - -recursive-include torch_geometric *.jinja - -recursive-exclude test * -recursive-exclude examples * -recursive-exclude docs * -recursive-exclude benchmark * diff --git a/pyproject.toml b/pyproject.toml index 032379b13e97..1c4703ee2f1e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = ["flit_core >=3.2,<4"] -build-backend = "flit_core.buildapi" +requires=["flit_core >=3.2,<4"] +build-backend="flit_core.buildapi" [project] name="torch_geometric" @@ -95,6 +95,9 @@ documentation="https://pytorch-geometric.readthedocs.io/" repository="https://github.com/pyg-team/pytorch_geometric.git" changelog="https://github.com/pyg-team/pytorch_geometric/blob/master/CHANGELOG.md" +[tool.flit.module] +name="torch_geometric" + [tool.yapf] based_on_style = "pep8" split_before_named_assigns = false From 9ee0a3d4581245b6a029d6a03e7b677f3acbb905 Mon Sep 17 00:00:00 2001 From: rishiA <53684457+rishiagarwal2000@users.noreply.github.com> Date: Thu, 30 Mar 2023 06:34:57 -0700 Subject: [PATCH 1083/2432] Temporal version of `EllipticBitcoinDataset` (#7011) Hi PyG team, We have implemented a temporal version of the EllipticBitcoinDataset to allow experimenting with models such as [EvolveGCN](https://arxiv.org/pdf/1902.10191.pdf), where the GCN evolves with time. The original issue was created [here](https://github.com/pyg-team/pytorch_geometric/issues/7010#issue-1636550901) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Aditya Agrawal Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/elliptic.py | 37 ++++++--- torch_geometric/datasets/elliptic_temporal.py | 83 +++++++++++++++++++ 4 files changed, 110 insertions(+), 13 deletions(-) create mode 100644 torch_geometric/datasets/elliptic_temporal.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 3894f5f54d0c..61f5d700b579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a time-step aware variant of the `EllipticBitcoinDataset` called `EllipticBitcoinTemporalDataset` ([#7011](https://github.com/pyg-team/pytorch_geometric/pull/7011)) - Added `to_dgl` and `from_dgl` conversion functions ([#7053](https://github.com/pyg-team/pytorch_geometric/pull/7053)) - Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 2bdddc5f50a2..e9ee7c28b472 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -77,6 +77,7 @@ from .sbm_dataset import RandomPartitionGraphDataset from .linkx_dataset import LINKXDataset from .elliptic import EllipticBitcoinDataset +from .elliptic_temporal import EllipticBitcoinTemporalDataset from .dgraph import DGraphFin from .hydro_net import HydroNet from .explainer_dataset import ExplainerDataset @@ -171,6 +172,7 @@ 'RandomPartitionGraphDataset', 'LINKXDataset', 'EllipticBitcoinDataset', + 'EllipticBitcoinTemporalDataset', 'DGraphFin', 'HydroNet', 'ExplainerDataset', diff --git a/torch_geometric/datasets/elliptic.py b/torch_geometric/datasets/elliptic.py index ed1860ba2325..8c23f86042d9 100644 --- a/torch_geometric/datasets/elliptic.py +++ b/torch_geometric/datasets/elliptic.py @@ -1,5 +1,5 @@ import os -from typing import Callable, List, Optional +from typing import Any, Callable, List, Optional, Tuple import torch @@ -78,31 +78,42 @@ def download(self): extract_zip(path, self.raw_dir) os.remove(path) + def _process_df(self, feat_df: Any, edge_df: Any, + class_df: Any) -> Tuple[Any, Any, Any]: + return feat_df, edge_df, class_df + def process(self): import pandas as pd - df_features = pd.read_csv(self.raw_paths[0], header=None) - df_edges = pd.read_csv(self.raw_paths[1]) - df_classes = pd.read_csv(self.raw_paths[2]) + feat_df = pd.read_csv(self.raw_paths[0], header=None) + edge_df = pd.read_csv(self.raw_paths[1]) + class_df = pd.read_csv(self.raw_paths[2]) columns = {0: 'txId', 1: 'time_step'} - df_features = df_features.rename(columns=columns) - x = torch.from_numpy(df_features.loc[:, 2:].values).to(torch.float) + feat_df = feat_df.rename(columns=columns) + + feat_df, edge_df, class_df = self._process_df( + feat_df, + edge_df, + class_df, + ) + + x = torch.from_numpy(feat_df.loc[:, 2:].values).to(torch.float) # There exists 3 different classes in the dataset: # 0=licit, 1=illicit, 2=unknown mapping = {'unknown': 2, '1': 1, '2': 0} - df_classes['class'] = df_classes['class'].map(mapping) - y = torch.from_numpy(df_classes['class'].values) + class_df['class'] = class_df['class'].map(mapping) + y = torch.from_numpy(class_df['class'].values) - mapping = {idx: i for i, idx in enumerate(df_features['txId'].values)} - df_edges['txId1'] = df_edges['txId1'].map(mapping) - df_edges['txId2'] = df_edges['txId2'].map(mapping) - edge_index = torch.from_numpy(df_edges.values).t().contiguous() + mapping = {idx: i for i, idx in enumerate(feat_df['txId'].values)} + edge_df['txId1'] = edge_df['txId1'].map(mapping) + edge_df['txId2'] = edge_df['txId2'].map(mapping) + edge_index = torch.from_numpy(edge_df.values).t().contiguous() # Timestamp based split: # train_mask: 1 - 34 time_step, test_mask: 35-49 time_step - 
time_step = torch.from_numpy(df_features['time_step'].values) + time_step = torch.from_numpy(feat_df['time_step'].values) train_mask = (time_step < 35) & (y != 2) test_mask = (time_step >= 35) & (y != 2) diff --git a/torch_geometric/datasets/elliptic_temporal.py b/torch_geometric/datasets/elliptic_temporal.py new file mode 100644 index 000000000000..ee53ff5c845d --- /dev/null +++ b/torch_geometric/datasets/elliptic_temporal.py @@ -0,0 +1,83 @@ +from typing import Any, Callable, Optional, Tuple + +from torch_geometric.datasets import EllipticBitcoinDataset + + +class EllipticBitcoinTemporalDataset(EllipticBitcoinDataset): + r"""The time-step aware Elliptic Bitcoin dataset of Bitcoin transactions + from the `"Anti-Money Laundering in Bitcoin: Experimenting with Graph + Convolutional Networks for Financial Forensics" + `_ paper. + + :class:`EllipticBitcoinTemporalDataset` maps Bitcoin transactions to real + entities belonging to licit categories (exchanges, wallet providers, + miners, licit services, etc.) versus illicit ones (scams, malware, + terrorist organizations, ransomware, Ponzi schemes, etc.) + + There exists 203,769 node transactions and 234,355 directed edge payments + flows, with two percent of nodes (4,545) labelled as illicit, and + twenty-one percent of nodes (42,019) labelled as licit. + The remaining transactions are unknown. + + .. note:: + + In contrast to :class:`EllipticBitcoinDataset`, this dataset returns + Bitcoin transactions only for a given timestamp :obj:`t`. + + Args: + root (str): Root directory where the dataset should be saved. + t (int): The Timestep for which nodes should be selected (from :obj:`1` + to :obj:`49`). + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + + **STATS:** + + .. list-table:: + :widths: 10 10 10 10 + :header-rows: 1 + + * - #nodes + - #edges + - #features + - #classes + * - 203,769 + - 234,355 + - 165 + - 2 + """ + def __init__( + self, + root: str, + t: int, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + if t < 1 or t > 49: + raise ValueError("'t' needs to be between 1 and 49") + + self.t = t + super().__init__(root, transform, pre_transform) + + @property + def processed_file_names(self) -> str: + return f'data_t_{self.t}.pt' + + def _process_df(self, feat_df: Any, edge_df: Any, + class_df: Any) -> Tuple[Any, Any, Any]: + + feat_df = feat_df[feat_df['time_step'] == self.t] + + mask = edge_df['txId1'].isin(feat_df['txId'].values) + edge_df = edge_df[mask] + + class_df = class_df.merge(feat_df[['txId']], how='right', + left_on='txId', right_on='txId') + + return feat_df, edge_df, class_df From b7b5d921edc832bfd3f38c8107925d771b48b0df Mon Sep 17 00:00:00 2001 From: toenshoff Date: Thu, 30 Mar 2023 16:11:01 +0200 Subject: [PATCH 1084/2432] Fix handling of missing node types in heterogeneous data loaders (#7060) This addresses a bug in heterogeneous data loaders, which crash if a node type from the complete graph is not present in the sampled batch of nodes. This is can easily happen if a graph is disconnected or some node types are simply rare. 
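For illustration, a minimal sketch of the failure mode (it mirrors the regression test added further below; the graph sizes and edge counts are arbitrary, and `HGTLoader` additionally requires `torch_sparse` to be installed):

```python
import torch
from torch_geometric.data import HeteroData
from torch_geometric.loader import HGTLoader

data = HeteroData()
data['paper'].x = torch.randn(10, 16)    # illustrative sizes only
data['author'].x = torch.randn(10, 16)
# Only intra-type edges, so no 'author' node is reachable from 'paper' seeds:
data['paper', 'paper'].edge_index = torch.randint(0, 10, (2, 15))
data['author', 'author'].edge_index = torch.randint(0, 10, (2, 15))

loader = HGTLoader(data, num_samples=[2], batch_size=2, input_nodes='paper')
batch = next(iter(loader))  # crashed before this fix: no 'author' nodes are sampled
```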
An unsafe dictionary access in the `filter_hetero_data` raises an exception in this case. I solved this by calling `HeteroData.node_type_subgraph` on the graph before the batch of nodes is filtered. I added a test for `HGTLoader` which covers this case and did not pass previously. Note that the behavior of the data loaders now differs slightly from `HeteroData.subgraph`, as the loader will handle missing keys by not including any node of that type in the batch, while the `subgraph` method would retain all of the nodes. However, it seems more convenient for data loading if batches only contain nodes that where actually returned by the underlying sampler. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinu Sunil --- CHANGELOG.md | 1 + test/loader/test_hgt_loader.py | 24 ++++++++++++++++++++++++ torch_geometric/loader/utils.py | 10 +++++++--- 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61f5d700b579..891d2dc51449 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed crash of heterogeneous data loaders if node types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060)) - Accelerated attention-based `MultiAggregation` ([#7077](https://github.com/pyg-team/pytorch_geometric/pull/7077)) - Edges in `HeterophilousGraphDataset` are now undirected by default ([#7065](https://github.com/pyg-team/pytorch_geometric/pull/7065)) - Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050)) diff --git a/test/loader/test_hgt_loader.py b/test/loader/test_hgt_loader.py index 20fee6f0f9e2..0611fb68c652 100644 --- a/test/loader/test_hgt_loader.py +++ b/test/loader/test_hgt_loader.py @@ -181,3 +181,27 @@ def forward(self, x, edge_index, edge_weight): out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict, hetero_batch.edge_weight_dict)['paper'][:batch_size] assert torch.allclose(out1, out2, atol=1e-6) + + +@withPackage('torch_sparse') +def test_hgt_loader_disconnected(): + torch.manual_seed(12345) + data = HeteroData() + + data['paper'].x = torch.arange(10) + data['author'].x = torch.arange(10, 20) + + # Paper is disconnected from author + data['paper', 'paper'].edge_index = get_random_edge_index(10, 10, 15) + data['paper', 'paper'].edge_attr = torch.arange(15) + data['author', 'author'].edge_index = get_random_edge_index(10, 10, 15) + + loader = HGTLoader(data, num_samples=[2], batch_size=2, + input_nodes='paper') + + for batch in loader: + assert isinstance(batch, HeteroData) + + # Test node and types: + assert set(batch.node_types) == {'paper'} + assert set(batch.edge_types) == set(data.edge_types) diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 6402e68c4bf6..a76b3f424c4e 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -130,13 +130,17 @@ def filter_hetero_data( ) -> HeteroData: # Filters a heterogeneous data object to only hold nodes in `node` and # edges in `edge` for each node and edge type, respectively: - out = copy.copy(data) + out = data.node_type_subgraph(node_dict.keys()) + # edge_dict may be emtpy if graph has no edges + # or if none of the edges types are reachable from seed nodes. 
+ if edge_dict: + out = out.edge_type_subgraph(edge_dict.keys()) - for node_type in data.node_types: + for node_type in out.node_types: filter_node_store_(data[node_type], out[node_type], node_dict[node_type]) - for edge_type in data.edge_types: + for edge_type in out.edge_types: filter_edge_store_( data[edge_type], out[edge_type], From 75291520868e6e05e146efd360bb37246d2fde32 Mon Sep 17 00:00:00 2001 From: Tingyu Wang Date: Fri, 31 Mar 2023 04:49:00 -0400 Subject: [PATCH 1085/2432] Update cugraph conv layers for `pylibcugraphops=23.04` (#7023) This PR updates cugraph models to reflect breaking changes in `pylibcugraphops=23.04`. ~~Right now, it is **blocked** by RAPIDS 23.04 release.~~ CC: @matthiaskohl @stadlmax --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- torch_geometric/nn/conv/cugraph/base.py | 63 +++++++++++++------- torch_geometric/nn/conv/cugraph/gat_conv.py | 15 ++++- torch_geometric/nn/conv/cugraph/rgcn_conv.py | 9 ++- torch_geometric/nn/conv/cugraph/sage_conv.py | 8 ++- 4 files changed, 69 insertions(+), 26 deletions(-) diff --git a/torch_geometric/nn/conv/cugraph/base.py b/torch_geometric/nn/conv/cugraph/base.py index ba5ec1f1d118..f53b8fc4de2d 100644 --- a/torch_geometric/nn/conv/cugraph/base.py +++ b/torch_geometric/nn/conv/cugraph/base.py @@ -8,15 +8,26 @@ from torch_geometric.utils.sparse import index2ptr try: # pragma: no cover - from pylibcugraphops import ( - make_fg_csr, - make_fg_csr_hg, - make_mfg_csr, - make_mfg_csr_hg, + LEGACY_MODE = False + from pylibcugraphops.pytorch import ( + SampledCSC, + SampledHeteroCSC, + StaticCSC, + StaticHeteroCSC, ) HAS_PYLIBCUGRAPHOPS = True except ImportError: HAS_PYLIBCUGRAPHOPS = False + try: # pragma: no cover + from pylibcugraphops import ( + make_fg_csr, + make_fg_csr_hg, + make_mfg_csr, + make_mfg_csr_hg, + ) + LEGACY_MODE = True + except ImportError: + pass class CuGraphModule(torch.nn.Module): # pragma: no cover @@ -25,9 +36,9 @@ class CuGraphModule(torch.nn.Module): # pragma: no cover def __init__(self): super().__init__() - if HAS_PYLIBCUGRAPHOPS is False: + if not HAS_PYLIBCUGRAPHOPS and not LEGACY_MODE: raise ModuleNotFoundError(f"'{self.__class__.__name__}' requires " - f"'pylibcugraphops'") + f"'pylibcugraphops>=23.02'") def reset_parameters(self): r"""Resets all learnable parameters of the module.""" @@ -99,12 +110,17 @@ def get_cugraph( if max_num_neighbors is None: max_num_neighbors = int((colptr[1:] - colptr[:-1]).max()) - dst_nodes = torch.arange(colptr.numel() - 1, device=row.device) + if LEGACY_MODE: + dst_nodes = torch.arange(colptr.numel() - 1, device=row.device) + return make_mfg_csr(dst_nodes, colptr, row, max_num_neighbors, + num_src_nodes) + + return SampledCSC(colptr, row, max_num_neighbors, num_src_nodes) - return make_mfg_csr(dst_nodes, colptr, row, max_num_neighbors, - num_src_nodes) + if LEGACY_MODE: + return make_fg_csr(colptr, row) - return make_fg_csr(colptr, row) + return StaticCSC(colptr, row) def get_typed_cugraph( self, @@ -142,17 +158,24 @@ def get_typed_cugraph( if max_num_neighbors is None: max_num_neighbors = int((colptr[1:] - colptr[:-1]).max()) - dst_nodes = torch.arange(colptr.numel() - 1, device=row.device) + if LEGACY_MODE: + dst_nodes = torch.arange(colptr.numel() - 1, device=row.device) + return make_mfg_csr_hg(dst_nodes, colptr, row, + max_num_neighbors, num_src_nodes, + n_node_types=0, + n_edge_types=num_edge_types, + out_node_types=None, in_node_types=None, + edge_types=edge_type) + + return SampledHeteroCSC(colptr, row, 
edge_type, max_num_neighbors, + num_src_nodes, num_edge_types) - return make_mfg_csr_hg(dst_nodes, colptr, row, max_num_neighbors, - num_src_nodes, n_node_types=0, - n_edge_types=num_edge_types, - out_node_types=None, in_node_types=None, - edge_types=edge_type) + if LEGACY_MODE: + return make_fg_csr_hg(colptr, row, n_node_types=0, + n_edge_types=num_edge_types, node_types=None, + edge_types=edge_type) - return make_fg_csr_hg(colptr, row, n_node_types=0, - n_edge_types=num_edge_types, node_types=None, - edge_types=edge_type) + return StaticHeteroCSC(colptr, row, edge_type, num_edge_types) def forward( self, diff --git a/torch_geometric/nn/conv/cugraph/gat_conv.py b/torch_geometric/nn/conv/cugraph/gat_conv.py index dbe11ce13195..c9e6bec38950 100644 --- a/torch_geometric/nn/conv/cugraph/gat_conv.py +++ b/torch_geometric/nn/conv/cugraph/gat_conv.py @@ -5,10 +5,14 @@ from torch.nn import Linear, Parameter from torch_geometric.nn.conv.cugraph import CuGraphModule +from torch_geometric.nn.conv.cugraph.base import LEGACY_MODE from torch_geometric.nn.inits import zeros try: - from pylibcugraphops.torch.autograd import mha_gat_n2n as GATConvAgg + if LEGACY_MODE: + from pylibcugraphops.torch.autograd import mha_gat_n2n as GATConvAgg + else: + from pylibcugraphops.pytorch.operators import mha_gat_n2n as GATConvAgg except ImportError: pass @@ -67,8 +71,13 @@ def forward( graph = self.get_cugraph(csc, max_num_neighbors) x = self.lin(x) - out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU', - self.negative_slope, False, self.concat) + + if LEGACY_MODE: + out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU', + self.negative_slope, False, self.concat) + else: + out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU', + self.negative_slope, self.concat) if self.bias is not None: out = out + self.bias diff --git a/torch_geometric/nn/conv/cugraph/rgcn_conv.py b/torch_geometric/nn/conv/cugraph/rgcn_conv.py index ce2317259868..71fd63205798 100644 --- a/torch_geometric/nn/conv/cugraph/rgcn_conv.py +++ b/torch_geometric/nn/conv/cugraph/rgcn_conv.py @@ -5,11 +5,16 @@ from torch.nn import Parameter from torch_geometric.nn.conv.cugraph import CuGraphModule +from torch_geometric.nn.conv.cugraph.base import LEGACY_MODE from torch_geometric.nn.inits import glorot, zeros try: - from pylibcugraphops.torch.autograd import \ - agg_hg_basis_n2n_post as RGCNConvAgg + if LEGACY_MODE: + from pylibcugraphops.torch.autograd import \ + agg_hg_basis_n2n_post as RGCNConvAgg + else: + from pylibcugraphops.pytorch.operators import \ + agg_hg_basis_n2n_post as RGCNConvAgg except ImportError: pass diff --git a/torch_geometric/nn/conv/cugraph/sage_conv.py b/torch_geometric/nn/conv/cugraph/sage_conv.py index 8d85d18b7113..01e991984abd 100644 --- a/torch_geometric/nn/conv/cugraph/sage_conv.py +++ b/torch_geometric/nn/conv/cugraph/sage_conv.py @@ -5,9 +5,15 @@ from torch.nn import Linear from torch_geometric.nn.conv.cugraph import CuGraphModule +from torch_geometric.nn.conv.cugraph.base import LEGACY_MODE try: - from pylibcugraphops.torch.autograd import agg_concat_n2n as SAGEConvAgg + if LEGACY_MODE: + from pylibcugraphops.torch.autograd import \ + agg_concat_n2n as SAGEConvAgg + else: + from pylibcugraphops.pytorch.operators import \ + agg_concat_n2n as SAGEConvAgg except ImportError: pass From 707fee2f9d7ff56a4792693eaea0134390749eef Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 31 Mar 2023 11:18:26 +0200 Subject: [PATCH 1086/2432] Fix `HGTLoader` on disconnected graphs (#7087) We still want to maintain 
all node and edge types, but sample zero nodes/edges. --- CHANGELOG.md | 2 +- test/loader/test_hgt_loader.py | 20 +++++++++++++------- torch_geometric/loader/utils.py | 32 +++++++++++++++++++++----------- 3 files changed, 35 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 891d2dc51449..4daeb5c74452 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- Fixed crash of heterogeneous data loaders if node types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060)) +- Fixed crash of heterogeneous data loaders if node or edge types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060), [#7087](https://github.com/pyg-team/pytorch_geometric/pull/7087)) - Accelerated attention-based `MultiAggregation` ([#7077](https://github.com/pyg-team/pytorch_geometric/pull/7077)) - Edges in `HeterophilousGraphDataset` are now undirected by default ([#7065](https://github.com/pyg-team/pytorch_geometric/pull/7065)) - Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050)) diff --git a/test/loader/test_hgt_loader.py b/test/loader/test_hgt_loader.py index 0611fb68c652..f34879c9a0f3 100644 --- a/test/loader/test_hgt_loader.py +++ b/test/loader/test_hgt_loader.py @@ -185,16 +185,16 @@ def forward(self, x, edge_index, edge_weight): @withPackage('torch_sparse') def test_hgt_loader_disconnected(): - torch.manual_seed(12345) data = HeteroData() - data['paper'].x = torch.arange(10) - data['author'].x = torch.arange(10, 20) + data['paper'].x = torch.randn(10, 16) + data['author'].x = torch.randn(10, 16) - # Paper is disconnected from author + # Paper nodes are disconnected from author nodes: data['paper', 'paper'].edge_index = get_random_edge_index(10, 10, 15) - data['paper', 'paper'].edge_attr = torch.arange(15) + data['paper', 'paper'].edge_attr = torch.randn(15, 8) data['author', 'author'].edge_index = get_random_edge_index(10, 10, 15) + data['author', 'author'].edge_attr = torch.randn(15, 8) loader = HGTLoader(data, num_samples=[2], batch_size=2, input_nodes='paper') @@ -202,6 +202,12 @@ def test_hgt_loader_disconnected(): for batch in loader: assert isinstance(batch, HeteroData) - # Test node and types: - assert set(batch.node_types) == {'paper'} + # Test node and edge types: + assert set(batch.node_types) == set(data.node_types) assert set(batch.edge_types) == set(data.edge_types) + + assert batch['author'].num_nodes == 0 + assert batch['author'].x.size() == (0, 16) + assert batch['author', 'author'].num_edges == 0 + assert batch['author', 'author'].edge_index.size() == (2, 0) + assert batch['author', 'author'].edge_attr.size() == (0, 8) diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index a76b3f424c4e..112871eddd6c 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -17,9 +17,11 @@ ) from torch_geometric.data.storage import EdgeStorage, NodeStorage from torch_geometric.typing import ( + EdgeType, FeatureTensorType, InputEdges, InputNodes, + NodeType, OptTensor, SparseTensor, ) @@ -122,32 +124,40 @@ def filter_data(data: Data, node: Tensor, row: Tensor, col: Tensor, def filter_hetero_data( data: HeteroData, - node_dict: Dict[str, Tensor], - row_dict: Dict[str, Tensor], - col_dict: Dict[str, Tensor], - edge_dict: Dict[str, Tensor], - perm_dict: Optional[Dict[str, OptTensor]] 
= None, + node_dict: Dict[NodeType, Tensor], + row_dict: Dict[EdgeType, Tensor], + col_dict: Dict[EdgeType, Tensor], + edge_dict: Dict[EdgeType, Tensor], + perm_dict: Optional[Dict[EdgeType, OptTensor]] = None, ) -> HeteroData: # Filters a heterogeneous data object to only hold nodes in `node` and # edges in `edge` for each node and edge type, respectively: - out = data.node_type_subgraph(node_dict.keys()) - # edge_dict may be emtpy if graph has no edges - # or if none of the edges types are reachable from seed nodes. - if edge_dict: - out = out.edge_type_subgraph(edge_dict.keys()) + out = copy.copy(data) for node_type in out.node_types: + # Handle the case of disconneted graph sampling: + if node_type not in node_dict: + node_dict[node_type] = torch.empty(0, dtype=torch.long) + filter_node_store_(data[node_type], out[node_type], node_dict[node_type]) for edge_type in out.edge_types: + # Handle the case of disconneted graph sampling: + if edge_type not in row_dict: + row_dict[edge_type] = torch.empty(0, dtype=torch.long) + if edge_type not in col_dict: + col_dict[edge_type] = torch.empty(0, dtype=torch.long) + if edge_type not in edge_dict: + edge_dict[edge_type] = torch.empty(0, dtype=torch.long) + filter_edge_store_( data[edge_type], out[edge_type], row_dict[edge_type], col_dict[edge_type], edge_dict[edge_type], - perm_dict[edge_type] if perm_dict else None, + perm_dict.get(edge_type, None) if perm_dict else None, ) return out From 85559709322b698c0575efe5043fa3c147375cf9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 1 Apr 2023 10:05:47 +0200 Subject: [PATCH 1087/2432] Fix `atol` precision tests (#7095) --- test/nn/conv/test_point_gnn_conv.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/nn/conv/test_point_gnn_conv.py b/test/nn/conv/test_point_gnn_conv.py index ac25e1a93a71..dbd3411a1323 100644 --- a/test/nn/conv/test_point_gnn_conv.py +++ b/test/nn/conv/test_point_gnn_conv.py @@ -26,18 +26,18 @@ def test_point_gnn_conv(): out = conv(x, pos, edge_index) assert out.size() == (6, 8) - assert torch.allclose(conv(x, pos, adj1.t()), out) + assert torch.allclose(conv(x, pos, adj1.t()), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) - assert torch.allclose(conv(x, pos, adj2.t()), out) + assert torch.allclose(conv(x, pos, adj2.t()), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, pos, edge_index), out) + assert torch.allclose(jit(x, pos, edge_index), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, pos, adj2.t()), out) + assert torch.allclose(jit(x, pos, adj2.t()), out, atol=1e-6) From 3c1501833b13cb87e110a89edef5aa6ee164c190 Mon Sep 17 00:00:00 2001 From: Sina Sajadmanesh Date: Mon, 3 Apr 2023 07:55:01 +0100 Subject: [PATCH 1088/2432] Fix `data.num_edges` for `torch.sparse.Tensor` (#7104) Fixes zero `num_edges` when using native PyTorch sparse tensor (#7103). 
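A minimal sketch of the reported behaviour, assuming the adjacency is stored as a native sparse tensor under `adj_t` (the attribute name and toy sizes are illustrative only):

```python
import torch
from torch_geometric.data import Data

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])  # toy graph
adj_t = torch.sparse_coo_tensor(edge_index, torch.ones(4), (3, 3))

data = Data(x=torch.randn(3, 8), adj_t=adj_t)
print(data.num_edges)  # reported 0 before this fix; now returns `adj_t._nnz()` == 4
```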
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/data/storage.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4daeb5c74452..f69c6930b045 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Added support for `Data.num_edges` for native `torch.sparse.Tensor` adjacency matrices ([#7104](https://github.com/pyg-team/pytorch_geometric/pull/7104)) - Fixed crash of heterogeneous data loaders if node or edge types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060), [#7087](https://github.com/pyg-team/pytorch_geometric/pull/7087)) - Accelerated attention-based `MultiAggregation` ([#7077](https://github.com/pyg-team/pytorch_geometric/pull/7077)) - Edges in `HeterophilousGraphDataset` are now undirected by default ([#7065](https://github.com/pyg-team/pytorch_geometric/pull/7065)) diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index 148a4aafe3a9..41a68c699665 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -26,6 +26,7 @@ from torch_geometric.utils import ( coalesce, contains_isolated_nodes, + is_torch_sparse_tensor, is_undirected, ) @@ -418,6 +419,8 @@ def num_edges(self) -> int: for value in self.values('adj', 'adj_t'): if isinstance(value, SparseTensor): return value.nnz() + elif is_torch_sparse_tensor(value): + return value._nnz() return 0 @property From 72c4ccc2fd80068c5b0b5c65aeaf1263c4aeb9a5 Mon Sep 17 00:00:00 2001 From: Jintang Li Date: Mon, 3 Apr 2023 15:12:54 +0800 Subject: [PATCH 1089/2432] Fix `Dimenet` with `torch.int32` as an index Tensor for `Dataset` (#7107) Fix #7099 --- torch_geometric/nn/models/dimenet.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index ccde37088eba..c0fda9669452 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -631,6 +631,7 @@ def copy_(src, name, transpose=False): # Use the same random seed as the official DimeNet` implementation. random_state = np.random.RandomState(seed=42) perm = torch.from_numpy(random_state.permutation(np.arange(130831))) + perm = perm.long() train_idx = perm[:110000] val_idx = perm[110000:120000] test_idx = perm[120000:] @@ -905,6 +906,7 @@ def copy_(src, name, transpose=False): random_state = np.random.RandomState(seed=42) perm = torch.from_numpy(random_state.permutation(np.arange(130831))) + perm = perm.long() train_idx = perm[:110000] val_idx = perm[110000:120000] test_idx = perm[120000:] From a8be0945cd607f00d2bfc1bb311d4f0cc042b1af Mon Sep 17 00:00:00 2001 From: Anton Bushuiev <67932762+anton-bushuiev@users.noreply.github.com> Date: Mon, 3 Apr 2023 09:27:39 +0200 Subject: [PATCH 1090/2432] Implement `ComposeFilters` (#7097) Addresses https://github.com/pyg-team/pytorch_geometric/issues/7084. 
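A small usage sketch, mirroring the unit test added below (the concrete thresholds are arbitrary):

```python
import torch
import torch_geometric.transforms as T
from torch_geometric.data import Data

pre_filter = T.ComposeFilters([
    lambda data: data.num_nodes > 2,  # illustrative filter functions
    lambda data: data.num_edges > 2,
])

data = Data(x=torch.arange(3),
            edge_index=torch.tensor([[0, 0, 1], [0, 1, 1]]))
print(pre_filter(data))  # True only if every composed filter passes
```

The composed callable can then be passed as the `pre_filter` argument of a dataset.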
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/transforms/test_compose.py | 26 ++++++++++++++++++++++ torch_geometric/graphgym/config_store.py | 1 + torch_geometric/transforms/__init__.py | 3 ++- torch_geometric/transforms/compose.py | 28 +++++++++++++++++++++++- 5 files changed, 57 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f69c6930b045..439ba66cb419 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added a `ComposeFilters` class to compose `pre_filter` functions in `Dataset` ([#7097](https://github.com/pyg-team/pytorch_geometric/pull/7097)) - Added a time-step aware variant of the `EllipticBitcoinDataset` called `EllipticBitcoinTemporalDataset` ([#7011](https://github.com/pyg-team/pytorch_geometric/pull/7011)) - Added `to_dgl` and `from_dgl` conversion functions ([#7053](https://github.com/pyg-team/pytorch_geometric/pull/7053)) - Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) diff --git a/test/transforms/test_compose.py b/test/transforms/test_compose.py index fe086a0a580a..f599dd028d33 100644 --- a/test/transforms/test_compose.py +++ b/test/transforms/test_compose.py @@ -19,3 +19,29 @@ def test_compose(): assert len(data) == 2 assert data.pos.tolist() == [[-2, 0], [0, 0], [2, 0]] assert data.edge_index.size() == (2, 7) + + +def test_compose_filters(): + filter_fn = T.ComposeFilters([ + lambda d: d.num_nodes > 2, + lambda d: d.num_edges > 2, + ]) + assert str(filter_fn)[:16] == 'ComposeFilters([' + + data1 = Data(x=torch.arange(3)) + assert not filter_fn(data1) + + data2 = Data(x=torch.arange(2), edge_index=torch.tensor([ + [0, 0, 1], + [0, 1, 1], + ])) + assert not filter_fn(data2) + + data3 = Data(x=torch.arange(3), edge_index=torch.tensor([ + [0, 0, 1], + [0, 1, 1], + ])) + assert filter_fn(data3) + + # Test tuple of data objects: + assert filter_fn((data1, data2, data3)) is False diff --git a/torch_geometric/graphgym/config_store.py b/torch_geometric/graphgym/config_store.py index 43b9dcb3ad55..ea70c829022a 100644 --- a/torch_geometric/graphgym/config_store.py +++ b/torch_geometric/graphgym/config_store.py @@ -352,6 +352,7 @@ def fill_config_store(): for cls_name in set(transforms.__all__) - set([ 'BaseTransform', 'Compose', + 'ComposeFilters', 'LinearTransformation', 'AddMetaPaths', # TODO ]): diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index 3b7f750faff3..f56db80f867e 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -1,7 +1,7 @@ # flake8: noqa from .base_transform import BaseTransform -from .compose import Compose +from .compose import Compose, ComposeFilters from .to_device import ToDevice from .to_sparse_tensor import ToSparseTensor from .constant import Constant @@ -62,6 +62,7 @@ general_transforms = [ 'BaseTransform', 'Compose', + 'ComposeFilters', 'ToDevice', 'ToSparseTensor', 'Constant', diff --git a/torch_geometric/transforms/compose.py b/torch_geometric/transforms/compose.py index 168ac00565b3..45f8e8f18610 100644 --- a/torch_geometric/transforms/compose.py +++ b/torch_geometric/transforms/compose.py @@ -5,7 +5,7 @@ class Compose(BaseTransform): - 
"""Composes several transforms together. + r"""Composes several transforms together. Args: transforms (List[Callable]): List of transforms to compose. @@ -27,3 +27,29 @@ def __call__( def __repr__(self) -> str: args = [f' {transform}' for transform in self.transforms] return '{}([\n{}\n])'.format(self.__class__.__name__, ',\n'.join(args)) + + +class ComposeFilters: + r"""Composes several filters together. + + Args: + filters (List[Callable]): List of filters to compose. + """ + def __init__(self, filters: List[Callable]): + self.filters = filters + + def __call__( + self, + data: Union[Data, HeteroData], + ) -> bool: + for filter_fn in self.filters: + if isinstance(data, (list, tuple)): + if not all([filter_fn(d) for d in data]): + return False + elif not filter_fn(data): + return False + return True + + def __repr__(self) -> str: + args = [f' {filter_fn}' for filter_fn in self.filters] + return '{}([\n{}\n])'.format(self.__class__.__name__, ',\n'.join(args)) From 036cc8c3a94bb068956cebb3b28c4b6042eb51d8 Mon Sep 17 00:00:00 2001 From: Jakub Pietrak <97102979+JakubPietrakIntel@users.noreply.github.com> Date: Mon, 3 Apr 2023 16:01:59 +0200 Subject: [PATCH 1091/2432] Update datamodule.py (#7109) Bugfix for datamodule.py Shouldn't line 102 say `kwargs.pop('batch_sampler', None)` ? --- torch_geometric/data/lightning/datamodule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/data/lightning/datamodule.py b/torch_geometric/data/lightning/datamodule.py index bca2e6f346a5..acaadd88fc3b 100644 --- a/torch_geometric/data/lightning/datamodule.py +++ b/torch_geometric/data/lightning/datamodule.py @@ -99,7 +99,7 @@ def __init__( if loader == 'full' and kwargs.get('batch_sampler') is not None: warnings.warn("'batch_sampler' option is not supported for " "loader='full'") - kwargs.pop('sampler', None) + kwargs.pop('batch_sampler', None) super().__init__(has_val, has_test, **kwargs) From 82317c23b09a4d2090cd94d19760a5efc97e17d7 Mon Sep 17 00:00:00 2001 From: Alex Morehead Date: Mon, 3 Apr 2023 15:28:20 -0500 Subject: [PATCH 1092/2432] Clarify argument type (#7110) This pull request simply updates the documented argument type for a `Data` object's `update` function to provide more clarity for users of this function. 
--- torch_geometric/data/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index c4d0ec391e51..98ba688897d0 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -512,7 +512,7 @@ def to_dict(self) -> Dict[str, Any]: def to_namedtuple(self) -> NamedTuple: return self._store.to_namedtuple() - def update(self, data: 'Data') -> 'Data': + def update(self, data: Union['Data', Dict[str, Any]]) -> 'Data': for key, value in data.items(): self[key] = value return self From 87744e28ebb392b22829048b2cb94ff98e720987 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:55:41 +0100 Subject: [PATCH 1093/2432] [pre-commit.ci] pre-commit suggestions (#7113) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/adrienverge/yamllint.git: v1.29.0 → v1.30.0](https://github.com/adrienverge/yamllint.git/compare/v1.29.0...v1.30.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7bbe65b466fb..886861eb142e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: )$ - repo: https://github.com/adrienverge/yamllint.git - rev: v1.29.0 + rev: v1.30.0 hooks: - id: yamllint name: Lint yaml From 8f408467a696f9bc7581002e5f872e813729e7c2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 9 Apr 2023 11:02:43 +0100 Subject: [PATCH 1094/2432] Fix `numpy` incompatiblity when reading files for `Planetoid` datasets (#7141) --- CHANGELOG.md | 1 + torch_geometric/io/planetoid.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 439ba66cb419..714dd3773b16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fix `numpy` incompatiblity when reading files for `Planetoid` datasets ([#7141](https://github.com/pyg-team/pytorch_geometric/pull/7141)) - Added support for `Data.num_edges` for native `torch.sparse.Tensor` adjacency matrices ([#7104](https://github.com/pyg-team/pytorch_geometric/pull/7104)) - Fixed crash of heterogeneous data loaders if node or edge types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060), [#7087](https://github.com/pyg-team/pytorch_geometric/pull/7087)) - Accelerated attention-based `MultiAggregation` ([#7077](https://github.com/pyg-team/pytorch_geometric/pull/7077)) diff --git a/torch_geometric/io/planetoid.py b/torch_geometric/io/planetoid.py index 2e7338afaa12..ba767668f21d 100644 --- a/torch_geometric/io/planetoid.py +++ b/torch_geometric/io/planetoid.py @@ -103,7 +103,7 @@ def read_file(folder, prefix, name): return out out = out.todense() if hasattr(out, 'todense') else out - out = torch.Tensor(out) + out = torch.from_numpy(out).to(torch.float) return out From 271c113ffc56ffdf5deeda6bd26076afddba87a9 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Sun, 9 Apr 2023 14:41:34 -0700 Subject: [PATCH 1095/2432] Additional GPU tests (#7126) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/loader/test_dataloader.py | 17 +++++-- test/nn/conv/test_nn_conv.py | 17 ++++--- test/nn/conv/test_rgcn_conv.py | 46 ++++++++++------- test/nn/dense/test_linear.py | 91 ++++++++++++++++++++++------------ 4 files changed, 109 insertions(+), 62 deletions(-) diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index 8bf424f18a12..7dad99c0ffd1 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -7,7 +7,7 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import DataLoader -from torch_geometric.testing import get_random_edge_index +from torch_geometric.testing import get_random_edge_index, withCUDA with_mp = sys.platform not in ['win32'] num_workers_list = [0, 2] if with_mp else [0] @@ -16,8 +16,12 @@ multiprocessing.set_start_method('spawn') +@withCUDA @pytest.mark.parametrize('num_workers', num_workers_list) -def test_dataloader(num_workers): +def test_dataloader(num_workers, device): + if num_workers > 0 and device != torch.device('cpu'): + return + x = torch.Tensor([[1], [1], [1]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) face = torch.tensor([[0], [1], [2]]) @@ -25,9 +29,9 @@ def test_dataloader(num_workers): z = torch.tensor(0.) 
name = 'data' - data = Data(x=x, edge_index=edge_index, y=y, z=z, name=name) - assert str(data) == ( - "Data(x=[3, 1], edge_index=[2, 4], y=2.0, z=0.0, name='data')") + data = Data(x=x, edge_index=edge_index, y=y, z=z, name=name).to(device) + assert str(data) == ("Data(x=[3, 1], edge_index=[2, 4], y=2.0, z=0.0, " + "name='data')") data.face = face loader = DataLoader([data, data, data, data], batch_size=2, shuffle=False, @@ -35,6 +39,9 @@ def test_dataloader(num_workers): assert len(loader) == 2 for batch in loader: + assert batch.x.device == device + assert batch.edge_index.device == device + assert batch.z.device == device assert batch.num_graphs == len(batch) == 2 assert batch.batch.tolist() == [0, 0, 0, 1, 1, 1] assert batch.ptr.tolist() == [0, 3, 6] diff --git a/test/nn/conv/test_nn_conv.py b/test/nn/conv/test_nn_conv.py index e0db2f9575d1..397788e3926a 100644 --- a/test/nn/conv/test_nn_conv.py +++ b/test/nn/conv/test_nn_conv.py @@ -5,20 +5,21 @@ import torch_geometric.typing from torch_geometric.nn import NNConv -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withCUDA from torch_geometric.typing import SparseTensor from torch_geometric.utils import to_torch_coo_tensor -def test_nn_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - value = torch.rand(edge_index.size(1), 3) +@withCUDA +def test_nn_conv(device): + x1 = torch.randn(4, 8, device=device) + x2 = torch.randn(2, 16, device=device) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]], device=device) + value = torch.rand(edge_index.size(1), 3, device=device) adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 4)) nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32)) - conv = NNConv(8, 32, nn=nn) + conv = NNConv(8, 32, nn=nn).to(device) assert str(conv) == ( 'NNConv(8, 32, aggr=add, nn=Sequential(\n' ' (0): Linear(in_features=3, out_features=32, bias=True)\n' @@ -49,7 +50,7 @@ def test_nn_conv(): # Test bipartite message passing: adj1 = to_torch_coo_tensor(edge_index, value, size=(4, 2)) - conv = NNConv((8, 16), 32, nn=nn) + conv = NNConv((8, 16), 32, nn=nn).to(device) assert str(conv) == ( 'NNConv((8, 16), 32, aggr=add, nn=Sequential(\n' ' (0): Linear(in_features=3, out_features=32, bias=True)\n' diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index 80f9e9d6b5f3..ea27a9bdae63 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -3,32 +3,38 @@ import torch_geometric.typing from torch_geometric.nn import FastRGCNConv, RGCNConv -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withCUDA from torch_geometric.typing import SparseTensor classes = [RGCNConv, FastRGCNConv] confs = [(None, None), (2, None), (None, 2)] +@withCUDA @pytest.mark.parametrize('conf', confs) -def test_rgcn_conv_equality(conf): +def test_rgcn_conv_equality(conf, device): num_bases, num_blocks = conf - x1 = torch.randn(4, 4) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) + x1 = torch.randn(4, 4, device=device) + edge_index = torch.tensor([ + [0, 1, 1, 2, 2, 3], + [0, 0, 1, 0, 1, 1], + ], device=device) + edge_type = torch.tensor([0, 1, 1, 0, 0, 1], device=device) edge_index = torch.tensor([ [0, 1, 1, 2, 2, 3, 0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1], - ]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3]) + ], device=device) + edge_type = 
torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3], + device=device) torch.manual_seed(12345) - conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum') + conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum').to(device) torch.manual_seed(12345) - conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum') + conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks, + aggr='sum').to(device) out1 = conv1(x1, edge_index, edge_type) out2 = conv2(x1, edge_index, edge_type) @@ -40,19 +46,23 @@ def test_rgcn_conv_equality(conf): assert torch.allclose(out1, out2, atol=1e-6) +@withCUDA @pytest.mark.parametrize('cls', classes) @pytest.mark.parametrize('conf', confs) -def test_rgcn_conv(cls, conf): +def test_rgcn_conv(cls, conf, device): num_bases, num_blocks = conf - x1 = torch.randn(4, 4) - x2 = torch.randn(2, 16) - idx1 = torch.arange(4) - idx2 = torch.arange(2) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) + x1 = torch.randn(4, 4, device=device) + x2 = torch.randn(2, 16, device=device) + idx1 = torch.arange(4, device=device) + idx2 = torch.arange(2, device=device) + edge_index = torch.tensor([ + [0, 1, 1, 2, 2, 3], + [0, 0, 1, 0, 1, 1], + ], device=device) + edge_type = torch.tensor([0, 1, 1, 0, 0, 1], device=device) - conv = cls(4, 32, 2, num_bases, num_blocks, aggr='sum') + conv = cls(4, 32, 2, num_bases, num_blocks, aggr='sum').to(device) assert str(conv) == f'{cls.__name__}(4, 32, num_relations=2)' out1 = conv(x1, edge_index, edge_type) @@ -87,7 +97,7 @@ def test_rgcn_conv(cls, conf): assert torch.allclose(jit(None, adj.t()), out2, atol=1e-6) # Test bipartite message passing: - conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum') + conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum').to(device) assert str(conv) == f'{cls.__name__}((4, 16), 32, num_relations=2)' out1 = conv((x1, x2), edge_index, edge_type) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index 6450d198e499..e258d77b2a6d 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -7,36 +7,41 @@ import torch_geometric.typing from torch_geometric.nn import HeteroDictLinear, HeteroLinear, Linear -from torch_geometric.testing import withPackage +from torch_geometric.testing import withCUDA, withPackage weight_inits = ['glorot', 'kaiming_uniform', None] bias_inits = ['zeros', None] +@withCUDA @pytest.mark.parametrize('weight', weight_inits) @pytest.mark.parametrize('bias', bias_inits) -def test_linear(weight, bias): - x = torch.randn(3, 4, 16) +def test_linear(weight, bias, device): + x = torch.randn(3, 4, 16, device=device) lin = Linear(16, 32, weight_initializer=weight, bias_initializer=bias) + lin = lin.to(device) assert str(lin) == 'Linear(16, 32, bias=True)' assert lin(x).size() == (3, 4, 32) +@withCUDA @pytest.mark.parametrize('weight', weight_inits) @pytest.mark.parametrize('bias', bias_inits) -def test_lazy_linear(weight, bias): - x = torch.randn(3, 4, 16) +def test_lazy_linear(weight, bias, device): + x = torch.randn(3, 4, 16, device=device) lin = Linear(-1, 32, weight_initializer=weight, bias_initializer=bias) + lin = lin.to(device) assert str(lin) == 'Linear(-1, 32, bias=True)' assert lin(x).size() == (3, 4, 32) assert str(lin) == 'Linear(16, 32, bias=True)' +@withCUDA @pytest.mark.parametrize('dim1', [-1, 16]) @pytest.mark.parametrize('dim2', [-1, 16]) -def test_load_lazy_linear(dim1, dim2): - lin1 = Linear(dim1, 32) - lin2 = Linear(dim1, 32) +def test_load_lazy_linear(dim1, 
dim2, device): + lin1 = Linear(dim1, 32).to(device) + lin2 = Linear(dim1, 32).to(device) lin2.load_state_dict(lin1.state_dict()) if dim1 != -1: @@ -78,11 +83,12 @@ def test_copy_unintialized_parameter(): copy.deepcopy(weight) +@withCUDA @pytest.mark.parametrize('lazy', [True, False]) -def test_copy_linear(lazy): - lin = Linear(-1 if lazy else 16, 32) +def test_copy_linear(lazy, device): + lin = Linear(-1 if lazy else 16, 32).to(device) - copied_lin = copy.copy(lin) + copied_lin = copy.copy(lin).to(device) assert id(copied_lin) != id(lin) assert id(copied_lin.weight) == id(lin.weight) if not isinstance(copied_lin.weight, UninitializedParameter): @@ -90,7 +96,7 @@ def test_copy_linear(lazy): assert id(copied_lin.bias) == id(lin.bias) assert copied_lin.bias.data_ptr() == lin.bias.data_ptr() - copied_lin = copy.deepcopy(lin) + copied_lin = copy.deepcopy(lin).to(device) assert id(copied_lin) != id(lin) assert id(copied_lin.weight) != id(lin.weight) if not isinstance(copied_lin.weight, UninitializedParameter): @@ -102,11 +108,12 @@ def test_copy_linear(lazy): assert torch.allclose(copied_lin.bias, lin.bias) -def test_hetero_linear(): - x = torch.randn(3, 16) - type_vec = torch.tensor([0, 1, 2]) +@withCUDA +def test_hetero_linear(device): + x = torch.randn(3, 16, device=device) + type_vec = torch.tensor([0, 1, 2], device=device) - lin = HeteroLinear(16, 32, num_types=3) + lin = HeteroLinear(16, 32, num_types=3).to(device) assert str(lin) == 'HeteroLinear(16, 32, num_types=3, bias=True)' out = lin(x, type_vec) @@ -116,22 +123,27 @@ def test_hetero_linear(): assert torch.allclose(jit(x, type_vec), out) -def test_lazy_hetero_linear(): - x = torch.randn(3, 16) - type_vec = torch.tensor([0, 1, 2]) +@withCUDA +def test_lazy_hetero_linear(device): + x = torch.randn(3, 16, device=device) + type_vec = torch.tensor([0, 1, 2], device=device) - lin = HeteroLinear(-1, 32, num_types=3) + lin = HeteroLinear(-1, 32, num_types=3).to(device) assert str(lin) == 'HeteroLinear(-1, 32, num_types=3, bias=True)' out = lin(x, type_vec) assert out.size() == (3, 32) +@withCUDA @pytest.mark.parametrize('bias', [True, False]) -def test_hetero_dict_linear(bias): - x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)} +def test_hetero_dict_linear(bias, device): + x_dict = { + 'v': torch.randn(3, 16, device=device), + 'w': torch.randn(2, 8, device=device), + } - lin = HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=bias) + lin = HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=bias).to(device) assert str(lin) == (f"HeteroDictLinear({{'v': 16, 'w': 8}}, 32, " f"bias={bias})") @@ -140,9 +152,12 @@ def test_hetero_dict_linear(bias): assert out_dict['v'].size() == (3, 32) assert out_dict['w'].size() == (2, 32) - x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 16)} + x_dict = { + 'v': torch.randn(3, 16, device=device), + 'w': torch.randn(2, 16, device=device), + } - lin = HeteroDictLinear(16, 32, types=['v', 'w'], bias=bias) + lin = HeteroDictLinear(16, 32, types=['v', 'w'], bias=bias).to(device) assert str(lin) == (f"HeteroDictLinear({{'v': 16, 'w': 16}}, 32, " f"bias={bias})") @@ -151,6 +166,15 @@ def test_hetero_dict_linear(bias): assert out_dict['v'].size() == (3, 32) assert out_dict['w'].size() == (2, 32) + +def test_hetero_dict_linear_jit(): + x_dict = { + 'v': torch.randn(3, 16), + 'w': torch.randn(2, 8), + } + + lin = HeteroDictLinear({'v': 16, 'w': 8}, 32) + if torch_geometric.typing.WITH_GMM: # See: https://github.com/pytorch/pytorch/pull/97960 with pytest.raises(RuntimeError, match="Unknown builtin op"): @@ -160,10 
+184,14 @@ def test_hetero_dict_linear(bias): assert len(jit(x_dict)) == 2 -def test_lazy_hetero_dict_linear(): - x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)} +@withCUDA +def test_lazy_hetero_dict_linear(device): + x_dict = { + 'v': torch.randn(3, 16, device=device), + 'w': torch.randn(2, 8, device=device), + } - lin = HeteroDictLinear(-1, 32, types=['v', 'w']) + lin = HeteroDictLinear(-1, 32, types=['v', 'w']).to(device) assert str(lin) == "HeteroDictLinear({'v': -1, 'w': -1}, 32, bias=True)" out_dict = lin(x_dict) @@ -172,15 +200,16 @@ def test_lazy_hetero_dict_linear(): assert out_dict['w'].size() == (2, 32) +@withCUDA @withPackage('pyg_lib') @pytest.mark.parametrize('type_vec', [ torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 1, 2, 0, 1, 2]), ]) -def test_hetero_linear_sort(type_vec): - x = torch.randn(type_vec.numel(), 16) +def test_hetero_linear_sort(type_vec, device): + x = torch.randn(type_vec.numel(), 16, device=device) - lin = HeteroLinear(16, 32, num_types=3) + lin = HeteroLinear(16, 32, num_types=3).to(device) out = lin(x, type_vec) for i in range(type_vec.numel()): From 1af056c84b11b18070e794b27f69a702cb85afbd Mon Sep 17 00:00:00 2001 From: EulerPascal404 <67972733+EulerPascal404@users.noreply.github.com> Date: Sun, 9 Apr 2023 16:55:53 -0500 Subject: [PATCH 1096/2432] [Code Coverage] `data/view.py` (#7093) Created code coverage for `data/view.py`. --------- Co-authored-by: ThePuzzlr <90777621+ThePuzzlr@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_view.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 test/data/test_view.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 714dd3773b16..b7f7be0d0e1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093)) - Fix `numpy` incompatiblity when reading files for `Planetoid` datasets ([#7141](https://github.com/pyg-team/pytorch_geometric/pull/7141)) - Added support for `Data.num_edges` for native `torch.sparse.Tensor` adjacency matrices ([#7104](https://github.com/pyg-team/pytorch_geometric/pull/7104)) - Fixed crash of heterogeneous data loaders if node or edge types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060), [#7087](https://github.com/pyg-team/pytorch_geometric/pull/7087)) diff --git a/test/data/test_view.py b/test/data/test_view.py new file mode 100644 index 000000000000..bf9054934f6b --- /dev/null +++ b/test/data/test_view.py @@ -0,0 +1,31 @@ +from torch_geometric.data.storage import BaseStorage + + +def test_views(): + storage = BaseStorage(x=1, y=2, z=3) + + assert str(storage.keys()) == "KeysView({'x': 1, 'y': 2, 'z': 3})" + assert len(storage.keys()) == 3 + assert list(storage.keys()) == ['x', 'y', 'z'] + + assert str(storage.values()) == "ValuesView({'x': 1, 'y': 2, 'z': 3})" + assert len(storage.values()) == 3 + assert list(storage.values()) == [1, 2, 3] + + assert str(storage.items()) == "ItemsView({'x': 1, 'y': 2, 'z': 3})" + assert len(storage.items()) == 3 + assert list(storage.items()) == [('x', 1), ('y', 2), ('z', 3)] + + args = ['x', 'z', 'foo'] + + assert str(storage.keys(*args)) == "KeysView({'x': 1, 'z': 3})" + assert len(storage.keys(*args)) == 2 + assert list(storage.keys(*args)) == ['x', 'z'] + + assert str(storage.values(*args)) == "ValuesView({'x': 1, 'z': 3})" + assert len(storage.values(*args)) == 2 + assert list(storage.values(*args)) == [1, 3] + + assert str(storage.items(*args)) == "ItemsView({'x': 1, 'z': 3})" + assert len(storage.items(*args)) == 2 + assert list(storage.items(*args)) == [('x', 1), ('z', 3)] From 94b2880cb04f1b7d8f85a37cfd2bb3791ad1e24b Mon Sep 17 00:00:00 2001 From: Loong <69568351+Looong01@users.noreply.github.com> Date: Mon, 10 Apr 2023 06:05:17 +0800 Subject: [PATCH 1097/2432] Add ROCm build instructions (#7143) Add the external repo link of pyg-rocm-build's wheels. --------- Co-authored-by: loong Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + README.md | 7 ++++++- docs/source/install/installation.rst | 5 ++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7f7be0d0e1c..7cbf375f5912 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added instructions for ROCm build wheels ([#7143](https://github.com/pyg-team/pytorch_geometric/pull/7143)) - Added a `ComposeFilters` class to compose `pre_filter` functions in `Dataset` ([#7097](https://github.com/pyg-team/pytorch_geometric/pull/7097)) - Added a time-step aware variant of the `EllipticBitcoinDataset` called `EllipticBitcoinTemporalDataset` ([#7011](https://github.com/pyg-team/pytorch_geometric/pull/7011)) - Added `to_dgl` and `from_dgl` conversion functions ([#7053](https://github.com/pyg-team/pytorch_geometric/pull/7053)) diff --git a/README.md b/README.md index 14f7a3ed6831..e67c1572ccfe 100644 --- a/README.md +++ b/README.md @@ -390,7 +390,7 @@ If you want to utilize the full set of features from PyG, there exists several a * **[`torch-cluster`](https://github.com/rusty1s/pytorch_cluster)**: Graph clustering routines * **[`torch-spline-conv`](https://github.com/rusty1s/pytorch_spline_conv)**: [`SplineConv`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SplineConv.html) support -These packages come with their own CPU and GPU kernel implementations based on the [PyTorch C++/CUDA extension interface](https://github.com/pytorch/extension-cpp). +These packages come with their own CPU and GPU kernel implementations based on the [PyTorch C++/CUDA/hip(ROCm) extension interface](https://github.com/pytorch/extension-cpp). For a basic usage of PyG, these dependencies are **fully optional**. We recommend to start with a minimal installation, and install additional dependencies once you start to actually need them. @@ -446,6 +446,11 @@ or install PyG **from master** via pip install git+https://github.com/pyg-team/pytorch_geometric.git ``` +### ROCm Wheels + +The external [`pyg-rocm-build` repository](https://github.com/Looong01/pyg-rocm-build) provides wheels and detailed instructions on how to install PyG for ROCm. +If you have any questions about it, please open an issue [here](https://github.com/Looong01/pyg-rocm-build/issues). + ## Cite Please cite [our paper](https://arxiv.org/abs/1903.02428) (and the respective papers of the methods used) if you use this code in your own work: diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst index 02c84000edbe..9c696c70b31c 100644 --- a/docs/source/install/installation.rst +++ b/docs/source/install/installation.rst @@ -50,7 +50,7 @@ If you want to utilize the full set of features from :pyg:`PyG`, there exists se * `torch-cluster `__: Graph clustering routines * `torch-spline-conv `__: :class:`~torch_geometric.nn.conv.SplineConv` support -These packages come with their own CPU and GPU kernel implementations based on the :pytorch:`null` `PyTorch C++/CUDA extension interface `_. +These packages come with their own CPU and GPU kernel implementations based on the :pytorch:`null` `PyTorch C++/CUDA/hip(ROCm) extension interface `_. For a basic usage of :pyg:`PyG`, these dependencies are **fully optional**. We recommend to start with a minimal installation, and install additional dependencies once you start to actually need them. @@ -104,6 +104,9 @@ For ease of installation of these extensions, we provide :obj:`pip` wheels for t **For older versions, you need to explicitly specify the latest supported version number** or install via :obj:`pip install --no-index` in order to prevent a manual installation from source. You can look up the latest supported version number `here `__. 
+**ROCm:** The external `pyg-rocm-build repository `__ provides wheels and detailed instructions on how to install :pyg:`PyG` for ROCm. +If you have any questions about it, please open an issue `here `__. + Installation from Source ~~~~~~~~~~~~~~~~~~~~~~~~ From 9609b37b4ab38a0351a76cf1998e3a605dc85468 Mon Sep 17 00:00:00 2001 From: Kamil Andrzejewski Date: Mon, 10 Apr 2023 00:21:13 +0200 Subject: [PATCH 1098/2432] Add `batch_size` argument to normalization layers (#7135) It can speedup runtime because: 1) We do not need to go through the batch dimension and look for max value. 2) We do not have to read tensor value which is placed on the device. Besides dim_size can be used if a user is using fixed size datasets. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/norm/graph_norm.py | 10 ++++++++-- torch_geometric/nn/norm/graph_size_norm.py | 10 ++++++++-- torch_geometric/nn/norm/instance_norm.py | 10 ++++++++-- torch_geometric/nn/norm/layer_norm.py | 10 ++++++++-- torch_geometric/nn/norm/pair_norm.py | 11 ++++++++--- torch_geometric/utils/nested.py | 2 +- 7 files changed, 42 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7cbf375f5912..cce865857d80 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Added an optional `batch_size` argument to `LayerNorm`, `GraphNorm`, `InstanceNorm`, `GraphSizeNorm` and `PairNorm` ([#7135](https://github.com/pyg-team/pytorch_geometric/pull/7135)) - Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093)) - Fix `numpy` incompatiblity when reading files for `Planetoid` datasets ([#7141](https://github.com/pyg-team/pytorch_geometric/pull/7141)) - Added support for `Data.num_edges` for native `torch.sparse.Tensor` adjacency matrices ([#7104](https://github.com/pyg-team/pytorch_geometric/pull/7104)) diff --git a/torch_geometric/nn/norm/graph_norm.py b/torch_geometric/nn/norm/graph_norm.py index c91bc01b8160..02d37a860911 100644 --- a/torch_geometric/nn/norm/graph_norm.py +++ b/torch_geometric/nn/norm/graph_norm.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.nn.inits import ones, zeros +from torch_geometric.typing import OptTensor from torch_geometric.utils import scatter @@ -44,18 +45,23 @@ def reset_parameters(self): zeros(self.bias) ones(self.mean_scale) - def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor: + def forward(self, x: Tensor, batch: OptTensor = None, + batch_size: Optional[int] = None) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. batch (torch.Tensor, optional): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each element to a specific example. (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. 
(default: :obj:`None`) """ if batch is None: batch = x.new_zeros(x.size(0), dtype=torch.long) + batch_size = 1 - batch_size = int(batch.max()) + 1 + if batch_size is None: + batch_size = int(batch.max()) + 1 mean = scatter(x, batch, 0, batch_size, reduce='mean') out = x - mean.index_select(0, batch) * self.mean_scale diff --git a/torch_geometric/nn/norm/graph_size_norm.py b/torch_geometric/nn/norm/graph_size_norm.py index 6bf8899e5f22..243147d86912 100644 --- a/torch_geometric/nn/norm/graph_size_norm.py +++ b/torch_geometric/nn/norm/graph_size_norm.py @@ -1,3 +1,5 @@ +from typing import Optional + import torch import torch.nn as nn from torch import Tensor @@ -18,18 +20,22 @@ class GraphSizeNorm(nn.Module): def __init__(self): super().__init__() - def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: + def forward(self, x: Tensor, batch: OptTensor = None, + batch_size: Optional[int] = None) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. batch (torch.Tensor, optional): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each element to a specific example. (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) """ if batch is None: batch = torch.zeros(x.size(0), dtype=torch.long, device=x.device) + batch_size = 1 - inv_sqrt_deg = degree(batch, dtype=x.dtype).pow(-0.5) + inv_sqrt_deg = degree(batch, batch_size, dtype=x.dtype).pow(-0.5) return x * inv_sqrt_deg.index_select(0, batch).view(-1, 1) def __repr__(self) -> str: diff --git a/torch_geometric/nn/norm/instance_norm.py b/torch_geometric/nn/norm/instance_norm.py index 8dde6bcd8917..00a892ec820a 100644 --- a/torch_geometric/nn/norm/instance_norm.py +++ b/torch_geometric/nn/norm/instance_norm.py @@ -1,3 +1,5 @@ +from typing import Optional + import torch.nn.functional as F from torch import Tensor from torch.nn.modules.instancenorm import _InstanceNorm @@ -50,13 +52,16 @@ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" super().reset_parameters() - def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: + def forward(self, x: Tensor, batch: OptTensor = None, + batch_size: Optional[int] = None) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. batch (torch.Tensor, optional): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each element to a specific example. (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) """ if batch is None: out = F.instance_norm( @@ -65,7 +70,8 @@ def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: or not self.track_running_stats, self.momentum, self.eps) return out.squeeze(0).t() - batch_size = int(batch.max()) + 1 + if batch_size is None: + batch_size = int(batch.max()) + 1 mean = var = unbiased_var = x # Dummies. 
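For reference, a minimal usage sketch of the new optional `batch_size` argument (illustrative only, not part of the diff; it assumes a PyG build that already includes this patch). Passing the known number of graphs skips the `int(batch.max()) + 1` computation shown in the hunks above:

```
import torch
from torch_geometric.nn import GraphNorm, LayerNorm

x = torch.randn(6, 16)                    # 6 nodes with 16 features each.
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # Two graphs with 3 nodes each.

norm1 = GraphNorm(16)
norm2 = LayerNorm(16)

# Without `batch_size`, both layers infer it via `int(batch.max()) + 1`,
# which requires reading a value that may reside on the GPU. Passing the
# (known) number of graphs avoids that synchronization:
out1 = norm1(x, batch, batch_size=2)
out2 = norm2(x, batch, batch_size=2)
assert out1.size() == (6, 16) and out2.size() == (6, 16)
```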
diff --git a/torch_geometric/nn/norm/layer_norm.py b/torch_geometric/nn/norm/layer_norm.py index 6dddcc6029d7..c40d3cc9cfd9 100644 --- a/torch_geometric/nn/norm/layer_norm.py +++ b/torch_geometric/nn/norm/layer_norm.py @@ -1,3 +1,5 @@ +from typing import Optional + import torch import torch.nn.functional as F from torch import Tensor @@ -62,13 +64,16 @@ def reset_parameters(self): ones(self.weight) zeros(self.bias) - def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: + def forward(self, x: Tensor, batch: OptTensor = None, + batch_size: Optional[int] = None) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. batch (torch.Tensor, optional): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each element to a specific example. (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) """ if self.mode == 'graph': if batch is None: @@ -76,7 +81,8 @@ def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: out = x / (x.std(unbiased=False) + self.eps) else: - batch_size = int(batch.max()) + 1 + if batch_size is None: + batch_size = int(batch.max()) + 1 norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1) norm = norm.mul_(x.size(-1)).view(-1, 1) diff --git a/torch_geometric/nn/norm/pair_norm.py b/torch_geometric/nn/norm/pair_norm.py index 49a32cc3b49d..c548ddc6c242 100644 --- a/torch_geometric/nn/norm/pair_norm.py +++ b/torch_geometric/nn/norm/pair_norm.py @@ -1,3 +1,5 @@ +from typing import Optional + import torch from torch import Tensor @@ -36,13 +38,16 @@ def __init__(self, scale: float = 1., scale_individually: bool = False, self.scale_individually = scale_individually self.eps = eps - def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: + def forward(self, x: Tensor, batch: OptTensor = None, + batch_size: Optional[int] = None) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. batch (torch.Tensor, optional): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each element to a specific example. (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) """ scale = self.scale @@ -55,13 +60,13 @@ def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: return scale * x / (self.eps + x.norm(2, -1, keepdim=True)) else: - mean = scatter(x, batch, dim=0, reduce='mean') + mean = scatter(x, batch, dim=0, dim_size=batch_size, reduce='mean') x = x - mean.index_select(0, batch) if not self.scale_individually: return scale * x / torch.sqrt(self.eps + scatter( x.pow(2).sum(-1, keepdim=True), batch, dim=0, - reduce='mean').index_select(0, batch)) + dim_size=batch_size, reduce='mean').index_select(0, batch)) else: return scale * x / (self.eps + x.norm(2, -1, keepdim=True)) diff --git a/torch_geometric/utils/nested.py b/torch_geometric/utils/nested.py index 03c3798b70c4..927e0d22e4e2 100644 --- a/torch_geometric/utils/nested.py +++ b/torch_geometric/utils/nested.py @@ -28,7 +28,7 @@ def to_nested_tensor( (default: :obj:`None`) ptr (torch.Tensor, optional): Alternative representation of :obj:`batch` in compressed format. (default: :obj:`None`) - batch_size (int, optional) The batch size :math:`B`. + batch_size (int, optional): The batch size :math:`B`. 
(default: :obj:`None`) """ if ptr is not None: From f95b99c98f0662ffa4fbc9fcf8a89cf17607ebb8 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Sun, 9 Apr 2023 15:59:55 -0700 Subject: [PATCH 1099/2432] Fix for `from_networkx` re-calling the constructor of the `nx.Graph` (#7128) https://github.com/pyg-team/pytorch_geometric/issues/7112 repro.py: ``` import networkx as nx from torch_geometric.utils import from_networkx class MetroNetwork(nx.Graph): def __init__(self, debug_mode=False, testing=False, **attr): super().__init__(**attr) self.num_access = None self.num_main = None self.num_core = None self.num_nodes = None self.num_edges = None self.max_pp = None self.max_ram = None self.max_bw = None self.max_link_delay = None self.max_placement_delay = None if testing: self.create_network4() else: self.add_edges_from([(1, 4, {'tier': 0, 'max_bw': 2000., 'remaining_bw': 2000., 'delay': 1.8, 'bw_req': 0})]) def create_network4(self): num_access = 44 num_main = 6 num_core = 2 self.num_access = num_access self.num_main = num_main self.num_core = num_core self.num_nodes = num_access + num_main + num_core tier_0_node_attributes = {'max_pp': 4000., 'remaining_pp': 4000., 'pp_req': 0, 'max_ram': 100., 'remaining_ram': 100., 'ram_req': 0, 'tier': 0, 'is_core': 0} tier_1_node_attributes = {'max_pp': 6000., 'remaining_pp': 6000., 'pp_req': 0, 'max_ram': 150., 'remaining_ram': 150., 'ram_req': 0, 'tier': 1, 'is_core': 1} tier_2_node_attributes = {'max_pp': 12000., 'remaining_pp': 12000., 'pp_req': 0, 'max_ram': 300., 'remaining_ram': 300., 'ram_req': 0, 'tier': 2, 'is_core': 1} node_idx = 0 node_list = [(node_idx + i, tier_0_node_attributes) for i in range(num_core)] node_idx += num_core node_list.extend([(node_idx + i, tier_1_node_attributes) for i in range(num_main)]) node_idx += num_main node_list.extend([(node_idx + i, tier_2_node_attributes) for i in range(num_access)]) self.add_nodes_from(node_list) tier_0_link_attributes = {'tier': 0, 'max_bw': 2000., 'remaining_bw': 2000., 'delay': 1.8, 'bw_req': 0} tier_1_link_attributes = {'tier': 1, 'max_bw': 3000., 'remaining_bw': 3000., 'delay': 4.8, 'bw_req': 0} edge_list = [(0, 7, tier_1_link_attributes), (0, 2, tier_1_link_attributes), (0, 3, tier_1_link_attributes), (0, 27, tier_0_link_attributes), (0, 8, tier_0_link_attributes), (0, 44, tier_0_link_attributes), (1, 5, tier_1_link_attributes), (1, 7, tier_1_link_attributes), (1, 30, tier_0_link_attributes), (1, 28, tier_0_link_attributes), (1, 11, tier_0_link_attributes), (1, 50, tier_0_link_attributes), (1, 22, tier_0_link_attributes), (2, 4, tier_1_link_attributes), (2, 37, tier_0_link_attributes), (2, 24, tier_0_link_attributes), (3, 4, tier_1_link_attributes), (3, 7, tier_1_link_attributes), (3, 5, tier_1_link_attributes), (3, 21, tier_0_link_attributes), (3, 20, tier_0_link_attributes), (3, 49, tier_0_link_attributes), (4, 18, tier_0_link_attributes), (4, 10, tier_0_link_attributes), (5, 19, tier_0_link_attributes), (5, 43, tier_0_link_attributes), (5, 11, tier_0_link_attributes), (5, 6, tier_1_link_attributes), (7, 32, tier_0_link_attributes), (7, 33, tier_0_link_attributes), (7, 25, tier_0_link_attributes), (7, 17, tier_0_link_attributes), (8, 44, tier_0_link_attributes), (8, 14, tier_0_link_attributes), (9, 10, tier_0_link_attributes), (10, 40, tier_0_link_attributes), (11, 34, tier_0_link_attributes), (11, 48, tier_0_link_attributes), (12, 39, tier_0_link_attributes), (12, 51, tier_0_link_attributes), (13, 23, tier_0_link_attributes), (13, 27, tier_0_link_attributes), (14, 21, 
tier_0_link_attributes), (14, 16, tier_0_link_attributes), (14, 42, tier_0_link_attributes), (15, 20, tier_0_link_attributes), (15, 50, tier_0_link_attributes), (15, 51, tier_0_link_attributes), (16, 42, tier_0_link_attributes), (16, 44, tier_0_link_attributes), (17, 42, tier_0_link_attributes), (17, 39, tier_0_link_attributes), (18, 40, tier_0_link_attributes), (22, 41, tier_0_link_attributes), (23, 38, tier_0_link_attributes), (24, 38, tier_0_link_attributes), (24, 49, tier_0_link_attributes), (25, 51, tier_0_link_attributes), (26, 47, tier_0_link_attributes), (28, 30, tier_0_link_attributes), (29, 31, tier_0_link_attributes), (29, 41, tier_0_link_attributes), (31, 34, tier_0_link_attributes), (32, 33, tier_0_link_attributes), (35, 40, tier_0_link_attributes), (36, 37, tier_0_link_attributes), (37, 40, tier_0_link_attributes), (39, 42, tier_0_link_attributes), (43, 47, tier_0_link_attributes), (45, 46, tier_0_link_attributes), (45, 47, tier_0_link_attributes), (46, 48, tier_0_link_attributes), ] # Add edges with networkx format self.add_edges_from(edge_list) self.num_edges = len(edge_list) self.max_pp = 12000 self.max_ram = 300 self.max_bw = 5000 self.max_link_delay = 4.8 self.max_placement_delay = 30 network = MetroNetwork(testing = True) print('Ground Truth NetworkX num_edges=', network.num_edges) from_netx = from_networkx(network) print('num_edges after convert to PyG=', from_netx.num_edges) print("PyG to_string=", from_netx) print("NetworkX to_string=", network) ``` command: `git checkout fromnetworkx-fix;git pull origin fromnetworkx-fix; pip uninstall -y torch-geometric; pip install .; python3 repro.py` -> w/ fix: ``` Ground Truth NetworkX num_edges= 72 num_edges after convert to PyG= 144 ``` w/o fix: ``` Ground Truth NetworkX num_edges= 72 num_edges after convert to PyG= 146 ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- torch_geometric/utils/convert.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index e603e88975c1..4b49108cb640 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -202,14 +202,12 @@ def from_networkx( from torch_geometric.data import Data - G = nx.convert_node_labels_to_integers(G) G = G.to_directed() if not nx.is_directed(G) else G - if isinstance(G, (nx.MultiGraph, nx.MultiDiGraph)): - edges = list(G.edges(keys=False)) - else: - edges = list(G.edges) - + edges = [] + mapping = dict(zip(G.nodes(), range(G.number_of_nodes()))) + for src, dst in G.edges(): + edges.append([mapping[src], mapping[dst]]) edge_index = torch.tensor(edges, dtype=torch.long).t().contiguous() data = defaultdict(list) From 76337403fc3e67163c8c60618cda1f6e61b1fd15 Mon Sep 17 00:00:00 2001 From: EulerPascal404 <67972733+EulerPascal404@users.noreply.github.com> Date: Sun, 9 Apr 2023 18:09:22 -0500 Subject: [PATCH 1100/2432] Optimize `from_networkx()` (#7119) Addresses issue #6964 by deleting unneeded copies by directly writing edges into a torch.Tensor rather than first converting to a list and then using torch.tensor() on top. 
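For reference, the gist of the change as a standalone, runnable sketch (illustrative only; the dataset and variable names are chosen for the example, the actual diff follows below):

```
import networkx as nx
import torch

G = nx.karate_club_graph().to_directed()

# Instead of first building a Python list of [src, dst] pairs and copying it
# into a tensor via torch.tensor(), pre-allocate the [2, num_edges] tensor
# and write the (relabeled) edge endpoints into it directly:
mapping = dict(zip(G.nodes(), range(G.number_of_nodes())))
edge_index = torch.empty((2, G.number_of_edges()), dtype=torch.long)
for i, (src, dst) in enumerate(G.edges()):
    edge_index[0, i] = mapping[src]
    edge_index[1, i] = mapping[dst]

assert edge_index.size() == (2, G.number_of_edges())
```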
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jintang Li Co-authored-by: Jinu Sunil Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/utils/convert.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cce865857d80..52f8c17266f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Optimized `from_networkx` memory footprint by reducing unnecessary copies ([#7119](https://github.com/pyg-team/pytorch_geometric/pull/7119)) - Added an optional `batch_size` argument to `LayerNorm`, `GraphNorm`, `InstanceNorm`, `GraphSizeNorm` and `PairNorm` ([#7135](https://github.com/pyg-team/pytorch_geometric/pull/7135)) - Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093)) - Fix `numpy` incompatiblity when reading files for `Planetoid` datasets ([#7141](https://github.com/pyg-team/pytorch_geometric/pull/7141)) diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index 4b49108cb640..694048a08289 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -204,11 +204,11 @@ def from_networkx( G = G.to_directed() if not nx.is_directed(G) else G - edges = [] mapping = dict(zip(G.nodes(), range(G.number_of_nodes()))) - for src, dst in G.edges(): - edges.append([mapping[src], mapping[dst]]) - edge_index = torch.tensor(edges, dtype=torch.long).t().contiguous() + edge_index = torch.empty((2, G.number_of_edges()), dtype=torch.long) + for i, (src, dst) in enumerate(G.edges()): + edge_index[0, i] = mapping[src] + edge_index[1, i] = mapping[dst] data = defaultdict(list) From 1dadc070565ae45ce3a00f150b2fe50809c49959 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 10 Apr 2023 02:50:51 +0100 Subject: [PATCH 1101/2432] Helper assert function to test for module equivalence and invariance (#7144) --- test/nn/conv/test_sage_conv.py | 66 +++++----------- torch_geometric/testing/__init__.py | 2 + torch_geometric/testing/asserts.py | 112 ++++++++++++++++++++++++++++ torch_geometric/utils/spmm.py | 5 +- 4 files changed, 135 insertions(+), 50 deletions(-) create mode 100644 torch_geometric/testing/asserts.py diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index a5b293f85e38..afc1f30763ce 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -3,61 +3,43 @@ import torch_geometric.typing from torch_geometric.nn import SAGEConv -from torch_geometric.testing import is_full_test +from torch_geometric.testing import assert_module, is_full_test from torch_geometric.typing import SparseTensor -from torch_geometric.utils import to_torch_csc_tensor @pytest.mark.parametrize('project', [False, True]) @pytest.mark.parametrize('aggr', ['mean', 'sum']) def test_sage_conv(project, aggr): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) + x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = SAGEConv(8, 32, project=project, aggr=aggr) assert str(conv) == f'SAGEConv(8, 32, aggr={aggr})' - out = conv(x1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out, atol=1e-6) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = 
SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + out = assert_module(conv, x, edge_index, expected_size=(4, 32)) if is_full_test(): t = '(Tensor, Tensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out, atol=1e-6) + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) + assert torch.allclose(jit(x, edge_index, size=(4, 4)), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) t = '(Tensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) + assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) # Test bipartite message passing: - adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + x1 = torch.randn(4, 8) + x2 = torch.randn(2, 16) conv = SAGEConv((8, 16), 32, project=project, aggr=aggr) assert str(conv) == f'SAGEConv((8, 16), 32, aggr={aggr})' - out1 = conv((x1, x2), edge_index) - assert out1.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) - - out2 = conv((x1, None), edge_index, (4, 2)) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) + out1 = assert_module(conv, (x1, x2), edge_index, expected_size=(2, 32)) + out2 = assert_module(conv, (x1, None), edge_index, size=(4, 2), + expected_size=(2, 32)) if is_full_test(): t = '(OptPairTensor, Tensor, Size) -> Tensor' @@ -67,27 +49,22 @@ def test_sage_conv(project, aggr): assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) t = '(OptPairTensor, SparseTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj2.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj2.t()), out2, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) def test_lstm_aggr_sage_conv(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = SAGEConv(8, 32, aggr='lstm') assert str(conv) == 'SAGEConv(8, 32, aggr=lstm)' - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + assert_module(conv, x, edge_index, expected_size=(4, 32), + test_edge_permutation=False) edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 0]]) with pytest.raises(ValueError, match="'index' tensor is not sorted"): @@ -104,16 +81,9 @@ def test_lstm_aggr_sage_conv(): def test_multi_aggr_sage_conv(aggr_kwargs): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - adj1 
= to_torch_csc_tensor(edge_index, size=(4, 4)) aggr_kwargs['aggrs_kwargs'] = [{}, {}, {}, dict(learn=True, t=1)] conv = SAGEConv(8, 32, aggr=['mean', 'max', 'sum', 'softmax'], aggr_kwargs=aggr_kwargs) - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + assert_module(conv, x, edge_index, expected_size=(4, 32)) diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py index 215ed8fb5948..db2bc6b78713 100644 --- a/torch_geometric/testing/__init__.py +++ b/torch_geometric/testing/__init__.py @@ -10,6 +10,7 @@ withCUDA, disableExtensions, ) +from .asserts import assert_module from .feature_store import MyFeatureStore from .graph_store import MyGraphStore from .data import FakeHeteroDataset, get_random_edge_index @@ -25,6 +26,7 @@ 'withPackage', 'withCUDA', 'disableExtensions', + 'assert_module', 'MyFeatureStore', 'MyGraphStore', 'get_random_edge_index', diff --git a/torch_geometric/testing/asserts.py b/torch_geometric/testing/asserts.py new file mode 100644 index 000000000000..bc4a26491547 --- /dev/null +++ b/torch_geometric/testing/asserts.py @@ -0,0 +1,112 @@ +import copy +import warnings +from typing import Any, List, Optional, Tuple, Union + +import torch +from torch import Tensor + +from torch_geometric.typing import WITH_TORCH_SPARSE, SparseTensor +from torch_geometric.utils import to_torch_coo_tensor, to_torch_csc_tensor + +SPARSE_LAYOUTS = ['torch_sparse', torch.sparse_csc, torch.sparse_coo] + + +def assert_module( + module: torch.nn.Module, + x: Any, + edge_index: Tensor, + *, + expected_size: Tuple[int, ...], + test_edge_permutation: bool = True, + test_node_permutation: bool = False, + test_sparse_layouts: List[Union[str, int]] = SPARSE_LAYOUTS, + sparse_size: Optional[Tuple[int, int]] = None, + atol: float = 1e-08, + rtol: float = 1e-05, + equal_nan: bool = False, + **kwargs, +): + r"""Asserts that the output of a :obj:`module` is correct. Specifically, + this method tests that: + + 1. The module output has the correct shape. + 2. The module is invariant to the permutation of edges. + 3. The module is invariant to the permutation of nodes. + 4. The module is invariant to the layout of :obj:`edge_index`. + + Args: + module (torch.nn.Module): The module to test. + x (Any): The input features to the module. + edge_index (torch.Tensor): The input edge indices. + expected_size (Tuple[int, ...]): The expected output size. + test_edge_permutation (bool, optional): If set to :obj:`False`, will + not test the module for edge permutation invariance. + test_node_permutation (bool, optional): If set to :obj:`False`, will + not test the module for node permutation invariance. + test_sparse_layouts (List[str or int], optional): The sparse layouts to + test for module invariance. (default: :obj:`["torch_sparse", + torch.sparse_csc, torch.sparse_coo]`) + sparse_size (Tuple[int, int], optional): The size of the sparse + adjacency matrix. If not given, will try to automatically infer it. + (default: :obj:`None`) + atol (float, optional): Absolute tolerance. (default: :obj:`1e-08`) + rtol (float, optional): Relative tolerance. (default: :obj:`1e-05`) + equal_nan (bool, optional): If set to :obj:`True`, then two :obj:`NaN`s + will be considered equal. 
(default: :obj:`False`) + """ + if sparse_size is None: + if 'size' in kwargs: + sparse_size = kwargs['size'] + elif isinstance(x, Tensor): + sparse_size = (x.size(0), x.size(0)) + elif (isinstance(x, (tuple, list)) and isinstance(x[0], Tensor) + and isinstance(x[1], Tensor)): + sparse_size = (x[0].size(0), x[1].size(0)) + + if len(test_sparse_layouts) > 0 and sparse_size is None: + raise ValueError(f"Got sparse layouts {test_sparse_layouts}, but no " + f"'sparse_size' were specified") + + expected = module(x, edge_index=edge_index, **kwargs) + assert expected.size() == expected_size + + if test_edge_permutation: + perm = torch.randperm(edge_index.size(1)) + perm_kwargs = copy.copy(kwargs) + for key, value in kwargs.items(): + if isinstance(value, Tensor) and value.size(0) == perm.numel(): + perm_kwargs[key] = value[perm] + out = module(x, edge_index[:, perm], **perm_kwargs) + assert torch.allclose(out, expected, rtol, atol, equal_nan) + + if test_node_permutation: + raise NotImplementedError + + for layout in (test_sparse_layouts or []): + # TODO Add support for values. + if layout == 'torch_sparse': + if not WITH_TORCH_SPARSE: + continue + + adj = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=sparse_size, + ) + adj_t = adj.t() + + elif layout == torch.sparse_csc: + adj = to_torch_csc_tensor(edge_index, size=sparse_size) + adj_t = adj.t() + + elif layout == torch.sparse_coo: + warnings.filterwarnings('ignore', ".*to CSR format.*") + adj = to_torch_coo_tensor(edge_index, size=sparse_size) + adj_t = adj.t().coalesce() + + else: + raise ValueError(f"Got invalid sparse layout '{layout}'") + + out = module(x, adj_t, **kwargs) + assert torch.allclose(out, expected, rtol, atol, equal_nan) + + return expected diff --git a/torch_geometric/utils/spmm.py b/torch_geometric/utils/spmm.py index fc83a34b54f5..e2755d85657f 100644 --- a/torch_geometric/utils/spmm.py +++ b/torch_geometric/utils/spmm.py @@ -121,8 +121,9 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: else: assert src.layout == torch.sparse_coo src = src.coalesce() - deg = scatter(torch.ones_like(src.values()), src.indices(), dim=0, - dim_size=src.size(0), reduce='sum') + deg = scatter(torch.ones_like(src.values()), + src.indices()[0], dim=0, dim_size=src.size(0), + reduce='sum') return torch.sparse.mm(src, other) / deg.view(-1, 1).clamp_(min=1) From 70cff06c7e9fd2bd68e15e1d3dda124138d9c530 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 10 Apr 2023 02:51:47 +0100 Subject: [PATCH 1102/2432] Update batching tutorial to contain cleaner code (#7145) --- docs/source/advanced/batching.rst | 119 +++++++++++++++--------------- test/data/test_batch.py | 3 +- 2 files changed, 61 insertions(+), 61 deletions(-) diff --git a/docs/source/advanced/batching.rst b/docs/source/advanced/batching.rst index 8a55de4ca39b..9acbd5eab22c 100644 --- a/docs/source/advanced/batching.rst +++ b/docs/source/advanced/batching.rst @@ -61,26 +61,24 @@ For example, consider storing two graphs, a source graph :math:`\mathcal{G}_s` a .. code-block:: python - from torch_geometric.data import Data + from torch_geometric.data import Data - class PairData(Data): - def __init__(self, edge_index_s=None, x_s=None, edge_index_t=None, x_t=None): - super().__init__() - self.edge_index_s = edge_index_s - self.x_s = x_s - self.edge_index_t = edge_index_t - self.x_t = x_t + class PairData(Data): + pass + + data = PairData(x_s=x_s, edge_index_s=edge_index_s, # Source graph. + x_t=x_t, edge_index_t=edge_index_t) # Target graph. 
In this case, :obj:`edge_index_s` should be increased by the number of nodes in the source graph :math:`\mathcal{G}_s`, *e.g.*, :obj:`x_s.size(0)`, and :obj:`edge_index_t` should be increased by the number of nodes in the target graph :math:`\mathcal{G}_t`, *e.g.*, :obj:`x_t.size(0)`: .. code-block:: python - def __inc__(self, key, value, *args, **kwargs): - if key == 'edge_index_s': - return self.x_s.size(0) - if key == 'edge_index_t': - return self.x_t.size(0) - else: + class PairData(Data): + def __inc__(self, key, value, *args, **kwargs): + if key == 'edge_index_s': + return self.x_s.size(0) + if key == 'edge_index_t': + return self.x_t.size(0) return super().__inc__(key, value, *args, **kwargs) We can test our :class:`PairData` batching behavior by setting up a simple test script: @@ -89,25 +87,28 @@ We can test our :class:`PairData` batching behavior by setting up a simple test from torch_geometric.loader import DataLoader + x_s = torch.randn(5, 16) # 5 nodes. edge_index_s = torch.tensor([ [0, 0, 0, 0], [1, 2, 3, 4], ]) - x_s = torch.randn(5, 16) # 5 nodes. + + x_t = torch.randn(4, 16) # 4 nodes. edge_index_t = torch.tensor([ [0, 0, 0], [1, 2, 3], ]) - x_t = torch.randn(4, 16) # 4 nodes. - data = PairData(edge_index_s, x_s, edge_index_t, x_t) + data = PairData(x_s=x_s, edge_index_s=edge_index_s, + x_t=x_t, edge_index_t=edge_index_t) + data_list = [data, data] loader = DataLoader(data_list, batch_size=2) batch = next(iter(loader)) print(batch) - >>> PairDataBatch(edge_index_s=[2, 8], x_s=[10, 16], - edge_index_t=[2, 6], x_t=[8, 16]) + >>> PairDataBatch(x_s=[10, 16], edge_index_s=[2, 8], + x_t=[8, 16], edge_index_t=[2, 6]) print(batch.edge_index_s) >>> tensor([[0, 0, 0, 0, 5, 5, 5, 5], @@ -118,9 +119,9 @@ We can test our :class:`PairData` batching behavior by setting up a simple test [1, 2, 3, 5, 6, 7]]) Everything looks good so far! -:obj:`edge_index_s` and :obj:`edge_index_t` get correctly batched together, even when using different numbers of nodes for :math:`\mathcal{G}_s` and :math:`\mathcal{G}_t`. +:obj:`edge_index_s` and :obj:`edge_index_t` get correctly batched together, even when using a different numbers of nodes for :math:`\mathcal{G}_s` and :math:`\mathcal{G}_t`. However, the :obj:`batch` attribute (that maps each node to its respective graph) is missing since :pyg:`PyG` fails to identify the actual graph in the :class:`PairData` object. -That's where the :obj:`follow_batch` argument of the :class:`~torch_geometric.loader.DataLoader` comes into play. +That is where the :obj:`follow_batch` argument of the :class:`~torch_geometric.loader.DataLoader` comes into play. Here, we can specify for which attributes we want to maintain the batch information: .. code-block:: python @@ -129,15 +130,16 @@ Here, we can specify for which attributes we want to maintain the batch informat batch = next(iter(loader)) print(batch) - >>> PairDataBatch(edge_index_s=[2, 8], x_s=[10, 16], x_s_batch=[10], - edge_index_t=[2, 6], x_t=[8, 16], x_t_batch=[8]) + >>> PairDataBatch(x_s=[10, 16], edge_index_s=[2, 8], x_s_batch=[10], + x_t=[8, 16], edge_index_t=[2, 6], x_t_batch=[8]) + print(batch.x_s_batch) >>> tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) print(batch.x_t_batch) >>> tensor([0, 0, 0, 0, 1, 1, 1, 1]) -As one can see, :obj:`follow_batch=['x_s', 'x_t']` now successfully creates assignment vectors called :obj:`x_s_batch` and :obj:`x_t_batch` for the node features :obj:`x_s` and :obj:`x_t`, respectively. 
+As one can see, :obj:`follow_batch=['x_s', 'x_t']` now successfully creates assignment vectors :obj:`x_s_batch` and :obj:`x_t_batch` for the node features :obj:`x_s` and :obj:`x_t`, respectively. That information can now be used to perform reduce operations, *e.g.*, global pooling, on multiple graphs in a single :class:`Batch` object. Bipartite Graphs @@ -150,23 +152,21 @@ To achieve this, consider a bipartite graph between two node types with correspo .. code-block:: python - from torch_geometric.data import Data + from torch_geometric.data import Data - class BipartiteData(Data): - def __init__(self, edge_index=None, x_s=None, x_t=None): - super().__init__() - self.edge_index = edge_index - self.x_s = x_s - self.x_t = x_t + class BipartiteData(Data): + pass -For a correct mini-batching procedure in bipartite graphs, we need to tell :pyg:`PyG` that it should increment source and target nodes of edges in :obj:`edge_index` independently on each other: + data = BipartiteData(x_s=x_s, x_t=x_t, edge_index=edge_index) + +For a correct mini-batching procedure in bipartite graphs, we need to tell :pyg:`PyG` that it should increment source and target nodes of edges in :obj:`edge_index` independently: .. code-block:: python - def __inc__(self, key, value, *args, **kwargs): - if key == 'edge_index': - return torch.tensor([[self.x_s.size(0)], [self.x_t.size(0)]]) - else: + class BipartiteData(Data): + def __inc__(self, key, value, *args, **kwargs): + if key == 'edge_index': + return torch.tensor([[self.x_s.size(0)], [self.x_t.size(0)]]) return super().__inc__(key, value, *args, **kwargs) Here, :obj:`edge_index[0]` (the source nodes of edges) get incremented by :obj:`x_s.size(0)` while :obj:`edge_index[1]` (the target nodes of edges) get incremented by :obj:`x_t.size(0)`. @@ -174,22 +174,23 @@ We can again test our implementation by running a simple test script: .. code-block:: python - from torch_geometric.loader import DataLoader + from torch_geometric.loader import DataLoader + x_s = torch.randn(2, 16) # 2 nodes. + x_t = torch.randn(3, 16) # 3 nodes. edge_index = torch.tensor([ [0, 0, 1, 1], [0, 1, 1, 2], ]) - x_s = torch.randn(2, 16) # 2 nodes. - x_t = torch.randn(3, 16) # 3 nodes. - data = BipartiteData(edge_index, x_s, x_t) + data = BipartiteData(x_s=x_s, x_t=x_t, edge_index=edge_index) + data_list = [data, data] loader = DataLoader(data_list, batch_size=2) batch = next(iter(loader)) print(batch) - >>> BipartiteDataBatch(edge_index=[2, 8], x_s=[4, 16], x_t=[6, 16]) + >>> BipartiteDataBatch(x_s=[4, 16], x_t=[6, 16], edge_index=[2, 8]) print(batch.edge_index) >>> tensor([[0, 0, 1, 1, 2, 2, 3, 3], @@ -206,28 +207,28 @@ Specifically, a list of attributes of shape :obj:`[num_features]` should be retu .. 
code-block:: python - from torch_geometric.data import Data - from torch_geometric.loader import DataLoader + from torch_geometric.data import Data + from torch_geometric.loader import DataLoader class MyData(Data): def __cat_dim__(self, key, value, *args, **kwargs): if key == 'foo': return None - else: - return super().__cat_dim__(key, value, *args, **kwargs) - - edge_index = torch.tensor([ - [0, 1, 1, 2], - [1, 0, 2, 1], - ]) - foo = torch.randn(16) - - data = MyData(edge_index=edge_index, foo=foo) - data_list = [data, data] - loader = DataLoader(data_list, batch_size=2) - batch = next(iter(loader)) - - print(batch) - >>> MyDataBatch(edge_index=[2, 8], foo=[2, 16]) + return super().__cat_dim__(key, value, *args, **kwargs) + + edge_index = torch.tensor([ + [0, 1, 1, 2], + [1, 0, 2, 1], + ]) + foo = torch.randn(16) + + data = MyData(num_nodes=3, edge_index=edge_index, foo=foo) + + data_list = [data, data] + loader = DataLoader(data_list, batch_size=2) + batch = next(iter(loader)) + + print(batch) + >>> MyDataBatch(num_nodes=6, edge_index=[2, 8], foo=[2, 16]) As desired, :obj:`batch.foo` is now described by two dimensions: The batch dimension and the feature dimension. diff --git a/test/data/test_batch.py b/test/data/test_batch.py index fc63c16c70fc..76dc187b625f 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -416,8 +416,7 @@ def __inc__(self, key, value, *args, **kwargs): return self.x_s.size(0) if key == 'edge_index_t': return self.x_t.size(0) - else: - return super().__inc__(key, value, *args, **kwargs) + return super().__inc__(key, value, *args, **kwargs) x_s = torch.randn(5, 16) edge_index_s = torch.tensor([ From 2d8af8fa1508272c0ac48da7c8a4bb906dd29e21 Mon Sep 17 00:00:00 2001 From: andreazanetti Date: Mon, 10 Apr 2023 11:38:49 +0200 Subject: [PATCH 1103/2432] `SparseTensor` support for `trim_to_layer` (#7089) It provides support for SparseTensor data format for the trim_to_layer.py functionality. The trimming function is isolated in a separate function, but still contained in trim_to_layer.py, as it is useful to that only. Tests are included extending the test file used for trim_to_layer.py. Some tests already present in a previous PR were slightly modified as to simulate a adj_t matrix that originates from a BFS traversal of a graph, starting the BFS traversal from the target node (assuming Batch Size = 1) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/utils/test_trim_to_layer.py | 124 +++++++++++++++++++------ torch_geometric/typing.py | 6 +- torch_geometric/utils/trim_to_layer.py | 75 +++++++++++++-- 4 files changed, 171 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52f8c17266f1..e57a98bfb10d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `SparseTensor` support to`trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089)) - Added instructions for ROCm build wheels ([#7143](https://github.com/pyg-team/pytorch_geometric/pull/7143)) - Added a `ComposeFilters` class to compose `pre_filter` functions in `Dataset` ([#7097](https://github.com/pyg-team/pytorch_geometric/pull/7097)) - Added a time-step aware variant of the `EllipticBitcoinDataset` called `EllipticBitcoinTemporalDataset` ([#7011](https://github.com/pyg-team/pytorch_geometric/pull/7011)) diff --git a/test/utils/test_trim_to_layer.py b/test/utils/test_trim_to_layer.py index 64c99165b978..3260a133c6e1 100644 --- a/test/utils/test_trim_to_layer.py +++ b/test/utils/test_trim_to_layer.py @@ -3,56 +3,126 @@ import torch from torch import Tensor +import torch_geometric.typing from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv from torch_geometric.testing import withPackage +from torch_geometric.typing import SparseTensor from torch_geometric.utils import trim_to_layer +from torch_geometric.utils.trim_to_layer import trim_sparse_tensor + + +@withPackage('torch_sparse') +def test_trim_sparse_tensor(): + edge_index = torch.tensor([[0, 0, 1, 2], [1, 2, 3, 4]]) + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[5, 5]) + + adj = trim_sparse_tensor(adj, num_nodes=3, num_seed_nodes=1) + + row, col, _ = adj.coo() + assert row.tolist() == [0, 0] + assert col.tolist() == [1, 2] def test_trim_to_layer_basic(): - x = torch.arange(4) - edge_index = torch.tensor([[1, 2, 3], [0, 1, 2]]) - edge_weight = torch.arange(3) + x0 = torch.arange(4) + edge_index0 = torch.tensor([[1, 2, 3], [0, 1, 2]]) + edge_weight0 = torch.arange(3) - num_sampled_nodes_per_hop = [1, 1, 1, 1] + num_sampled_nodes_per_hop = [1, 1, 1] num_sampled_edges_per_hop = [1, 1, 1] - x, edge_index, edge_weight = trim_to_layer( + x1, edge_index1, edge_weight1 = trim_to_layer( layer=0, num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, + x=x0, + edge_index=edge_index0, + edge_attr=edge_weight0, ) - assert torch.equal(x, torch.arange(4)) - assert edge_index.tolist() == [[1, 2, 3], [0, 1, 2]] - assert torch.equal(edge_weight, torch.arange(3)) - - x, edge_index, edge_weight = trim_to_layer( + assert torch.equal(x1, torch.arange(4)) + assert edge_index1.tolist() == [[1, 2, 3], [0, 1, 2]] + assert torch.equal(edge_weight1, torch.arange(3)) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj0 = SparseTensor.from_edge_index(edge_index0, edge_weight0, (4, 4)) + x1, adj_t1, _ = trim_to_layer( + layer=0, + num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, + num_sampled_edges_per_hop=num_sampled_edges_per_hop, + x=x0, + edge_index=adj0.t(), + edge_attr=edge_weight0, + ) + adj1 = adj_t1.t() + assert adj1.sizes() == [4, 4] + + row, col, value = adj1.coo() + assert torch.equal(x1, torch.arange(4)) + assert row.tolist() == [1, 2, 3] + assert col.tolist() == [0, 1, 2] + assert torch.equal(value, torch.arange(3)) + + x2, edge_index2, edge_weight2 = trim_to_layer( layer=1, num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, + x=x1, + edge_index=edge_index1, + edge_attr=edge_weight1, ) - assert torch.equal(x, torch.arange(3)) - assert edge_index.tolist() == [[1, 2], [0, 1]] - assert 
torch.equal(edge_weight, torch.arange(2)) - - x, edge_index, edge_weight = trim_to_layer( + assert torch.equal(x2, torch.arange(3)) + assert edge_index2.tolist() == [[1, 2], [0, 1]] + assert torch.equal(edge_weight2, torch.arange(2)) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj1 = SparseTensor.from_edge_index(edge_index1, edge_weight1, (4, 4)) + x2, adj_t2, _ = trim_to_layer( + layer=1, + num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, + num_sampled_edges_per_hop=num_sampled_edges_per_hop, + x=x1, + edge_index=adj1.t(), + ) + adj2 = adj_t2.t() + assert adj2.sizes() == [3, 3] + + row, col, value = adj2.coo() + assert torch.equal(x2, torch.arange(3)) + assert row.tolist() == [1, 2] + assert col.tolist() == [0, 1] + assert torch.equal(value, torch.arange(2)) + + x3, edge_index3, edge_weight3 = trim_to_layer( layer=2, num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, + x=x2, + edge_index=edge_index2, + edge_attr=edge_weight2, ) - assert torch.equal(x, torch.arange(2)) - assert edge_index.tolist() == [[1], [0]] - assert torch.equal(edge_weight, torch.arange(1)) + assert torch.equal(x3, torch.arange(2)) + assert edge_index3.tolist() == [[1], [0]] + assert torch.equal(edge_weight3, torch.arange(1)) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index2, edge_weight2, (3, 3)) + x3, adj_t3, _ = trim_to_layer( + layer=2, + num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, + num_sampled_edges_per_hop=num_sampled_edges_per_hop, + x=x2, + edge_index=adj2.t(), + ) + adj3 = adj_t3.t() + assert adj3.sizes() == [2, 2] + + row, col, value = adj3.coo() + assert torch.equal(x3, torch.arange(2)) + assert row.tolist() == [1] + assert col.tolist() == [0] + assert torch.equal(value, torch.arange(1)) def test_trim_to_layer_hetero(): diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 797c58ff7515..769dc0218a97 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -35,7 +35,7 @@ try: import torch_sparse # noqa - from torch_sparse import SparseTensor + from torch_sparse import SparseStorage, SparseTensor WITH_TORCH_SPARSE = True except (ImportError, OSError) as e: if isinstance(e, OSError): @@ -43,6 +43,10 @@ f"Disabling its usage. Stacktrace: {e}") WITH_TORCH_SPARSE = False + class SparseStorage: + def __init__(*args, **kwargs): + raise ImportError("'SparseStorage' requires 'torch-sparse'") + class SparseTensor: def __init__( self, diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index 058bb40daee8..dd2ce60b8c57 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -8,6 +8,8 @@ MaybeHeteroEdgeTensor, MaybeHeteroNodeTensor, NodeType, + SparseStorage, + SparseTensor, ) @@ -45,6 +47,7 @@ def trim_to_layer( if layer <= 0: return x, edge_index, edge_attr + # TODO Support `SparseTensor` for heterogeneous graphs. 
if isinstance(num_sampled_edges_per_hop, dict): x = { k: v.narrow( @@ -78,18 +81,28 @@ def trim_to_layer( start=0, length=x.size(0) - num_sampled_nodes_per_hop[-layer], ) - edge_index = edge_index.narrow( - dim=1, - start=0, - length=edge_index.size(1) - num_sampled_edges_per_hop[-layer], - ) if edge_attr is not None: edge_attr = edge_attr.narrow( dim=0, start=0, length=edge_attr.size(0) - num_sampled_edges_per_hop[-layer], ) - return x, edge_index, edge_attr + if isinstance(edge_index, Tensor): + edge_index = edge_index.narrow( + dim=1, + start=0, + length=edge_index.size(1) - num_sampled_edges_per_hop[-layer], + ) + return x, edge_index, edge_attr + + elif isinstance(edge_index, SparseTensor): + num_nodes = edge_index.size(0) - num_sampled_nodes_per_hop[-layer] + num_seed_nodes = num_nodes - num_sampled_nodes_per_hop[-(layer + 1)] + edge_index = trim_sparse_tensor(edge_index, num_nodes, num_seed_nodes) + + return x, edge_index, edge_attr + + raise NotImplementedError class TrimToLayer(torch.nn.Module): @@ -99,7 +112,7 @@ def forward( num_sampled_nodes_per_hop: Optional[List[int]], num_sampled_edges_per_hop: Optional[List[int]], x: Tensor, - edge_index: Tensor, + edge_index: Union[Tensor, SparseTensor], edge_attr: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: @@ -123,3 +136,51 @@ def forward( edge_index, edge_attr, ) + + +# Helper functions ############################################################ + + +def trim_sparse_tensor(src: SparseTensor, num_nodes: int, + num_seed_nodes: None) -> SparseTensor: + r"""Trims a :class:`SparseTensor` along both dimensions to only contain + the upper :obj:`num_nodes` in both dimensions. + + It is assumed that :class:`SparseTensor` is obtained from BFS traversing, + starting from the nodes that have been initially selected. + + Args: + src (SparseTensor): The sparse tensor. + num_nodes (int): The number of first nodes to keep. + num_seed_nodes (int): The number of seed nodes to compute + representations. + """ + rowptr, col, value = src.csr() + + rowptr = torch.narrow(rowptr, 0, 0, num_nodes + 1).clone() + rowptr[num_seed_nodes + 1:] = rowptr[num_seed_nodes] + + col = torch.narrow(col, 0, 0, rowptr[-1]) + + if value is not None: + value = torch.narrow(value, 0, 0, rowptr[-1]) + + csr2csc = src.storage._csr2csc + if csr2csc is not None: + csr2csc = csr2csc[csr2csc < len(col)] + + storage = SparseStorage( + row=None, + rowptr=rowptr, + col=col, + value=value, + sparse_sizes=(num_nodes, num_nodes), + rowcount=None, + colptr=None, + colcount=None, + csr2csc=csr2csc, + csc2csr=None, + is_sorted=True, + trust_data=True, + ) + return src.from_storage(storage) From 7be8876e86547921105f11c884ae06d2847d0390 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 11 Apr 2023 15:49:09 +0100 Subject: [PATCH 1104/2432] Added edge weight support to `LightGCN` (#7157) --- CHANGELOG.md | 1 + test/nn/models/test_lightgcn.py | 42 +++++++++++++++----------- torch_geometric/nn/models/lightgcn.py | 43 ++++++++++++++++++++------- 3 files changed, 58 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e57a98bfb10d..bb9d6ff192a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added edge weight support to `LightGCN` ([#7157](https://github.com/pyg-team/pytorch_geometric/pull/7157)) - Added `SparseTensor` support to`trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089)) - Added instructions for ROCm build wheels ([#7143](https://github.com/pyg-team/pytorch_geometric/pull/7143)) - Added a `ComposeFilters` class to compose `pre_filter` functions in `Dataset` ([#7097](https://github.com/pyg-team/pytorch_geometric/pull/7097)) diff --git a/test/nn/models/test_lightgcn.py b/test/nn/models/test_lightgcn.py index 4176b008c82e..6722689f200b 100644 --- a/test/nn/models/test_lightgcn.py +++ b/test/nn/models/test_lightgcn.py @@ -5,55 +5,63 @@ @pytest.mark.parametrize('embedding_dim', [32, 64]) +@pytest.mark.parametrize('with_edge_weight', [False, True]) @pytest.mark.parametrize('lambda_reg', [0, 1e-4]) @pytest.mark.parametrize('alpha', [0, .25, torch.tensor([0.4, 0.3, 0.2])]) -def test_lightgcn_ranking(embedding_dim, lambda_reg, alpha): - N = 500 - edge_index = torch.randint(0, N, (2, 400), dtype=torch.int64) - edge_label_index = torch.randint(0, N, (2, 100), dtype=torch.int64) - - model = LightGCN(N, embedding_dim, num_layers=2, alpha=alpha) +def test_lightgcn_ranking(embedding_dim, with_edge_weight, lambda_reg, alpha): + num_nodes = 500 + num_edges = 400 + edge_index = torch.randint(0, num_nodes, (2, num_edges)) + edge_weight = torch.rand(num_edges) if with_edge_weight else None + edge_label_index = torch.randint(0, num_nodes, (2, 100)) + + model = LightGCN(num_nodes, embedding_dim, num_layers=2, alpha=alpha) assert str(model) == f'LightGCN(500, {embedding_dim}, num_layers=2)' - pred = model(edge_index, edge_label_index) + pred = model(edge_index, edge_label_index, edge_weight) assert pred.size() == (100, ) loss = model.recommendation_loss(pred[:50], pred[50:], lambda_reg) assert loss.dim() == 0 and loss > 0 - out = model.recommend(edge_index, k=2) + out = model.recommend(edge_index, edge_weight, k=2) assert out.size() == (500, 2) assert out.min() >= 0 and out.max() < 500 src_index = torch.arange(0, 250) dst_index = torch.arange(250, 500) - out = model.recommend(edge_index, src_index, dst_index, k=2) + out = model.recommend(edge_index, edge_weight, src_index, dst_index, k=2) assert out.size() == (250, 2) assert out.min() >= 250 and out.max() < 500 @pytest.mark.parametrize('embedding_dim', [32, 64]) +@pytest.mark.parametrize('with_edge_weight', [False, True]) @pytest.mark.parametrize('alpha', [0, .25, torch.tensor([0.4, 0.3, 0.2])]) -def test_lightgcn_link_prediction(embedding_dim, alpha): - N = 500 - edge_index = torch.randint(0, N, (2, 400), dtype=torch.int64) - edge_label_index = torch.randint(0, N, (2, 100), dtype=torch.int64) +def test_lightgcn_link_prediction(embedding_dim, with_edge_weight, alpha): + num_nodes = 500 + num_edges = 400 + edge_index = torch.randint(0, num_nodes, (2, num_edges)) + edge_weight = torch.rand(num_edges) if with_edge_weight else None + edge_label_index = torch.randint(0, num_nodes, (2, 100)) edge_label = torch.randint(0, 2, (edge_label_index.size(1), )) - model = LightGCN(N, embedding_dim, num_layers=2, alpha=alpha) + model = LightGCN(num_nodes, embedding_dim, num_layers=2, alpha=alpha) assert str(model) == f'LightGCN(500, {embedding_dim}, num_layers=2)' - pred = model(edge_index, edge_label_index) + pred = model(edge_index, edge_label_index, edge_weight) assert pred.size() == (100, ) loss = model.link_pred_loss(pred, edge_label) assert loss.dim() == 0 and loss > 0 - prob = 
model.predict_link(edge_index, edge_label_index, prob=True) + prob = model.predict_link(edge_index, edge_label_index, edge_weight, + prob=True) assert prob.size() == (100, ) assert prob.min() > 0 and prob.max() < 1 - prob = model.predict_link(edge_index, edge_label_index, prob=False) + prob = model.predict_link(edge_index, edge_label_index, edge_weight, + prob=False) assert prob.size() == (100, ) assert ((prob == 0) | (prob == 1)).sum() == 100 diff --git a/torch_geometric/nn/models/lightgcn.py b/torch_geometric/nn/models/lightgcn.py index f174a4b6c487..4da7ea09e094 100644 --- a/torch_geometric/nn/models/lightgcn.py +++ b/torch_geometric/nn/models/lightgcn.py @@ -88,19 +88,27 @@ def reset_parameters(self): for conv in self.convs: conv.reset_parameters() - def get_embedding(self, edge_index: Adj) -> Tensor: + def get_embedding( + self, + edge_index: Adj, + edge_weight: OptTensor = None, + ) -> Tensor: r"""Returns the embedding of nodes in the graph.""" x = self.embedding.weight out = x * self.alpha[0] for i in range(self.num_layers): - x = self.convs[i](x, edge_index) + x = self.convs[i](x, edge_index, edge_weight) out = out + x * self.alpha[i + 1] return out - def forward(self, edge_index: Adj, - edge_label_index: OptTensor = None) -> Tensor: + def forward( + self, + edge_index: Adj, + edge_label_index: OptTensor = None, + edge_weight: OptTensor = None, + ) -> Tensor: r"""Computes rankings for pairs of nodes. Args: @@ -110,6 +118,8 @@ def forward(self, edge_index: Adj, the node pairs for which to compute rankings or probabilities. If :obj:`edge_label_index` is set to :obj:`None`, all edges in :obj:`edge_index` will be used instead. (default: :obj:`None`) + edge_weight (torch.Tensor, optional): The weight of each edge in + :obj:`edge_index`. (default: :obj:`None`) """ if edge_label_index is None: if isinstance(edge_index, SparseTensor): @@ -117,25 +127,36 @@ def forward(self, edge_index: Adj, else: edge_label_index = edge_index - out = self.get_embedding(edge_index) + out = self.get_embedding(edge_index, edge_weight) out_src = out[edge_label_index[0]] out_dst = out[edge_label_index[1]] return (out_src * out_dst).sum(dim=-1) - def predict_link(self, edge_index: Adj, edge_label_index: OptTensor = None, - prob: bool = False) -> Tensor: + def predict_link( + self, + edge_index: Adj, + edge_label_index: OptTensor = None, + edge_weight: OptTensor = None, + prob: bool = False, + ) -> Tensor: r"""Predict links between nodes specified in :obj:`edge_label_index`. Args: prob (bool, optional): Whether probabilities should be returned. (default: :obj:`False`) """ - pred = self(edge_index, edge_label_index).sigmoid() + pred = self(edge_index, edge_label_index, edge_weight).sigmoid() return pred if prob else pred.round() - def recommend(self, edge_index: Adj, src_index: OptTensor = None, - dst_index: OptTensor = None, k: int = 1) -> Tensor: + def recommend( + self, + edge_index: Adj, + edge_weight: OptTensor = None, + src_index: OptTensor = None, + dst_index: OptTensor = None, + k: int = 1, + ) -> Tensor: r"""Get top-:math:`k` recommendations for nodes in :obj:`src_index`. Args: @@ -149,7 +170,7 @@ def recommend(self, edge_index: Adj, src_index: OptTensor = None, (default: :obj:`None`) k (int, optional): Number of recommendations. 
(default: :obj:`1`) """ - out_src = out_dst = self.get_embedding(edge_index) + out_src = out_dst = self.get_embedding(edge_index, edge_weight) if src_index is not None: out_src = out_src[src_index] From 1fab9aded3bc87b09125949a27ef27822f6ab746 Mon Sep 17 00:00:00 2001 From: Jintang Li Date: Tue, 11 Apr 2023 23:07:38 +0800 Subject: [PATCH 1105/2432] PyTorch Sparse Tensor support for `LightGCN` (#7155) Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/nn/models/lightgcn.py | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb9d6ff192a4..a284da8ab538 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155)) - Added edge weight support to `LightGCN` ([#7157](https://github.com/pyg-team/pytorch_geometric/pull/7157)) - Added `SparseTensor` support to`trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089)) - Added instructions for ROCm build wheels ([#7143](https://github.com/pyg-team/pytorch_geometric/pull/7143)) diff --git a/torch_geometric/nn/models/lightgcn.py b/torch_geometric/nn/models/lightgcn.py index 4da7ea09e094..9a29a4f6cea6 100644 --- a/torch_geometric/nn/models/lightgcn.py +++ b/torch_geometric/nn/models/lightgcn.py @@ -7,7 +7,8 @@ from torch.nn.modules.loss import _Loss from torch_geometric.nn.conv import LGConv -from torch_geometric.typing import Adj, OptTensor, SparseTensor +from torch_geometric.typing import Adj, OptTensor +from torch_geometric.utils import is_sparse, to_edge_index class LightGCN(torch.nn.Module): @@ -122,8 +123,8 @@ def forward( :obj:`edge_index`. 
(default: :obj:`None`) """ if edge_label_index is None: - if isinstance(edge_index, SparseTensor): - edge_label_index = torch.stack(edge_index.coo()[:2], dim=0) + if is_sparse(edge_index): + edge_label_index, _ = to_edge_index(edge_index) else: edge_label_index = edge_index From df9f929b2e1ca76bdc3386368199f1660cc645a7 Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Tue, 11 Apr 2023 08:23:32 -0700 Subject: [PATCH 1106/2432] Fix tolerance in tests with conv (#7158) Some tests with conv need tolerance adjustment when used with a GPU backend (CUDA libraries used by the PyTorch convolutions may, non deterministic ops, mixed-precision etc) --------- Signed-off-by: Serge Panev Co-authored-by: Matthias Fey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- test/nn/conv/test_ppf_conv.py | 19 ++++++++++--------- test/nn/conv/test_rgcn_conv.py | 30 +++++++++++++++--------------- test/nn/dense/test_linear.py | 2 +- 3 files changed, 26 insertions(+), 25 deletions(-) diff --git a/test/nn/conv/test_ppf_conv.py b/test/nn/conv/test_ppf_conv.py index c7f8875687d8..31c7851d8c86 100644 --- a/test/nn/conv/test_ppf_conv.py +++ b/test/nn/conv/test_ppf_conv.py @@ -34,21 +34,21 @@ def test_ppf_conv(): out = conv(x1, pos1, n1, edge_index) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, pos1, n1, adj1.t()), out, atol=1e-6) + assert torch.allclose(conv(x1, pos1, n1, adj1.t()), out, atol=1e-3) if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x1, pos1, n1, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv(x1, pos1, n1, adj2.t()), out, atol=1e-3) if is_full_test(): t = '(OptTensor, Tensor, Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, n1, edge_index), out, atol=1e-6) + assert torch.allclose(jit(x1, pos1, n1, edge_index), out, atol=1e-3) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptTensor, Tensor, Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, n1, adj2.t()), out, atol=1e-6) + assert torch.allclose(jit(x1, pos1, n1, adj2.t()), out, atol=1e-3) # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) @@ -56,16 +56,17 @@ def test_ppf_conv(): out = conv(x1, (pos1, pos2), (n1, n2), edge_index) assert out.size() == (2, 32) assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), edge_index), - out, atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj1.t()), out) + out, atol=1e-3) + assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj1.t()), out, + atol=1e-3) assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), adj1.t()), - out, atol=1e-6) + out, atol=1e-3) if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj2.t()), out) assert torch.allclose( - conv((x1, None), (pos1, pos2), (n1, n2), adj2.t()), out, atol=1e-6) + conv((x1, None), (pos1, pos2), (n1, n2), adj2.t()), out, atol=1e-3) if is_full_test(): t = '(PairOptTensor, PairTensor, PairTensor, Tensor) -> Tensor' @@ -77,4 +78,4 @@ def test_ppf_conv(): t = '(PairOptTensor, PairTensor, PairTensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose( - jit((x1, None), (pos1, pos2), (n1, n2), adj2.t()), out, atol=1e-6) + jit((x1, None), (pos1, pos2), (n1, 
n2), adj2.t()), out, atol=1e-3) diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index ea27a9bdae63..5e8bbf2fbec1 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -38,12 +38,12 @@ def test_rgcn_conv_equality(conf, device): out1 = conv1(x1, edge_index, edge_type) out2 = conv2(x1, edge_index, edge_type) - assert torch.allclose(out1, out2, atol=1e-6) + assert torch.allclose(out1, out2, atol=1e-3) if num_blocks is None: out1 = conv1(None, edge_index, edge_type) out2 = conv2(None, edge_index, edge_type) - assert torch.allclose(out1, out2, atol=1e-6) + assert torch.allclose(out1, out2, atol=1e-3) @withCUDA @@ -70,15 +70,15 @@ def test_rgcn_conv(cls, conf, device): if torch_geometric.typing.WITH_TORCH_SPARSE: adj = SparseTensor.from_edge_index(edge_index, edge_type, (4, 4)) - assert torch.allclose(conv(x1, adj.t()), out1, atol=1e-6) + assert torch.allclose(conv(x1, adj.t()), out1, atol=1e-3) if num_blocks is None: out2 = conv(None, edge_index, edge_type) assert torch.allclose(conv(idx1, edge_index, edge_type), out2) assert out2.size() == (4, 32) if torch_geometric.typing.WITH_TORCH_SPARSE: - assert torch.allclose(conv(None, adj.t()), out2, atol=1e-6) - assert torch.allclose(conv(idx1, adj.t()), out2, atol=1e-6) + assert torch.allclose(conv(None, adj.t()), out2, atol=1e-3) + assert torch.allclose(conv(idx1, adj.t()), out2, atol=1e-3) if is_full_test(): t = '(OptTensor, Tensor, OptTensor) -> Tensor' @@ -93,8 +93,8 @@ def test_rgcn_conv(cls, conf, device): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, adj.t()), out1) if num_blocks is None: - assert torch.allclose(jit(idx1, adj.t()), out2, atol=1e-6) - assert torch.allclose(jit(None, adj.t()), out2, atol=1e-6) + assert torch.allclose(jit(idx1, adj.t()), out2, atol=1e-3) + assert torch.allclose(jit(None, adj.t()), out2, atol=1e-3) # Test bipartite message passing: conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum').to(device) @@ -105,15 +105,15 @@ def test_rgcn_conv(cls, conf, device): if torch_geometric.typing.WITH_TORCH_SPARSE: adj = SparseTensor.from_edge_index(edge_index, edge_type, (4, 2)) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-3) if num_blocks is None: out2 = conv((None, idx2), edge_index, edge_type) assert out2.size() == (2, 32) assert torch.allclose(conv((idx1, idx2), edge_index, edge_type), out2) if torch_geometric.typing.WITH_TORCH_SPARSE: - assert torch.allclose(conv((None, idx2), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((idx1, idx2), adj.t()), out2, atol=1e-6) + assert torch.allclose(conv((None, idx2), adj.t()), out2, atol=1e-3) + assert torch.allclose(conv((idx1, idx2), adj.t()), out2, atol=1e-3) if is_full_test(): t = '(Tuple[OptTensor, Tensor], Tensor, OptTensor) -> Tensor' @@ -121,14 +121,14 @@ def test_rgcn_conv(cls, conf, device): assert torch.allclose(jit((x1, x2), edge_index, edge_type), out1) if num_blocks is None: assert torch.allclose(jit((None, idx2), edge_index, edge_type), - out2, atol=1e-6) + out2, atol=1e-3) assert torch.allclose(jit((idx1, idx2), edge_index, edge_type), - out2, atol=1e-6) + out2, atol=1e-3) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tuple[OptTensor, Tensor], SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-3) if num_blocks is 
None: - assert torch.allclose(jit((None, idx2), adj.t()), out2, atol=1e-6) - assert torch.allclose(jit((idx1, idx2), adj.t()), out2, atol=1e-6) + assert torch.allclose(jit((None, idx2), adj.t()), out2, atol=1e-3) + assert torch.allclose(jit((idx1, idx2), adj.t()), out2, atol=1e-3) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index e258d77b2a6d..15919831dab3 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -215,4 +215,4 @@ def test_hetero_linear_sort(type_vec, device): for i in range(type_vec.numel()): node_type = int(type_vec[i]) expected = x[i] @ lin.weight[node_type] + lin.bias[node_type] - assert torch.allclose(out[i], expected, atol=1e-6) + assert torch.allclose(out[i], expected, atol=1e-3) From 2d8d455c99df77f1703d606ad5e864f8829a6732 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 11 Apr 2023 16:29:47 +0100 Subject: [PATCH 1107/2432] Add `ClusterLoader` correctness test (#7159) --- test/loader/test_cluster.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index 35d023448df7..d03b27cf6a3b 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -3,6 +3,7 @@ from torch_geometric.data import Data from torch_geometric.loader import ClusterData, ClusterLoader +from torch_geometric.testing import onlyFullTest from torch_geometric.utils import to_dense_adj try: @@ -100,3 +101,18 @@ def test_cluster_gcn(): [0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1], ] + + +@onlyFullTest +@pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') +def test_cluster_gcn_correctness(get_dataset): + dataset = get_dataset('Cora') + data = dataset[0].clone() + data.n_id = torch.arange(data.num_nodes) + cluster_data = ClusterData(data, num_parts=10) + loader = ClusterLoader(cluster_data, batch_size=3, shuffle=False) + + for batch1 in loader: + batch2 = data.subgraph(batch1.n_id) + assert batch1.num_nodes == batch2.num_nodes + assert batch1.num_edges == batch2.num_edges From 59e28172b7db2dc5bfa27d970b448a856bb4b66b Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Tue, 11 Apr 2023 09:29:59 -0700 Subject: [PATCH 1108/2432] More sparse conv fix tolerance rgcn (#7160) More tolerance adjusments following #7158 --------- Signed-off-by: Serge Panev Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rishi Puri --- test/nn/conv/test_rgcn_conv.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index 5e8bbf2fbec1..791be8cec71a 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -38,12 +38,12 @@ def test_rgcn_conv_equality(conf, device): out1 = conv1(x1, edge_index, edge_type) out2 = conv2(x1, edge_index, edge_type) - assert torch.allclose(out1, out2, atol=1e-3) + assert torch.allclose(out1, out2, atol=1e-2) if num_blocks is None: out1 = conv1(None, edge_index, edge_type) out2 = conv2(None, edge_index, edge_type) - assert torch.allclose(out1, out2, atol=1e-3) + assert torch.allclose(out1, out2, atol=1e-2) @withCUDA @@ -74,7 +74,7 @@ def test_rgcn_conv(cls, conf, device): if num_blocks is None: out2 = conv(None, edge_index, edge_type) - assert torch.allclose(conv(idx1, edge_index, edge_type), out2) + assert torch.allclose(conv(idx1, edge_index, edge_type), out2, 1e-3) assert out2.size() == (4, 32) if torch_geometric.typing.WITH_TORCH_SPARSE: assert 
torch.allclose(conv(None, adj.t()), out2, atol=1e-3) @@ -85,8 +85,10 @@ def test_rgcn_conv(cls, conf, device): jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, edge_index, edge_type), out1) if num_blocks is None: - assert torch.allclose(jit(idx1, edge_index, edge_type), out2) - assert torch.allclose(jit(None, edge_index, edge_type), out2) + assert torch.allclose(jit(idx1, edge_index, edge_type), out2, + atol=1e-3) + assert torch.allclose(jit(None, edge_index, edge_type), out2, + atol=1e-3) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptTensor, SparseTensor, OptTensor) -> Tensor' @@ -110,7 +112,8 @@ def test_rgcn_conv(cls, conf, device): if num_blocks is None: out2 = conv((None, idx2), edge_index, edge_type) assert out2.size() == (2, 32) - assert torch.allclose(conv((idx1, idx2), edge_index, edge_type), out2) + assert torch.allclose(conv((idx1, idx2), edge_index, edge_type), out2, + atol=1e-3) if torch_geometric.typing.WITH_TORCH_SPARSE: assert torch.allclose(conv((None, idx2), adj.t()), out2, atol=1e-3) assert torch.allclose(conv((idx1, idx2), adj.t()), out2, atol=1e-3) @@ -118,7 +121,8 @@ def test_rgcn_conv(cls, conf, device): if is_full_test(): t = '(Tuple[OptTensor, Tensor], Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, edge_type), out1) + assert torch.allclose(jit((x1, x2), edge_index, edge_type), out1, + atol=1e-3) if num_blocks is None: assert torch.allclose(jit((None, idx2), edge_index, edge_type), out2, atol=1e-3) From 340d8b9e87569b7dac964f973297c78ee8165a8d Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 12 Apr 2023 00:26:01 -0700 Subject: [PATCH 1109/2432] skipping examples that need `torch-cluster`/`torch-sparse` for now if its not available (#7164) after this PR, i will work on getting all of these functionalities into pyg-lib and then update these examples accordingly (and for example the graph_saint test to use withPackage('pyg-lib') instead) for now, if their torch-sparse and torch-cluster areuninstalled: ``` cd /opt/pyg; pip uninstall -y torch-geometric torch-scatter torch-sparse torch-spline-conv torch-cluster; rm -rf pytorch_geometric; git clone -b examples_req_checkl https://github.com/pyg-team/pytorch_geometric.git; cd /opt/pyg/pytorch_geometric; pip install .; python3 examples/egc.py; python3 examples/gcn2_ppi.py; python3 examples/graph_saint.py; python3 examples/correct_and_smooth.py; python3 examples/node2vec.py; python3 examples/mnist_nn_conv.py This example requires 'torch-sparse' This example requires 'torch-sparse' This example requires 'torch-sparse' This example requires 'torch-sparse' This example requires 'torch-cluster' This example requires 'torch-cluster' ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- examples/correct_and_smooth.py | 4 ++++ examples/egc.py | 4 ++++ examples/gcn2_ppi.py | 4 ++++ examples/graph_saint.py | 4 ++++ examples/mnist_nn_conv.py | 4 ++++ examples/node2vec.py | 4 ++++ torch_geometric/typing.py | 9 +++++++++ 7 files changed, 33 insertions(+) diff --git a/examples/correct_and_smooth.py b/examples/correct_and_smooth.py index e550ccc645f3..8604defdf027 100644 --- a/examples/correct_and_smooth.py +++ b/examples/correct_and_smooth.py @@ -5,6 +5,10 @@ import torch_geometric.transforms as T from torch_geometric.nn import MLP, CorrectAndSmooth +from torch_geometric.typing import WITH_TORCH_SPARSE + +if not WITH_TORCH_SPARSE: + 
quit("This example requires 'torch-sparse'") root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'OGB') dataset = PygNodePropPredDataset('ogbn-products', root, diff --git a/examples/egc.py b/examples/egc.py index 823862393bff..ad46c411fa66 100644 --- a/examples/egc.py +++ b/examples/egc.py @@ -12,6 +12,10 @@ import torch_geometric.transforms as T from torch_geometric.loader import DataLoader from torch_geometric.nn import EGConv, global_mean_pool +from torch_geometric.typing import WITH_TORCH_SPARSE + +if not WITH_TORCH_SPARSE: + quit("This example requires 'torch-sparse'") parser = argparse.ArgumentParser() parser.add_argument('--use_multi_aggregators', action='http://github.com/store_true', diff --git a/examples/gcn2_ppi.py b/examples/gcn2_ppi.py index a22cc5ffa64c..87fcbb4ce5da 100644 --- a/examples/gcn2_ppi.py +++ b/examples/gcn2_ppi.py @@ -9,6 +9,10 @@ from torch_geometric.datasets import PPI from torch_geometric.loader import DataLoader from torch_geometric.nn import GCN2Conv +from torch_geometric.typing import WITH_TORCH_SPARSE + +if not WITH_TORCH_SPARSE: + quit("This example requires 'torch-sparse'") path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'GCN2_PPI') pre_transform = T.Compose([T.GCNNorm(), T.ToSparseTensor()]) diff --git a/examples/graph_saint.py b/examples/graph_saint.py index c32f3152d537..ac2bb0ac9306 100644 --- a/examples/graph_saint.py +++ b/examples/graph_saint.py @@ -7,8 +7,12 @@ from torch_geometric.datasets import Flickr from torch_geometric.loader import GraphSAINTRandomWalkSampler from torch_geometric.nn import GraphConv +from torch_geometric.typing import WITH_TORCH_SPARSE from torch_geometric.utils import degree +if not WITH_TORCH_SPARSE: + quit("This example requires 'torch-sparse'") + path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Flickr') dataset = Flickr(path) data = dataset[0] diff --git a/examples/mnist_nn_conv.py b/examples/mnist_nn_conv.py index b6730a6a8296..9386a2781a47 100644 --- a/examples/mnist_nn_conv.py +++ b/examples/mnist_nn_conv.py @@ -14,8 +14,12 @@ max_pool, max_pool_x, ) +from torch_geometric.typing import WITH_TORCH_CLUSTER from torch_geometric.utils import normalized_cut +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") + path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'MNIST') transform = T.Cartesian(cat=False) train_dataset = MNISTSuperpixels(path, True, transform=transform) diff --git a/examples/node2vec.py b/examples/node2vec.py index 641cdc709939..e54dabc88afb 100644 --- a/examples/node2vec.py +++ b/examples/node2vec.py @@ -7,6 +7,10 @@ from torch_geometric.datasets import Planetoid from torch_geometric.nn import Node2Vec +from torch_geometric.typing import WITH_TORCH_CLUSTER + +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") def main(): diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 769dc0218a97..a05fd2bd09a2 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -33,6 +33,15 @@ torch_scatter = object WITH_TORCH_SCATTER = False +try: + import torch_cluster # noqa + WITH_TORCH_CLUSTER = True +except (ImportError, OSError) as e: + if isinstance(e, OSError): + warnings.warn(f"An issue occurred while importing 'torch-cluster'. " + f"Disabling its usage. 
Stacktrace: {e}") + WITH_TORCH_CLUSTER = False + try: import torch_sparse # noqa from torch_sparse import SparseStorage, SparseTensor From 4b9cfe246e69268ea0871e46f06124125e98ddae Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 12 Apr 2023 09:06:47 -0700 Subject: [PATCH 1110/2432] Replacing `HGTConv` with `FastHGTConv` (#7117) we have proven its correctness and speed, lmk if anything else needed to merge fasthgt->hgt --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- test/nn/conv/test_fast_hgt_conv.py | 26 --- test/nn/conv/test_hgt_conv.py | 35 +--- torch_geometric/nn/conv/__init__.py | 2 - torch_geometric/nn/conv/fast_hgt_conv.py | 204 ----------------------- torch_geometric/nn/conv/hetero_conv.py | 19 ++- torch_geometric/nn/conv/hgt_conv.py | 191 ++++++++++++--------- torch_geometric/utils/hetero.py | 7 +- 7 files changed, 136 insertions(+), 348 deletions(-) delete mode 100644 test/nn/conv/test_fast_hgt_conv.py delete mode 100644 torch_geometric/nn/conv/fast_hgt_conv.py diff --git a/test/nn/conv/test_fast_hgt_conv.py b/test/nn/conv/test_fast_hgt_conv.py deleted file mode 100644 index 55d36bf49225..000000000000 --- a/test/nn/conv/test_fast_hgt_conv.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch - -from torch_geometric.nn import FastHGTConv -from torch_geometric.testing import get_random_edge_index - - -def test_fast_hgt_conv(): - x_dict = { - 'author': torch.randn(4, 16), - 'paper': torch.randn(6, 16), - } - edge_index = get_random_edge_index(4, 6, num_edges=20) - - edge_index_dict = { - ('author', 'writes', 'paper'): edge_index, - ('paper', 'written_by', 'author'): edge_index.flip([0]), - } - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - - conv = FastHGTConv(16, 16, metadata, heads=2) - assert str(conv) == 'FastHGTConv(-1, 16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 2 - assert out_dict1['author'].size() == (4, 16) - assert out_dict1['paper'].size() == (6, 16) diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index 65e07d5ded03..bb41663d2e4a 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -2,7 +2,7 @@ import torch_geometric.typing from torch_geometric.data import HeteroData -from torch_geometric.nn import FastHGTConv, HGTConv +from torch_geometric.nn import HGTConv from torch_geometric.profile import benchmark from torch_geometric.testing import get_random_edge_index from torch_geometric.typing import SparseTensor @@ -178,39 +178,6 @@ def test_hgt_conv_out_of_place(): assert x_dict['paper'].size() == (6, 32) -def test_fast_hgt_conv(): - x_dict = { - 'v0': torch.randn(5, 4), - 'v1': torch.randn(5, 4), - 'v2': torch.randn(5, 4), - } - - edge_index_dict = { - ('v0', 'e1', 'v0'): torch.randint(0, 5, size=(2, 10)), - ('v0', 'e2', 'v1'): torch.randint(0, 5, size=(2, 10)), - } - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - conv1 = HGTConv(4, 2, metadata) - conv2 = FastHGTConv(4, 2, metadata) - - # Make parameters match: - for my_param in conv1.parameters(): - my_param.data.fill_(1) - for og_param in conv2.parameters(): - og_param.data.fill_(1) - - out_dict1 = conv1(x_dict, edge_index_dict) - out_dict2 = conv2(x_dict, edge_index_dict) - - assert len(out_dict1) == len(out_dict2) - for key, out1 in out_dict1.items(): - out2 = out_dict2[key] - if out1 is None and out2 is None: - continue - assert torch.allclose(out1, out2) - - if __name__ == '__main__': import argparse diff --git 
a/torch_geometric/nn/conv/__init__.py b/torch_geometric/nn/conv/__init__.py index a0998c3bf5d5..9410e03f0b3d 100644 --- a/torch_geometric/nn/conv/__init__.py +++ b/torch_geometric/nn/conv/__init__.py @@ -51,7 +51,6 @@ from .pdn_conv import PDNConv from .general_conv import GeneralConv from .hgt_conv import HGTConv -from .fast_hgt_conv import FastHGTConv from .heat_conv import HEATConv from .hetero_conv import HeteroConv from .han_conv import HANConv @@ -119,7 +118,6 @@ 'PDNConv', 'GeneralConv', 'HGTConv', - 'FastHGTConv', 'HEATConv', 'HeteroConv', 'HANConv', diff --git a/torch_geometric/nn/conv/fast_hgt_conv.py b/torch_geometric/nn/conv/fast_hgt_conv.py deleted file mode 100644 index 5c0d772a897f..000000000000 --- a/torch_geometric/nn/conv/fast_hgt_conv.py +++ /dev/null @@ -1,204 +0,0 @@ -import math -from typing import Dict, List, Optional, Tuple, Union - -import torch -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense import HeteroDictLinear, HeteroLinear -from torch_geometric.nn.inits import ones -from torch_geometric.nn.parameter_dict import ParameterDict -from torch_geometric.typing import Adj, EdgeType, Metadata, NodeType -from torch_geometric.utils import softmax -from torch_geometric.utils.hetero import construct_bipartite_edge_index - - -class FastHGTConv(MessagePassing): - r"""See :class:`HGTConv`.""" - def __init__( - self, - in_channels: Union[int, Dict[str, int]], - out_channels: int, - metadata: Metadata, - heads: int = 1, - **kwargs, - ): - super().__init__(aggr='add', node_dim=0, **kwargs) - - if out_channels % heads != 0: - raise ValueError(f"'out_channels' (got {out_channels}) must be " - f"divisible by the number of heads (got {heads})") - - if not isinstance(in_channels, dict): - in_channels = {node_type: in_channels for node_type in metadata[0]} - - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - self.node_types = metadata[0] - self.edge_types = metadata[1] - self.dst_node_types = list(set(metadata[1][1])) - self.src_types = [edge_type[0] for edge_type in self.edge_types] - - self.kqv_lin = HeteroDictLinear(self.in_channels, - self.out_channels * 3) - - self.out_lin = HeteroDictLinear(self.out_channels, self.out_channels, - types=self.node_types) - - dim = out_channels // heads - num_types = heads * len(self.edge_types) - - self.k_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, - bias=False) - self.v_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, - bias=False) - - self.skip = ParameterDict({ - node_type: Parameter(torch.Tensor(1)) - for node_type in self.node_types - }) - - self.p_rel = ParameterDict() - for edge_type in self.edge_types: - edge_type = '__'.join(edge_type) - self.p_rel[edge_type] = Parameter(torch.Tensor(1, heads)) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - self.kqv_lin.reset_parameters() - self.out_lin.reset_parameters() - self.k_rel.reset_parameters() - self.v_rel.reset_parameters() - ones(self.skip) - ones(self.p_rel) - - def _cat(self, x_dict: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, int]]: - """Concatenates a dictionary of features.""" - cumsum = 0 - outs: List[Tensor] = [] - offset: Dict[str, int] = {} - for key, x in x_dict.items(): - outs.append(x) - offset[key] = cumsum - cumsum += x.size(0) - return torch.cat(outs, dim=0), offset - - def _construct_src_node_feat( - self, - k_dict: Dict[str, Tensor], - v_dict: Dict[str, Tensor], - ) -> 
Tuple[Tensor, Tensor, Dict[EdgeType, int]]: - """Constructs the source node representations.""" - count = 0 - cumsum = 0 - H, D = self.heads, self.out_channels // self.heads - - # Flatten into a single tensor with shape [num_edge_types * heads, D]: - ks: List[Tensor] = [] - vs: List[Tensor] = [] - type_list: List[int] = [] - offset: Dict[EdgeType] = {} - for edge_type in self.edge_types: - src, _, _ = edge_type - - ks.append(k_dict[src].reshape(-1, D)) - vs.append(v_dict[src].reshape(-1, D)) - - N = k_dict[src].size(0) - for _ in range(H): - type_list.append(torch.full((N, ), count, dtype=torch.long)) - count += 1 - offset[edge_type] = cumsum - cumsum += N - - type_vec = torch.cat(type_list, dim=0) - k = self.k_rel(torch.cat(ks, dim=0), type_vec).view(-1, H, D) - v = self.v_rel(torch.cat(vs, dim=0), type_vec).view(-1, H, D) - - return k, v, offset - - def forward( - self, - x_dict: Dict[NodeType, Tensor], - edge_index_dict: Dict[EdgeType, Adj] # Support both. - ) -> Dict[NodeType, Optional[Tensor]]: - r"""Runs the forward pass of the module. - - Args: - x_dict (Dict[str, torch.Tensor]): A dictionary holding input node - features for each individual node type. - edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A - dictionary holding graph connectivity information for each - individual edge type, either as a :class:`torch.Tensor` of - shape :obj:`[2, num_edges]` or a - :class:`torch_sparse.SparseTensor`. - - :rtype: :obj:`Dict[str, Optional[torch.Tensor]]` - The output node - embeddings for each node type. - In case a node type does not receive any message, its output will - be set to :obj:`None`. - """ - F = self.out_channels - H = self.heads - D = F // H - - k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {} - - # Compute K, Q, V over node types: - kqv_dict = self.kqv_lin(x_dict) - for key, val in kqv_dict.items(): - k_dict[key] = val[:, :F].view(-1, H, D) - q_dict[key] = val[:, F:2 * F].view(-1, H, D) - v_dict[key] = val[:, 2 * F:].view(-1, H, D) - - q, dst_offset = self._cat(q_dict) - k, v, src_offset = self._construct_src_node_feat(k_dict, v_dict) - - edge_index, edge_attr = construct_bipartite_edge_index( - edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel) - - out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr, - size=None) - - # Reconstruct output node embeddings dict: - for node_type, start_offset in dst_offset.items(): - end_offset = start_offset + q_dict[node_type].size(0) - out_dict[node_type] = out[start_offset:end_offset] - - # Transform output node embeddings: - a_dict = self.out_lin({ - k: torch.nn.functional.gelu(v) if v is not None else v - for k, v in out_dict.items() - }) - - # Iterate over node types: - for node_type, out in out_dict.items(): - if out is None or node_type not in self.dst_node_types: - out_dict[node_type] = None - continue - else: - out = a_dict[node_type] - - if out.size(-1) == x_dict[node_type].size(-1): - alpha = self.skip[node_type].sigmoid() - out = alpha * out + (1 - alpha) * x_dict[node_type] - out_dict[node_type] = out - - return out_dict - - def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, edge_attr: Tensor, - index: Tensor, ptr: Optional[Tensor], - size_i: Optional[int]) -> Tensor: - alpha = (q_i * k_j).sum(dim=-1) * edge_attr - alpha = alpha / math.sqrt(q_i.size(-1)) - alpha = softmax(alpha, index, ptr, size_i) - out = v_j * alpha.view(-1, self.heads, 1) - return out.view(-1, self.out_channels) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(-1, {self.out_channels}, ' - 
f'heads={self.heads})') diff --git a/torch_geometric/nn/conv/hetero_conv.py b/torch_geometric/nn/conv/hetero_conv.py index a59879857169..e1d4d2a9d26e 100644 --- a/torch_geometric/nn/conv/hetero_conv.py +++ b/torch_geometric/nn/conv/hetero_conv.py @@ -1,17 +1,32 @@ import warnings from collections import defaultdict -from typing import Dict, Optional +from typing import Dict, List, Optional import torch from torch import Tensor from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.conv.hgt_conv import group from torch_geometric.nn.module_dict import ModuleDict from torch_geometric.typing import Adj, EdgeType, NodeType from torch_geometric.utils.hetero import check_add_self_loops +def group(xs: List[Tensor], aggr: Optional[str]) -> Optional[Tensor]: + if len(xs) == 0: + return None + elif aggr is None: + return torch.stack(xs, dim=1) + elif len(xs) == 1: + return xs[0] + elif aggr == "cat": + return torch.cat(xs, dim=-1) + else: + out = torch.stack(xs, dim=0) + out = getattr(torch, aggr)(out, dim=0) + out = out[0] if isinstance(out, tuple) else out + return out + + class HeteroConv(torch.nn.Module): r"""A generic wrapper for computing graph convolution on heterogeneous graphs. diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index 1f5bc8692a54..ccdeea16bce6 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ b/torch_geometric/nn/conv/hgt_conv.py @@ -1,34 +1,17 @@ import math -from typing import Dict, List, Optional, Union +from typing import Dict, List, Optional, Tuple, Union import torch -import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense import Linear -from torch_geometric.nn.inits import glorot, ones, reset -from torch_geometric.nn.module_dict import ModuleDict +from torch_geometric.nn.dense import HeteroDictLinear, HeteroLinear +from torch_geometric.nn.inits import ones from torch_geometric.nn.parameter_dict import ParameterDict -from torch_geometric.typing import EdgeType, Metadata, NodeType, SparseTensor +from torch_geometric.typing import Adj, EdgeType, Metadata, NodeType from torch_geometric.utils import softmax - - -def group(xs: List[Tensor], aggr: Optional[str]) -> Optional[Tensor]: - if len(xs) == 0: - return None - elif aggr is None: - return torch.stack(xs, dim=1) - elif len(xs) == 1: - return xs[0] - elif aggr == "cat": - return torch.cat(xs, dim=-1) - else: - out = torch.stack(xs, dim=0) - out = getattr(torch, aggr)(out, dim=0) - out = out[0] if isinstance(out, tuple) else out - return out +from torch_geometric.utils.hetero import construct_bipartite_edge_index class HGTConv(MessagePassing): @@ -72,7 +55,6 @@ def __init__( out_channels: int, metadata: Metadata, heads: int = 1, - group: str = "sum", **kwargs, ): super().__init__(aggr='add', node_dim=0, **kwargs) @@ -87,48 +69,95 @@ def __init__( self.in_channels = in_channels self.out_channels = out_channels self.heads = heads - self.group = group - - self.k_lin = ModuleDict() - self.q_lin = ModuleDict() - self.v_lin = ModuleDict() - self.a_lin = ModuleDict() - self.skip = ParameterDict() - for node_type, in_channels in self.in_channels.items(): - self.k_lin[node_type] = Linear(in_channels, out_channels) - self.q_lin[node_type] = Linear(in_channels, out_channels) - self.v_lin[node_type] = Linear(in_channels, out_channels) - self.a_lin[node_type] = Linear(out_channels, out_channels) - self.skip[node_type] = Parameter(torch.Tensor(1)) - - self.a_rel = 
ParameterDict() - self.m_rel = ParameterDict() - self.p_rel = ParameterDict() + self.node_types = metadata[0] + self.edge_types = metadata[1] + self.dst_node_types = list(set(metadata[1][1])) + self.src_types = [edge_type[0] for edge_type in self.edge_types] + + self.kqv_lin = HeteroDictLinear(self.in_channels, + self.out_channels * 3) + + self.out_lin = HeteroDictLinear(self.out_channels, self.out_channels, + types=self.node_types) + dim = out_channels // heads - for edge_type in metadata[1]: + num_types = heads * len(self.edge_types) + + self.k_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, + bias=False) + self.v_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, + bias=False) + + self.skip = ParameterDict({ + node_type: Parameter(torch.Tensor(1)) + for node_type in self.node_types + }) + + self.p_rel = ParameterDict() + for edge_type in self.edge_types: edge_type = '__'.join(edge_type) - self.a_rel[edge_type] = Parameter(torch.Tensor(heads, dim, dim)) - self.m_rel[edge_type] = Parameter(torch.Tensor(heads, dim, dim)) - self.p_rel[edge_type] = Parameter(torch.Tensor(heads)) + self.p_rel[edge_type] = Parameter(torch.Tensor(1, heads)) self.reset_parameters() def reset_parameters(self): super().reset_parameters() - reset(self.k_lin) - reset(self.q_lin) - reset(self.v_lin) - reset(self.a_lin) + self.kqv_lin.reset_parameters() + self.out_lin.reset_parameters() + self.k_rel.reset_parameters() + self.v_rel.reset_parameters() ones(self.skip) ones(self.p_rel) - glorot(self.a_rel) - glorot(self.m_rel) + + def _cat(self, x_dict: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, int]]: + """Concatenates a dictionary of features.""" + cumsum = 0 + outs: List[Tensor] = [] + offset: Dict[str, int] = {} + for key, x in x_dict.items(): + outs.append(x) + offset[key] = cumsum + cumsum += x.size(0) + return torch.cat(outs, dim=0), offset + + def _construct_src_node_feat( + self, + k_dict: Dict[str, Tensor], + v_dict: Dict[str, Tensor], + ) -> Tuple[Tensor, Tensor, Dict[EdgeType, int]]: + """Constructs the source node representations.""" + count = 0 + cumsum = 0 + H, D = self.heads, self.out_channels // self.heads + + # Flatten into a single tensor with shape [num_edge_types * heads, D]: + ks: List[Tensor] = [] + vs: List[Tensor] = [] + type_list: List[int] = [] + offset: Dict[EdgeType] = {} + for edge_type in self.edge_types: + src, _, _ = edge_type + + ks.append(k_dict[src].reshape(-1, D)) + vs.append(v_dict[src].reshape(-1, D)) + + N = k_dict[src].size(0) + for _ in range(H): + type_list.append(torch.full((N, ), count, dtype=torch.long)) + count += 1 + offset[edge_type] = cumsum + cumsum += N + + type_vec = torch.cat(type_list, dim=0) + k = self.k_rel(torch.cat(ks, dim=0), type_vec).view(-1, H, D) + v = self.v_rel(torch.cat(vs, dim=0), type_vec).view(-1, H, D) + + return k, v, offset def forward( self, x_dict: Dict[NodeType, Tensor], - edge_index_dict: Union[Dict[EdgeType, Tensor], - Dict[EdgeType, SparseTensor]] # Support both. + edge_index_dict: Dict[EdgeType, Adj] # Support both. ) -> Dict[NodeType, Optional[Tensor]]: r"""Runs the forward pass of the module. @@ -146,42 +175,47 @@ def forward( In case a node type does not receive any message, its output will be set to :obj:`None`. 
""" - H, D = self.heads, self.out_channels // self.heads + F = self.out_channels + H = self.heads + D = F // H k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {} - # Iterate over node-types: - for node_type, x in x_dict.items(): - k_dict[node_type] = self.k_lin[node_type](x).view(-1, H, D) - q_dict[node_type] = self.q_lin[node_type](x).view(-1, H, D) - v_dict[node_type] = self.v_lin[node_type](x).view(-1, H, D) - out_dict[node_type] = [] + # Compute K, Q, V over node types: + kqv_dict = self.kqv_lin(x_dict) + for key, val in kqv_dict.items(): + k_dict[key] = val[:, :F].view(-1, H, D) + q_dict[key] = val[:, F:2 * F].view(-1, H, D) + v_dict[key] = val[:, 2 * F:].view(-1, H, D) - # Iterate over edge-types: - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - edge_type = '__'.join(edge_type) + q, dst_offset = self._cat(q_dict) + k, v, src_offset = self._construct_src_node_feat(k_dict, v_dict) - a_rel = self.a_rel[edge_type] - k = (k_dict[src_type].transpose(0, 1) @ a_rel).transpose(1, 0) + edge_index, edge_attr = construct_bipartite_edge_index( + edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel) - m_rel = self.m_rel[edge_type] - v = (v_dict[src_type].transpose(0, 1) @ m_rel).transpose(1, 0) + out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr, + size=None) - # propagate_type: (k: Tensor, q: Tensor, v: Tensor, rel: Tensor) - out = self.propagate(edge_index, k=k, q=q_dict[dst_type], v=v, - rel=self.p_rel[edge_type], size=None) - out_dict[dst_type].append(out) + # Reconstruct output node embeddings dict: + for node_type, start_offset in dst_offset.items(): + end_offset = start_offset + q_dict[node_type].size(0) + out_dict[node_type] = out[start_offset:end_offset] - # Iterate over node-types: - for node_type, outs in out_dict.items(): - out = group(outs, self.group) + # Transform output node embeddings: + a_dict = self.out_lin({ + k: torch.nn.functional.gelu(v) if v is not None else v + for k, v in out_dict.items() + }) - if out is None: + # Iterate over node types: + for node_type, out in out_dict.items(): + if node_type not in self.dst_node_types: out_dict[node_type] = None continue + else: + out = a_dict[node_type] - out = self.a_lin[node_type](F.gelu(out)) if out.size(-1) == x_dict[node_type].size(-1): alpha = self.skip[node_type].sigmoid() out = alpha * out + (1 - alpha) * x_dict[node_type] @@ -189,11 +223,10 @@ def forward( return out_dict - def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, rel: Tensor, + def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, edge_attr: Tensor, index: Tensor, ptr: Optional[Tensor], size_i: Optional[int]) -> Tensor: - - alpha = (q_i * k_j).sum(dim=-1) * rel + alpha = (q_i * k_j).sum(dim=-1) * edge_attr alpha = alpha / math.sqrt(q_i.size(-1)) alpha = softmax(alpha, index, ptr, size_i) out = v_j * alpha.view(-1, self.heads, 1) diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index d1920271b7d3..ccd6579dc444 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -77,7 +77,7 @@ def construct_bipartite_edge_index( :class:`torch_sparse.SparseTensor`. src_offset_dict (Dict[Tuple[str, str, str], int]): A dictionary of offsets to apply to the source node type for each edge type. - src_offset_dict (Dict[str, int]): A dictionary of offsets to apply for + dst_offset_dict (Dict[str, int]): A dictionary of offsets to apply for destination node types. 
edge_attr_dict (Dict[Tuple[str, str, str], torch.Tensor]): A dictionary holding edge features for each individual edge type. @@ -92,9 +92,14 @@ def construct_bipartite_edge_index( # TODO Add support for SparseTensor w/o converting. is_sparse = isinstance(edge_index, SparseTensor) + is_native_sparse = isinstance(edge_index, Tensor) and 'sparse' in str( + edge_index.layout) if is_sparse: col, row, _ = edge_index.coo() edge_index = torch.stack([row, col], dim=0) + elif is_native_sparse: + edge_index = torch.tensor( + edge_index.to_sparse_coo().indices()).flip(0) else: edge_index = edge_index.clone() From 7a65206829a796831e9c933694a2f1e40c1cafe1 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 12 Apr 2023 15:38:16 -0700 Subject: [PATCH 1111/2432] Add `HGTConv` test for missing destination node types (#7168) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/conv/test_hgt_conv.py | 18 ++++++++++++++++++ torch_geometric/utils/hetero.py | 18 +++++++----------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index bb41663d2e4a..02bb7c8e4fc3 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -178,6 +178,24 @@ def test_hgt_conv_out_of_place(): assert x_dict['paper'].size() == (6, 32) +def test_hgt_conv_missing_dst_node_type(): + data = HeteroData() + data['author'].x = torch.randn(4, 16) + data['paper'].x = torch.randn(6, 32) + data['university'].x = torch.randn(10, 32) + + data['author', 'paper'].edge_index = get_random_edge_index(4, 6, 20) + data['paper', 'author'].edge_index = get_random_edge_index(6, 4, 20) + data['university', 'author'].edge_index = get_random_edge_index(10, 4, 10) + + conv = HGTConv(-1, 64, data.metadata(), heads=1) + + out_dict = conv(data.x_dict, data.edge_index_dict) + assert out_dict['author'].size() == (4, 64) + assert out_dict['paper'].size() == (6, 64) + assert out_dict['university'] is None + + if __name__ == '__main__': import argparse diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index ccd6579dc444..1e9b8c21704e 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -5,6 +5,7 @@ from torch.nn import ParameterDict from torch_geometric.typing import Adj, EdgeType, NodeType, SparseTensor +from torch_geometric.utils import is_sparse, to_edge_index from torch_geometric.utils.num_nodes import maybe_num_nodes_dict @@ -83,7 +84,7 @@ def construct_bipartite_edge_index( dictionary holding edge features for each individual edge type. (default: :obj:`None`) """ - is_sparse = False + is_sparse_tensor = False edge_indices: List[Tensor] = [] edge_attrs: List[Tensor] = [] for edge_type, src_offset in src_offset_dict.items(): @@ -91,15 +92,10 @@ def construct_bipartite_edge_index( dst_offset = dst_offset_dict[edge_type[-1]] # TODO Add support for SparseTensor w/o converting. 
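A self-contained sketch of the behaviour covered by the test added above: node types that never appear as a destination receive a `None` output. The random-edge helper, relation names, and tensor sizes below are illustrative only:

```
import torch
from torch_geometric.data import HeteroData
from torch_geometric.nn import HGTConv

def rand_edge_index(num_src, num_dst, num_edges):  # illustrative helper
    row = torch.randint(0, num_src, (num_edges, ))
    col = torch.randint(0, num_dst, (num_edges, ))
    return torch.stack([row, col], dim=0)

data = HeteroData()
data['author'].x = torch.randn(4, 16)
data['paper'].x = torch.randn(6, 32)
data['university'].x = torch.randn(10, 32)
data['author', 'writes', 'paper'].edge_index = rand_edge_index(4, 6, 20)
data['paper', 'rev_writes', 'author'].edge_index = rand_edge_index(6, 4, 20)
data['university', 'employs', 'author'].edge_index = rand_edge_index(10, 4, 10)

conv = HGTConv(-1, 64, data.metadata(), heads=1)
out = conv(data.x_dict, data.edge_index_dict)
assert out['author'].size() == (4, 64)
assert out['university'] is None  # 'university' is never a destination type
```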
- is_sparse = isinstance(edge_index, SparseTensor) - is_native_sparse = isinstance(edge_index, Tensor) and 'sparse' in str( - edge_index.layout) - if is_sparse: - col, row, _ = edge_index.coo() - edge_index = torch.stack([row, col], dim=0) - elif is_native_sparse: - edge_index = torch.tensor( - edge_index.to_sparse_coo().indices()).flip(0) + is_sparse_tensor = isinstance(edge_index, SparseTensor) + if is_sparse(edge_index): + edge_index, _ = to_edge_index(edge_index) + edge_index = edge_index.flip([0]) else: edge_index = edge_index.clone() @@ -122,7 +118,7 @@ def construct_bipartite_edge_index( if edge_attr_dict is not None: edge_attr = torch.cat(edge_attrs, dim=0) - if is_sparse: + if is_sparse_tensor: # TODO Add support for `SparseTensor.sparse_sizes()`. edge_index = SparseTensor( row=edge_index[1], From fa023b338703725e2c2c33675d9c56fe3a8a9550 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 13 Apr 2023 09:32:36 +0100 Subject: [PATCH 1112/2432] Fix typo in documentation (#7172) --- docs/source/tutorial/compile.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/tutorial/compile.rst b/docs/source/tutorial/compile.rst index 63b9fd5d46b0..8ba9a821350c 100644 --- a/docs/source/tutorial/compile.rst +++ b/docs/source/tutorial/compile.rst @@ -2,7 +2,7 @@ Compiled Graph Neural Networks ============================== :meth:`torch.compile` is the latest method to speed up your :pytorch:`PyTorch` code in :obj:`torch >= 2.0.0`! -:meth:`torch.compile` makes PyTorch code run faster by JIT-compiling it into opimized kernels, all while required minimal code changes. +:meth:`torch.compile` makes PyTorch code run faster by JIT-compiling it into optimized kernels, all while required minimal code changes. Under the hood, :meth:`torch.compile` captures :pytorch:`PyTorch` programs via :obj:`TorchDynamo`, canonicalizes over 2,000 :pytorch:`PyTorch` operators via :obj:`PrimTorch`, and finally generates fast code out of it across multiple accelerators and backends via the deep learning compiler :obj:`TorchInductor`. 
From 8869792a52e4bc0a3020301c42fbf688980ced6c Mon Sep 17 00:00:00 2001 From: Ramona Bendias Date: Sun, 16 Apr 2023 09:29:08 +0100 Subject: [PATCH 1113/2432] Can't use `deepcopy` on `Explanation` (#7176) Running the method `get_explanation_subgraph()` fails with: ``` Traceback (most recent call last): File "", line 1, in File "/Users/ramonabendias/.local/share/virtualenvs/pydata-workshop-CALqWZ-I/lib/python3.11/site-packages/torch_geometric/explain/explanation.py", line 306, in get_explanation_subgraph return self._apply_masks( ^^^^^^^^^^^^^^^^^^ File "/Users/ramonabendias/.local/share/virtualenvs/pydata-workshop-CALqWZ-I/lib/python3.11/site-packages/torch_geometric/explain/explanation.py", line 336, in _apply_masks out = copy.deepcopy(self) ^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 153, in deepcopy y = copier(memo) ^^^^^^^^^^^^ File "/Users/ramonabendias/.local/share/virtualenvs/pydata-workshop-CALqWZ-I/lib/python3.11/site-packages/torch_geometric/data/hetero_data.py", line 203, in __deepcopy__ out.__dict__[key] = copy.deepcopy(value, memo) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 146, in deepcopy y = copier(x, memo) ^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 231, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) ^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 153, in deepcopy y = copier(memo) ^^^^^^^^^^^^ File "/Users/ramonabendias/.local/share/virtualenvs/pydata-workshop-CALqWZ-I/lib/python3.11/site-packages/torch_geometric/data/storage.py", line 132, in __deepcopy__ out._mapping = copy.deepcopy(out._mapping, memo) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 146, in deepcopy y = copier(x, memo) ^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 231, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) ^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/copy.py", line 153, in deepcopy y = copier(memo) ^^^^^^^^^^^^ File "/Users/ramonabendias/.local/share/virtualenvs/pydata-workshop-CALqWZ-I/lib/python3.11/site-packages/torch/_tensor.py", line 86, in __deepcopy__ raise RuntimeError( RuntimeError: Only Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at the moment ``` --- torch_geometric/explain/explanation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/explain/explanation.py b/torch_geometric/explain/explanation.py index 67192a843e73..71fe5073b601 100644 --- a/torch_geometric/explain/explanation.py +++ b/torch_geometric/explain/explanation.py @@ -333,7 +333,7 @@ def _apply_masks( node_mask_dict: Dict[NodeType, Tensor], edge_mask_dict: Dict[EdgeType, Tensor], ) -> 'HeteroExplanation': - out = copy.deepcopy(self) + out = copy.copy(self) for edge_type, edge_mask in edge_mask_dict.items(): for key, value in self[edge_type].items(): From 49ff773a387a55d4c09e9eae3d2418d4d0dc2429 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 16 Apr 2023 
09:48:51 +0100 Subject: [PATCH 1114/2432] Fix empty `dst_type` handling in `HGTConv` (#7183) --- CHANGELOG.md | 2 ++ torch_geometric/nn/conv/hgt_conv.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a284da8ab538..ce5c5eea52fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Removed +- Replaced `FastHGTConv` with `HGTConv` ([#7117](https://github.com/pyg-team/pytorch_geometric/pull/7117)) + ## [2.3.0] - 2023-03-23 ### Added diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index ccdeea16bce6..a97f2121abf3 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ b/torch_geometric/nn/conv/hgt_conv.py @@ -71,8 +71,8 @@ def __init__( self.heads = heads self.node_types = metadata[0] self.edge_types = metadata[1] - self.dst_node_types = list(set(metadata[1][1])) - self.src_types = [edge_type[0] for edge_type in self.edge_types] + + self.dst_node_types = set([key[-1] for key in self.edge_types]) self.kqv_lin = HeteroDictLinear(self.in_channels, self.out_channels * 3) From 8999e507602d45ba9b53cc94f501e320cb579a92 Mon Sep 17 00:00:00 2001 From: Berke Kisin Date: Sun, 16 Apr 2023 20:29:16 +0200 Subject: [PATCH 1115/2432] Fix `HeteroDictLinear` crashes if some node types are absent (#7185) This pr fixes a bug in HeteroDictLinear forward function, which crashes if the input x_dict has some node types missing. --------- Co-authored-by: Berke Kisin Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/dense/linear.py | 21 ++++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce5c5eea52fb..65ee963b8a62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Allow missing node types in `HeteroDictLinear` ([#7185](https://github.com/pyg-team/pytorch_geometric/pull/7185)) - Optimized `from_networkx` memory footprint by reducing unnecessary copies ([#7119](https://github.com/pyg-team/pytorch_geometric/pull/7119)) - Added an optional `batch_size` argument to `LayerNorm`, `GraphNorm`, `InstanceNorm`, `GraphSizeNorm` and `PairNorm` ([#7135](https://github.com/pyg-team/pytorch_geometric/pull/7135)) - Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093)) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 4567bc909a29..e042abb9ed6a 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -376,15 +376,26 @@ def forward( x_dict (Dict[Any, torch.Tensor]): A dictionary holding input features for each individual type. 
""" + out_dict = {} + if torch_geometric.typing.WITH_GMM: - xs = [x_dict[key] for key in self.lins.keys()] - weights = [lin.weight.t() for lin in self.lins.values()] - biases = [lin.bias for lin in self.lins.values()] + xs, weights, biases = [], [], [] + for key, lin in self.lins.items(): + if key in x_dict: + xs.append(x_dict[key]) + weights.append(lin.weight.t()) + biases.append(lin.bias) biases = None if biases[0] is None else biases outs = pyg_lib.ops.grouped_matmul(xs, weights, biases) - return {key: out for key, out in zip(x_dict.keys(), outs)} + for key, out in zip(self.lins.keys(), outs): + if key in x_dict: + out_dict[key] = out + else: + for key, lin in self.lins.items(): + if key in x_dict: + out_dict[key] = lin(x_dict[key]) - return {key: lin(x_dict[key]) for key, lin in self.lins.items()} + return out_dict @torch.no_grad() def initialize_parameters(self, module, input): From 3d4836bc24dbb1b180f29cbbbdbcd18b94116dd7 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 17 Apr 2023 10:50:21 +0200 Subject: [PATCH 1116/2432] Fix `subgraph` on unordered inputs (#7187) --- CHANGELOG.md | 1 + torch_geometric/utils/subgraph.py | 34 ++++++++++++++++++------------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65ee963b8a62..1316c353fcfe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed `subgraph` on unordered inputs ([#7187](https://github.com/pyg-team/pytorch_geometric/pull/7187)) - Allow missing node types in `HeteroDictLinear` ([#7185](https://github.com/pyg-team/pytorch_geometric/pull/7185)) - Optimized `from_networkx` memory footprint by reducing unnecessary copies ([#7119](https://github.com/pyg-team/pytorch_geometric/pull/7119)) - Added an optional `batch_size` argument to `LayerNorm`, `GraphNorm`, `InstanceNorm`, `GraphSizeNorm` and `PairNorm` ([#7135](https://github.com/pyg-team/pytorch_geometric/pull/7135)) diff --git a/torch_geometric/utils/subgraph.py b/torch_geometric/utils/subgraph.py index b20a645b6163..490e53a6b78f 100644 --- a/torch_geometric/utils/subgraph.py +++ b/torch_geometric/utils/subgraph.py @@ -87,13 +87,13 @@ def subgraph( if isinstance(subset, (list, tuple)): subset = torch.tensor(subset, dtype=torch.long, device=device) - if subset.dtype == torch.bool or subset.dtype == torch.uint8: - num_nodes = subset.size(0) - else: + if subset.dtype != torch.bool: num_nodes = maybe_num_nodes(edge_index, num_nodes) - subset = index_to_mask(subset, size=num_nodes) + node_mask = index_to_mask(subset, size=num_nodes) + else: + num_nodes = subset.size(0) + node_mask = subset - node_mask = subset edge_mask = node_mask[edge_index[0]] & node_mask[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None @@ -101,7 +101,7 @@ def subgraph( if relabel_nodes: node_idx = torch.zeros(node_mask.size(0), dtype=torch.long, device=device) - node_idx[subset] = torch.arange(subset.sum().item(), device=device) + node_idx[subset] = torch.arange(node_mask.sum().item(), device=device) edge_index = node_idx[edge_index] if return_edge_mask: @@ -168,22 +168,28 @@ def bipartite_subgraph( if src_subset.dtype != torch.bool: src_size = int(edge_index[0].max()) + 1 if size is None else size[0] - src_subset = index_to_mask(src_subset, size=src_size) + src_node_mask = index_to_mask(src_subset, size=src_size) + else: + src_size = src_subset.size(0) + src_node_mask = src_subset + 
if dst_subset.dtype != torch.bool: dst_size = int(edge_index[1].max()) + 1 if size is None else size[1] - dst_subset = index_to_mask(dst_subset, size=dst_size) + dst_node_mask = index_to_mask(dst_subset, size=dst_size) + else: + dst_size = dst_subset.size(0) + dst_node_mask = dst_subset - # node_mask = subset - edge_mask = src_subset[edge_index[0]] & dst_subset[edge_index[1]] + edge_mask = src_node_mask[edge_index[0]] & dst_node_mask[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: - node_idx_i = edge_index.new_zeros(src_subset.size(0)) - node_idx_j = edge_index.new_zeros(dst_subset.size(0)) - node_idx_i[src_subset] = torch.arange(int(src_subset.sum()), + node_idx_i = edge_index.new_zeros(src_node_mask.size(0)) + node_idx_j = edge_index.new_zeros(dst_node_mask.size(0)) + node_idx_i[src_subset] = torch.arange(int(src_node_mask.sum()), device=node_idx_i.device) - node_idx_j[dst_subset] = torch.arange(int(dst_subset.sum()), + node_idx_j[dst_subset] = torch.arange(int(dst_node_mask.sum()), device=node_idx_j.device) edge_index = torch.stack([ node_idx_i[edge_index[0]], From 3e6fafa83d8a140ff0b8fa130d8132608e9d318e Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 18 Apr 2023 05:23:54 +0100 Subject: [PATCH 1117/2432] [Code Coverage] `data/datapipes.py` (#7195) Part of #6528. Completes #6799. --- CHANGELOG.md | 2 +- pyproject.toml | 3 ++- test/data/test_datapipes.py | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1316c353fcfe..3ba0f10c101f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Allow missing node types in `HeteroDictLinear` ([#7185](https://github.com/pyg-team/pytorch_geometric/pull/7185)) - Optimized `from_networkx` memory footprint by reducing unnecessary copies ([#7119](https://github.com/pyg-team/pytorch_geometric/pull/7119)) - Added an optional `batch_size` argument to `LayerNorm`, `GraphNorm`, `InstanceNorm`, `GraphSizeNorm` and `PairNorm` ([#7135](https://github.com/pyg-team/pytorch_geometric/pull/7135)) -- Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093)) +- Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093), [#7195](https://github.com/pyg-team/pytorch_geometric/pull/7195)) - Fix `numpy` incompatiblity when reading files for `Planetoid` datasets ([#7141](https://github.com/pyg-team/pytorch_geometric/pull/7141)) - Added support for `Data.num_edges` for native `torch.sparse.Tensor` adjacency matrices ([#7104](https://github.com/pyg-team/pytorch_geometric/pull/7104)) - Fixed crash of heterogeneous data loaders if node or edge types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060), [#7087](https://github.com/pyg-team/pytorch_geometric/pull/7087)) diff --git a/pyproject.toml b/pyproject.toml index 1c4703ee2f1e..5578ec907de7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,8 @@ full = [ "pytorch-memlab", "pgmpy", "opt_einsum", - "statsmodels" + "statsmodels", + "rdkit", ] [project.urls] diff --git a/test/data/test_datapipes.py b/test/data/test_datapipes.py index b50728b4a048..c177c9dce70a 100644 --- a/test/data/test_datapipes.py +++ b/test/data/test_datapipes.py @@ -26,6 +26,8 @@ def test_dataset_adapter(dataset_adapter): dataset_adapter.apply_sharding(2, 0) assert len([data for data in dataset_adapter]) == 2 + assert 
dataset_adapter.is_shardable() + def test_datapipe_batch_graphs(dataset_adapter): dp = dataset_adapter.batch_graphs(batch_size=2) From 7c39ad55f155b18f3e74de14ce7c92a57395c055 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 18 Apr 2023 06:51:32 +0200 Subject: [PATCH 1118/2432] Add manual sampling interface to `NodeLoader` and `LinkLoader` (#7197) --- CHANGELOG.md | 1 + test/loader/test_link_neighbor_loader.py | 10 +++++++++- test/loader/test_neighbor_loader.py | 16 +++++++++++++--- torch_geometric/loader/link_loader.py | 12 +++++++++++- torch_geometric/loader/node_loader.py | 10 ++++++++++ 5 files changed, 44 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ba0f10c101f..a99d47ed22af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197)) - Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155)) - Added edge weight support to `LightGCN` ([#7157](https://github.com/pyg-team/pytorch_geometric/pull/7157)) - Added `SparseTensor` support to`trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index a87de55fc2b1..7e37fb037f2f 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -19,7 +19,9 @@ def unique_edge_pairs(edge_index): @onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) -def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio): +@pytest.mark.parametrize('filter_per_worker', [True, False]) +def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio, + filter_per_worker): pos_edge_index = get_random_edge_index(100, 50, 500) neg_edge_index = get_random_edge_index(100, 50, 500) neg_edge_index[1, :] += 50 @@ -42,11 +44,17 @@ def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio): directed=directed, neg_sampling_ratio=neg_sampling_ratio, shuffle=True, + filter_per_worker=filter_per_worker, ) assert str(loader) == 'LinkNeighborLoader()' assert len(loader) == 1000 / 20 + batch = loader([0]) + assert isinstance(batch, Data) + assert int(edge_label_index[0, 0]) in batch.n_id.tolist() + assert int(edge_label_index[1, 0]) in batch.n_id.tolist() + for batch in loader: assert isinstance(batch, Data) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 89709ddf430c..062924c62888 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -36,7 +36,8 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): @onlyNeighborSampler @pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) -def test_homo_neighbor_loader_basic(directed, dtype): +@pytest.mark.parametrize('filter_per_worker', [True, False]) +def test_homo_neighbor_loader_basic(directed, dtype, filter_per_worker): if dtype != torch.int64 and not WITH_PYG_LIB: return @@ -48,12 +49,21 @@ def test_homo_neighbor_loader_basic(directed, dtype): data.edge_index = get_random_edge_index(100, 100, 500, dtype) data.edge_attr = torch.arange(500) - loader = 
NeighborLoader(data, num_neighbors=[5] * 2, batch_size=20, - directed=directed) + loader = NeighborLoader( + data, + num_neighbors=[5] * 2, + batch_size=20, + directed=directed, + filter_per_worker=filter_per_worker, + ) assert str(loader) == 'NeighborLoader()' assert len(loader) == 5 + batch = loader([0]) + assert isinstance(batch, Data) + assert batch.n_id[:1].tolist() == [0] + for i, batch in enumerate(loader): assert isinstance(batch, Data) assert len(batch) == 9 if WITH_PYG_LIB else 7 diff --git a/torch_geometric/loader/link_loader.py b/torch_geometric/loader/link_loader.py index 88f13f368e7c..ccfd23056e56 100644 --- a/torch_geometric/loader/link_loader.py +++ b/torch_geometric/loader/link_loader.py @@ -175,8 +175,18 @@ def __init__( iterator = range(edge_label_index.size(1)) super().__init__(iterator, collate_fn=self.collate_fn, **kwargs) + def __call__( + self, + index: Union[Tensor, List[int]], + ) -> Union[Data, HeteroData]: + r"""Samples a subgraph from a batch of input edges.""" + out = self.collate_fn(index) + if not self.filter_per_worker: + out = self.filter_fn(out) + return out + def collate_fn(self, index: Union[Tensor, List[int]]) -> Any: - r"""Samples a subgraph from a batch of input nodes.""" + r"""Samples a subgraph from a batch of input edges.""" input_data: EdgeSamplerInput = self.input_data[index] out = self.link_sampler.sample_from_edges( diff --git a/torch_geometric/loader/node_loader.py b/torch_geometric/loader/node_loader.py index c2140293c18c..e67c2ae339b6 100644 --- a/torch_geometric/loader/node_loader.py +++ b/torch_geometric/loader/node_loader.py @@ -110,6 +110,16 @@ def __init__( iterator = range(input_nodes.size(0)) super().__init__(iterator, collate_fn=self.collate_fn, **kwargs) + def __call__( + self, + index: Union[Tensor, List[int]], + ) -> Union[Data, HeteroData]: + r"""Samples a subgraph from a batch of input nodes.""" + out = self.collate_fn(index) + if not self.filter_per_worker: + out = self.filter_fn(out) + return out + def collate_fn(self, index: Union[Tensor, List[int]]) -> Any: r"""Samples a subgraph from a batch of input nodes.""" input_data: NodeSamplerInput = self.input_data[index] From bb1ce509036087fad23e6030a06f537e96543822 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Tue, 18 Apr 2023 05:19:45 +0000 Subject: [PATCH 1119/2432] changelog --- torch_geometric/utils/scatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index a75f1fe36272..5ced98e728f2 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -121,7 +121,7 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, raise ValueError(f"Encountered invalid `reduce` argument '{reduce}'") -else: +else: # pragma: no cover def scatter(src: Tensor, index: Tensor, dim: int = 0, dim_size: Optional[int] = None, reduce: str = 'sum') -> Tensor: From bff64052b43ecd80d2157e271292947b833c7700 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 18 Apr 2023 07:22:33 +0200 Subject: [PATCH 1120/2432] Support for `scatter(..., reduce="any")` (#7198) --- CHANGELOG.md | 1 + test/utils/test_scatter.py | 12 ++++++++++++ torch_geometric/utils/coalesce.py | 2 +- torch_geometric/utils/scatter.py | 32 ++++++++++++++++++++++++------- 4 files changed, 39 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a99d47ed22af..0c318546d493 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a 
Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added support for `"any"`-reductions in `scatter` ([#7198](https://github.com/pyg-team/pytorch_geometric/pull/7198)) - Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197)) - Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155)) - Added edge weight support to `LightGCN` ([#7157](https://github.com/pyg-team/pytorch_geometric/pull/7157)) diff --git a/test/utils/test_scatter.py b/test/utils/test_scatter.py index b5b608725352..735a5cee1692 100644 --- a/test/utils/test_scatter.py +++ b/test/utils/test_scatter.py @@ -58,6 +58,18 @@ def test_scatter_backward(reduce, device): assert src.grad is not None +@withCUDA +def test_scatter_any(device): + src = torch.randn(6, 4, device=device) + index = torch.tensor([0, 0, 1, 1, 2, 2], device=device) + + out = scatter(src, index, dim=0, reduce='any') + + for i in range(3): + for j in range(4): + assert float(out[i, j]) in src[2 * i:2 * i + 2, j].tolist() + + if __name__ == '__main__': # Insights on GPU: # ================ diff --git a/torch_geometric/utils/coalesce.py b/torch_geometric/utils/coalesce.py index 80354b34c5d8..92b54643ee53 100644 --- a/torch_geometric/utils/coalesce.py +++ b/torch_geometric/utils/coalesce.py @@ -50,7 +50,7 @@ def coalesce( :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) reduce (str, optional): The reduce operation to use for merging edge features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`, - :obj:`"mul"`). (default: :obj:`"add"`) + :obj:`"mul"`, :obj:`"any"`). (default: :obj:`"add"`) is_sorted (bool, optional): If set to :obj:`True`, will expect :obj:`edge_index` to be already sorted row-wise. sort_by_row (bool, optional): If set to :obj:`False`, will sort diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index 5ced98e728f2..de0457662e74 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -15,11 +15,6 @@ warnings.filterwarnings('ignore', '.*is in beta and the API may change.*') - def broadcast(src: Tensor, ref: Tensor, dim: int) -> Tensor: - size = [1] * ref.dim() - size[dim] = -1 - return src.view(size).expand_as(ref) - def scatter(src: Tensor, index: Tensor, dim: int = 0, dim_size: Optional[int] = None, reduce: str = 'sum') -> Tensor: r"""Reduces all values from the :obj:`src` tensor at the indices @@ -39,8 +34,8 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, minimal-sized output tensor according to :obj:`index.max() + 1`. (default: :obj:`None`) reduce (str, optional): The reduce operation (:obj:`"sum"`, - :obj:`"mean"`, :obj:`"mul"`, :obj:`"min"` or :obj:`"max"`). - (default: :obj:`"sum"`) + :obj:`"mean"`, :obj:`"mul"`, :obj:`"min"` or :obj:`"max"`, + :obj:`"any"`). (default: :obj:`"sum"`) """ if index.dim() != 1: raise ValueError(f"The `index` argument must be one-dimensional " @@ -68,6 +63,11 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, size = list(src.size()) size[dim] = dim_size + # For "any" reduction, we use regular `scatter_`: + if reduce == 'any': + index = broadcast(index, src, dim) + return src.new_zeros(size).scatter_(dim, index, src) + # For "sum" and "mean" reduction, we make use of `scatter_add_`: if reduce == 'sum' or reduce == 'add': index = broadcast(index, src, dim) @@ -145,7 +145,25 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, :obj:`"mean"`, :obj:`"mul"`, :obj:`"min"` or :obj:`"max"`). 
(default: :obj:`"sum"`) """ + if reduce == 'any': + dim = src.dim() + dim if dim < 0 else dim + + if dim_size is None: + dim_size = int(index.max()) + 1 if index.numel() > 0 else 0 + + size = list(src.size()) + size[dim] = dim_size + + index = broadcast(index, src, dim) + return src.new_zeros(size).scatter_(dim, index, src) + if not torch_geometric.typing.WITH_TORCH_SCATTER: raise ImportError("'scatter' requires the 'torch-scatter' package") return torch_scatter.scatter(src, index, dim, dim_size=dim_size, reduce=reduce) + + +def broadcast(src: Tensor, ref: Tensor, dim: int) -> Tensor: + size = [1] * ref.dim() + size[dim] = -1 + return src.view(size).expand_as(ref) From 65bca8bd4445beab842266cdaf8a3e95962c702a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 18 Apr 2023 08:50:01 +0200 Subject: [PATCH 1121/2432] Add `GraphMode` with `bidirectional` sampling support (1/2) (#7199) --- CHANGELOG.md | 1 + test/sampler/test_sampler_base.py | 57 ++++++++++++++++++++++++++- torch_geometric/sampler/base.py | 65 +++++++++++++++++++++++++++++++ torch_geometric/sampler/utils.py | 28 ++++++++++++- torch_geometric/testing/data.py | 9 ++++- torch_geometric/utils/coalesce.py | 4 +- 6 files changed, 160 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c318546d493..a0614968eebb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added graph mode sampling option with bidirectional sampling support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199)) - Added support for `"any"`-reductions in `scatter` ([#7198](https://github.com/pyg-team/pytorch_geometric/pull/7198)) - Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197)) - Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155)) diff --git a/test/sampler/test_sampler_base.py b/test/sampler/test_sampler_base.py index a1edf578357f..9d57d1250508 100644 --- a/test/sampler/test_sampler_base.py +++ b/test/sampler/test_sampler_base.py @@ -1,6 +1,13 @@ import pytest +import torch -from torch_geometric.sampler.base import NumNeighbors +from torch_geometric.sampler.base import ( + HeteroSamplerOutput, + NumNeighbors, + SamplerOutput, +) +from torch_geometric.testing import get_random_edge_index +from torch_geometric.utils import is_undirected def test_homogeneous_num_neighbors(): @@ -61,3 +68,51 @@ def test_heterogeneous_num_neighbors_empty_dict(): assert values == {'A__to__B': [25, 10], 'B__to__A': [25, 10]} assert num_neighbors.num_hops == 2 + + +def test_homogeneous_to_bidirectional(): + edge_index = get_random_edge_index(10, 10, num_edges=20) + + obj = SamplerOutput( + node=torch.arange(10), + row=edge_index[0], + col=edge_index[0], + edge=torch.arange(edge_index.size(1)), + ).to_bidirectional() + + assert is_undirected(torch.stack([obj.row, obj.col], dim=0)) + + +def test_heterogeneous_to_bidirectional(): + edge_index1 = get_random_edge_index(10, 5, num_edges=20) + edge_index2 = get_random_edge_index(5, 10, num_edges=20) + edge_index3 = get_random_edge_index(10, 10, num_edges=20) + + obj = HeteroSamplerOutput( + node={ + 'v1': torch.arange(10), + 'v2': torch.arange(5) + }, + row={ + ('v1', 'to', 'v2'): edge_index1[0], + ('v2', 'rev_to', 'v1'): edge_index2[0], + ('v1', 'to', 'v1'): edge_index3[0], + }, + col={ + ('v1', 'to', 'v2'): edge_index1[1], + ('v2', 'rev_to', 'v1'): edge_index2[1], + ('v1', 
'to', 'v1'): edge_index3[1], + }, + edge=None, + ).to_bidirectional() + + assert torch.equal( + obj.row['v1', 'to', 'v2'].sort().values, + obj.col['v2', 'rev_to', 'v1'].sort().values, + ) + assert torch.equal( + obj.col['v1', 'to', 'v2'].sort().values, + obj.row['v2', 'rev_to', 'v1'].sort().values, + ) + assert is_undirected( + torch.stack([obj.row['v1', 'to', 'v1'], obj.col['v1', 'to', 'v1']], 0)) diff --git a/torch_geometric/sampler/base.py b/torch_geometric/sampler/base.py index 9b34cce97239..d53530c5362d 100644 --- a/torch_geometric/sampler/base.py +++ b/torch_geometric/sampler/base.py @@ -1,5 +1,6 @@ import copy import math +import warnings from abc import ABC from dataclasses import dataclass from enum import Enum @@ -9,6 +10,7 @@ from torch import Tensor from torch_geometric.data import Data, FeatureStore, GraphStore, HeteroData +from torch_geometric.sampler.utils import to_bidirectional from torch_geometric.typing import EdgeType, EdgeTypeStr, NodeType, OptTensor from torch_geometric.utils.mixin import CastMixin @@ -35,6 +37,13 @@ def from_data(cls, data: Any): f"(got '{type(data)}')") +class GraphMode(Enum): + r"""The graph mode of the returned subgraph.""" + directional = 'directional' + bidirectional = 'bidirectional' + induced_subgraph = 'induced_subgraph' + + @dataclass class NodeSamplerInput(CastMixin): r"""The sampling input of @@ -144,6 +153,23 @@ class SamplerOutput(CastMixin): # API for the expected output of a sampler. metadata: Optional[Any] = None + def to_bidirectional(self) -> 'SamplerOutput': + r"""Converts the sampled subgraph into a bidirectional variant, in + which all sampled edges are guaranteed to be bidirectional.""" + out = copy.copy(self) + + out.row, out.col, out.edge = to_bidirectional( + row=self.row, + col=self.col, + rev_row=self.row, + rev_col=self.col, + edge_id=self.edge, + rev_edge_id=self.edge, + ) + out.num_sampled_edges = None + + return out + @dataclass class HeteroSamplerOutput(CastMixin): @@ -191,6 +217,45 @@ class HeteroSamplerOutput(CastMixin): # API for the expected output of a sampler. 
metadata: Optional[Any] = None + def to_bidirectional(self) -> 'SamplerOutput': + r"""Converts the sampled subgraph into a bidirectional variant, in + which all sampled edges are guaranteed to be bidirectional.""" + out = copy.copy(self) + out.row = copy.copy(self.row) + out.col = copy.copy(self.col) + + edge_types = self.row.keys() + edge_types = [k for k in edge_types if not k[1].startswith('rev_')] + for edge_type in edge_types: + src, rel, dst = edge_type + rev_edge_type = (dst, f'rev_{rel}', src) + + if src == dst and rev_edge_type not in self.row: + out.row[edge_type], out.col[edge_type], _ = to_bidirectional( + row=self.row[edge_type], + col=self.col[edge_type], + rev_row=self.row[edge_type], + rev_col=self.col[edge_type], + ) + elif rev_edge_type in self.row: + out.row[edge_type], out.col[edge_type], _ = to_bidirectional( + row=self.row[edge_type], + col=self.col[edge_type], + rev_row=self.row[rev_edge_type], + rev_col=self.col[rev_edge_type], + ) + out.row[rev_edge_type] = out.col[edge_type] + out.col[rev_edge_type] = out.row[edge_type] + else: + warnings.warn(f"Cannot convert to bidirectional graph since " + f"the edge type {edge_type} does not seem to " + f"have a reverse edge type") + + out.edge = None + out.num_sampled_edges = None + + return out + @dataclass(frozen=True) class NumNeighbors: diff --git a/torch_geometric/sampler/utils.py b/torch_geometric/sampler/utils.py index 15ee49a784fd..35cf9392e7e7 100644 --- a/torch_geometric/sampler/utils.py +++ b/torch_geometric/sampler/utils.py @@ -7,7 +7,7 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.data.storage import EdgeStorage from torch_geometric.typing import NodeType, OptTensor -from torch_geometric.utils import index_sort +from torch_geometric.utils import coalesce, index_sort from torch_geometric.utils.sparse import index2ptr # Edge Layout Conversion ###################################################### @@ -112,6 +112,32 @@ def to_hetero_csc( return colptr_dict, row_dict, perm_dict +def to_bidirectional( + row: Tensor, + col: Tensor, + rev_row: Tensor, + rev_col: Tensor, + edge_id: OptTensor = None, + rev_edge_id: OptTensor = None, +) -> Tuple[Tensor, Tensor, OptTensor]: + + assert row.numel() == col.numel() + assert rev_row.numel() == rev_col.numel() + + edge_index = row.new_empty(2, row.numel() + rev_row.numel()) + edge_index[0, :row.numel()] = row + edge_index[1, :row.numel()] = col + edge_index[0, row.numel():] = rev_col + edge_index[1, row.numel():] = rev_row + + if edge_id is not None: + edge_id = torch.cat([edge_id, rev_edge_id], dim=0) + + (row, col), edge_id = coalesce(edge_index, edge_id, reduce='any') + + return row, col, edge_id + + ############################################################################### X, Y = TypeVar('X'), TypeVar('Y') diff --git a/torch_geometric/testing/data.py b/torch_geometric/testing/data.py index 9e8c776d575b..fbb74349bd81 100644 --- a/torch_geometric/testing/data.py +++ b/torch_geometric/testing/data.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.data import HeteroData, InMemoryDataset +from torch_geometric.utils import coalesce as coalesce_fn def get_random_edge_index( @@ -12,12 +13,18 @@ def get_random_edge_index( num_edges: int, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, + coalesce: bool = False, ) -> Tensor: row = torch.randint(num_src_nodes, (num_edges, ), dtype=dtype, device=device) col = torch.randint(num_dst_nodes, (num_edges, ), dtype=dtype, device=device) - return torch.stack([row, col], 
dim=0) + edge_index = torch.stack([row, col], dim=0) + + if coalesce: + edge_index = coalesce_fn(edge_index) + + return edge_index class FakeHeteroDataset(InMemoryDataset): diff --git a/torch_geometric/utils/coalesce.py b/torch_geometric/utils/coalesce.py index 92b54643ee53..20e1b899db75 100644 --- a/torch_geometric/utils/coalesce.py +++ b/torch_geometric/utils/coalesce.py @@ -117,7 +117,7 @@ def coalesce( edge_index = edge_index[:, mask] dim_size: Optional[int] = None - if isinstance(edge_attr, (Tensor, list, tuple)): + if isinstance(edge_attr, (Tensor, list, tuple)) and len(edge_attr) > 0: dim_size = edge_index.size(1) idx = torch.arange(0, nnz, device=edge_index.device) idx.sub_(mask.logical_not_().cumsum(dim=0)) @@ -128,6 +128,8 @@ def coalesce( edge_attr = scatter(edge_attr, idx, 0, dim_size, reduce) return edge_index, edge_attr if isinstance(edge_attr, (list, tuple)): + if len(edge_attr) == 0: + return edge_index, edge_attr edge_attr = [scatter(e, idx, 0, dim_size, reduce) for e in edge_attr] return edge_index, edge_attr From 23adffa5a88a7d3826e2f84ecf462fb50b4b76e0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 18 Apr 2023 12:02:36 +0200 Subject: [PATCH 1122/2432] Add `SubgraphType` with bidirectional sampling support (2/2) (#7200) --- CHANGELOG.md | 2 +- test/loader/test_link_neighbor_loader.py | 37 ++- test/loader/test_neighbor_loader.py | 233 ++++++++---------- test/sampler/test_sampler_base.py | 2 +- .../loader/link_neighbor_loader.py | 19 +- torch_geometric/loader/neighbor_loader.py | 19 +- torch_geometric/loader/utils.py | 19 +- torch_geometric/sampler/base.py | 43 +++- torch_geometric/sampler/neighbor_sampler.py | 36 ++- 9 files changed, 231 insertions(+), 179 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a0614968eebb..76530114e643 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
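(Editor's aside — illustration only, not part of the surrounding patch text.) A small sketch of the `to_bidirectional()` conversion that #7199 above adds to sampler outputs, shown on a toy homogeneous `SamplerOutput`. The import path is assumed and the tensors are invented for illustration:

```python
import torch
from torch_geometric.sampler import SamplerOutput

# A toy sampled subgraph over three nodes with two directed edges 0->1 and 1->2:
out = SamplerOutput(
    node=torch.arange(3),
    row=torch.tensor([0, 1]),
    col=torch.tensor([1, 2]),
    edge=torch.tensor([0, 1]),
)

bi = out.to_bidirectional()
# Every sampled edge now also appears in its reverse direction,
# i.e. {0->1, 1->0, 1->2, 2->1}:
print(torch.stack([bi.row, bi.col], dim=0))
```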
### Added -- Added graph mode sampling option with bidirectional sampling support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199)) +- Added subgraph type sampling option with bidirectional edge support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199), [#7200](https://github.com/pyg-team/pytorch_geometric/pull/7200)) - Added support for `"any"`-reductions in `scatter` ([#7198](https://github.com/pyg-team/pytorch_geometric/pull/7198)) - Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197)) - Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 7e37fb037f2f..874f2df45879 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -17,14 +17,14 @@ def unique_edge_pairs(edge_index): @onlyNeighborSampler -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) @pytest.mark.parametrize('filter_per_worker', [True, False]) -def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio, +def test_homo_link_neighbor_loader_basic(subgraph_type, neg_sampling_ratio, filter_per_worker): - pos_edge_index = get_random_edge_index(100, 50, 500) - neg_edge_index = get_random_edge_index(100, 50, 500) - neg_edge_index[1, :] += 50 + pos_edge_index = get_random_edge_index(50, 50, 500) + neg_edge_index = get_random_edge_index(50, 50, 500) + neg_edge_index += 50 edge_label_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1) edge_label = torch.cat([torch.ones(500), torch.zeros(500)], dim=0) @@ -41,7 +41,7 @@ def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio, batch_size=20, edge_label_index=edge_label_index, edge_label=edge_label if neg_sampling_ratio is None else None, - directed=directed, + subgraph_type=subgraph_type, neg_sampling_ratio=neg_sampling_ratio, shuffle=True, filter_per_worker=filter_per_worker, @@ -91,9 +91,9 @@ def test_homo_link_neighbor_loader_basic(directed, neg_sampling_ratio, @onlyNeighborSampler -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) -def test_hetero_link_neighbor_loader_basic(directed, neg_sampling_ratio): +def test_hetero_link_neighbor_loader_basic(subgraph_type, neg_sampling_ratio): data = HeteroData() data['paper'].x = torch.arange(100) @@ -111,7 +111,7 @@ def test_hetero_link_neighbor_loader_basic(directed, neg_sampling_ratio): num_neighbors=[-1] * 2, edge_label_index=('paper', 'author'), batch_size=20, - directed=directed, + subgraph_type=subgraph_type, neg_sampling_ratio=neg_sampling_ratio, shuffle=True, ) @@ -121,7 +121,6 @@ def test_hetero_link_neighbor_loader_basic(directed, neg_sampling_ratio): for batch in loader: assert isinstance(batch, HeteroData) - assert len(batch) == 7 + (1 if neg_sampling_ratio is not None else 0) if neg_sampling_ratio is None: # Assert only positive samples are present in the original graph: edge_index = unique_edge_pairs(batch['paper', 'author'].edge_index) @@ -136,8 +135,8 @@ def test_hetero_link_neighbor_loader_basic(directed, neg_sampling_ratio): @onlyNeighborSampler 
-@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode -def test_hetero_link_neighbor_loader_loop(directed): +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +def test_hetero_link_neighbor_loader_loop(subgraph_type): data = HeteroData() data['paper'].x = torch.arange(100) @@ -147,9 +146,13 @@ def test_hetero_link_neighbor_loader_loop(directed): data['paper', 'author'].edge_index = get_random_edge_index(100, 200, 1000) data['author', 'paper'].edge_index = get_random_edge_index(200, 100, 1000) - loader = LinkNeighborLoader(data, num_neighbors=[-1] * 2, - edge_label_index=('paper', 'paper'), - batch_size=20, directed=directed) + loader = LinkNeighborLoader( + data, + num_neighbors=[-1] * 2, + edge_label_index=('paper', 'paper'), + batch_size=20, + subgraph_type=subgraph_type, + ) for batch in loader: assert batch['paper'].x.size(0) <= 100 @@ -279,7 +282,6 @@ def test_custom_hetero_link_neighbor_loader(): num_neighbors=[-1] * 2, edge_label_index=('paper', 'to', 'author'), batch_size=20, - directed=True, ) loader2 = LinkNeighborLoader( @@ -287,7 +289,6 @@ def test_custom_hetero_link_neighbor_loader(): num_neighbors=[-1] * 2, edge_label_index=('paper', 'to', 'author'), batch_size=20, - directed=True, ) assert str(loader1) == str(loader2) @@ -377,7 +378,6 @@ def test_homo_link_neighbor_loader_triplet(disjoint, temporal, amount): edge_label_index=data.edge_label_index, edge_label_time=edge_label_time, time_attr=time_attr, - directed=True, disjoint=disjoint, neg_sampling=dict(mode='triplet', amount=amount), shuffle=True, @@ -472,7 +472,6 @@ def test_hetero_link_neighbor_loader_triplet(disjoint, temporal, amount): edge_label_index=index, edge_label_time=edge_label_time, time_attr=time_attr, - directed=True, disjoint=disjoint, neg_sampling=dict(mode='triplet', amount=amount, weight=weight), shuffle=True, diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 062924c62888..953b628822a6 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -19,9 +19,11 @@ ) from torch_geometric.typing import WITH_PYG_LIB from torch_geometric.utils import ( - k_hop_subgraph, + is_undirected, + sort_edge_index, to_torch_csc_tensor, to_torch_csr_tensor, + to_undirected, ) @@ -34,10 +36,10 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): @onlyNeighborSampler -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) @pytest.mark.parametrize('filter_per_worker', [True, False]) -def test_homo_neighbor_loader_basic(directed, dtype, filter_per_worker): +def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): if dtype != torch.int64 and not WITH_PYG_LIB: return @@ -53,7 +55,7 @@ def test_homo_neighbor_loader_basic(directed, dtype, filter_per_worker): data, num_neighbors=[5] * 2, batch_size=20, - directed=directed, + subgraph_type=subgraph_type, filter_per_worker=filter_per_worker, ) @@ -66,34 +68,35 @@ def test_homo_neighbor_loader_basic(directed, dtype, filter_per_worker): for i, batch in enumerate(loader): assert isinstance(batch, Data) - assert len(batch) == 9 if WITH_PYG_LIB else 7 assert batch.x.size(0) <= 100 assert batch.n_id.size() == (batch.num_nodes, ) - assert batch.e_id.size() == (batch.num_edges, ) assert batch.input_id.numel() == batch.batch_size == 20 assert batch.x.min() >= 0 and batch.x.max() 
< 100 assert batch.edge_index.min() >= 0 assert batch.edge_index.max() < batch.num_nodes - assert batch.edge_attr.min() >= 0 - assert batch.edge_attr.max() < 500 # Input nodes are always sampled first: assert torch.equal( batch.x[:batch.batch_size], torch.arange(i * batch.batch_size, (i + 1) * batch.batch_size)) - assert is_subset( - batch.edge_index.to(torch.int64), - data.edge_index.to(torch.int64), - batch.x, - batch.x, - ) + if subgraph_type == 'directional': + assert batch.e_id.size() == (batch.num_edges, ) + assert batch.edge_attr.min() >= 0 + assert batch.edge_attr.max() < 500 + + assert is_subset( + batch.edge_index.to(torch.int64), + data.edge_index.to(torch.int64), + batch.x, + batch.x, + ) @onlyNeighborSampler -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) -def test_hetero_neighbor_loader_basic(directed, dtype): +def test_hetero_neighbor_loader_basic(subgraph_type, dtype): if dtype != torch.int64 and not WITH_PYG_LIB: return @@ -117,8 +120,6 @@ def test_hetero_neighbor_loader_basic(directed, dtype): r1, c1 = data['paper', 'paper'].edge_index r2, c2 = data['paper', 'author'].edge_index + torch.tensor([[0], [100]]) r3, c3 = data['author', 'paper'].edge_index + torch.tensor([[100], [0]]) - mat = torch.full((300, 300), fill_value=-1, dtype=torch.long) - mat[torch.cat([r1, r2, r3]), torch.cat([c1, c2, c3])] = torch.arange(2500) batch_size = 20 @@ -132,7 +133,7 @@ def test_hetero_neighbor_loader_basic(directed, dtype): }, input_nodes='paper', batch_size=batch_size, - directed=directed, + subgraph_type=subgraph_type, ) next(iter(loader)) @@ -141,7 +142,7 @@ def test_hetero_neighbor_loader_basic(directed, dtype): num_neighbors=[10] * 2, input_nodes='paper', batch_size=batch_size, - directed=directed, + subgraph_type=subgraph_type, ) assert str(loader) == 'NeighborLoader()' @@ -170,111 +171,104 @@ def test_hetero_neighbor_loader_basic(directed, dtype): ('paper', 'to', 'author'), ('author', 'to', 'paper')} - assert len(batch['paper', 'paper']) == 4 if WITH_PYG_LIB else 3 - num_edges = batch['paper', 'paper'].num_edges - assert batch['paper', 'paper'].e_id.size() == (num_edges, ) row, col = batch['paper', 'paper'].edge_index - value = batch['paper', 'paper'].edge_attr assert row.min() >= 0 and row.max() < batch['paper'].num_nodes assert col.min() >= 0 and col.max() < batch['paper'].num_nodes - assert value.min() >= 0 and value.max() < 500 - if not directed: - adj = mat[batch['paper'].x][:, batch['paper'].x] - full_row, full_col = (adj >= 0).nonzero().t() - full_value = adj[adj >= 0] - assert full_value.size(0) == row.size(0) - assert torch.equal(row.unique(), full_row.unique()) - assert torch.equal(col.unique(), full_col.unique()) - assert torch.equal(value.unique(), full_value().unique()) - - assert is_subset( - batch['paper', 'paper'].edge_index.to(torch.int64), - data['paper', 'paper'].edge_index.to(torch.int64), - batch['paper'].x, - batch['paper'].x, - ) - assert len(batch['paper', 'author']) == 4 if WITH_PYG_LIB else 3 - num_edges = batch['paper', 'author'].num_edges - assert batch['paper', 'author'].e_id.size() == (num_edges, ) + if subgraph_type != 'bidirectional': + assert batch['paper', 'paper'].e_id.size() == (row.numel(), ) + value = batch['paper', 'paper'].edge_attr + assert value.min() >= 0 and value.max() < 500 + + assert is_subset( + batch['paper', 'paper'].edge_index.to(torch.int64), + data['paper', 
'paper'].edge_index.to(torch.int64), + batch['paper'].x, + batch['paper'].x, + ) + elif subgraph_type != 'directional': + assert 'e_id' not in batch['paper', 'paper'] + assert 'edge_attr' not in batch['paper', 'paper'] + + assert is_undirected(batch['paper', 'paper'].edge_index) + row, col = batch['paper', 'author'].edge_index - value = batch['paper', 'author'].edge_attr assert row.min() >= 0 and row.max() < batch['paper'].num_nodes assert col.min() >= 0 and col.max() < batch['author'].num_nodes - assert value.min() >= 500 and value.max() < 1500 - if not directed: - adj = mat[batch['paper'].x][:, batch['author'].x] - full_row, full_col = (adj >= 0).nonzero().t() - full_value = adj[adj >= 0] - assert full_value.size(0) == row.size(0) - assert torch.equal(row.unique(), full_row.unique()) - assert torch.equal(col.unique(), full_col.unique()) - assert torch.equal(value.unique(), full_value().unique()) - - assert is_subset( - batch['paper', 'author'].edge_index.to(torch.int64), - data['paper', 'author'].edge_index.to(torch.int64), - batch['paper'].x, - batch['author'].x - 100, - ) - assert len(batch['author', 'paper']) == 4 if WITH_PYG_LIB else 3 - num_edges = batch['author', 'paper'].num_edges - assert batch['author', 'paper'].e_id.size() == (num_edges, ) + if subgraph_type != 'bidirectional': + assert batch['paper', 'author'].e_id.size() == (row.numel(), ) + value = batch['paper', 'author'].edge_attr + assert value.min() >= 500 and value.max() < 1500 + + assert is_subset( + batch['paper', 'author'].edge_index.to(torch.int64), + data['paper', 'author'].edge_index.to(torch.int64), + batch['paper'].x, + batch['author'].x - 100, + ) + elif subgraph_type != 'directional': + assert 'e_id' not in batch['paper', 'author'] + assert 'edge_attr' not in batch['paper', 'author'] + + assert torch.equal( + batch['paper', 'author'].edge_index, + sort_edge_index(batch['author', 'paper'].edge_index.flip([0])), + ) + row, col = batch['author', 'paper'].edge_index - value = batch['author', 'paper'].edge_attr assert row.min() >= 0 and row.max() < batch['author'].num_nodes assert col.min() >= 0 and col.max() < batch['paper'].num_nodes - assert value.min() >= 1500 and value.max() < 2500 - if not directed: - adj = mat[batch['author'].x][:, batch['paper'].x] - full_row, full_col = (adj >= 0).nonzero().t() - full_value = adj[adj >= 0] - assert full_value.size(0) == row.size(0) - assert torch.equal(row.unique(), full_row.unique()) - assert torch.equal(col.unique(), full_col.unique()) - assert torch.equal(value.unique(), full_value().unique()) - - assert is_subset( - batch['author', 'paper'].edge_index.to(torch.int64), - data['author', 'paper'].edge_index.to(torch.int64), - batch['author'].x - 100, - batch['paper'].x, - ) + + if subgraph_type != 'bidirectional': + assert batch['author', 'paper'].e_id.size() == (row.numel(), ) + value = batch['author', 'paper'].edge_attr + assert value.min() >= 1500 and value.max() < 2500 + + assert is_subset( + batch['author', 'paper'].edge_index.to(torch.int64), + data['author', 'paper'].edge_index.to(torch.int64), + batch['author'].x - 100, + batch['paper'].x, + ) + elif subgraph_type != 'directional': + assert 'e_id' not in batch['author', 'paper'] + assert 'edge_attr' not in batch['author', 'paper'] + + assert torch.equal( + batch['author', 'paper'].edge_index, + sort_edge_index(batch['paper', 'author'].edge_index.flip([0])), + ) # Test for isolated nodes (there shouldn't exist any): - n_id = torch.cat([batch['paper'].x, batch['author'].x]) - adj = mat[n_id][:, n_id] - row, col = 
(adj >= 0).nonzero().t() - assert torch.cat([row, col]).unique().numel() == n_id.numel() + assert not batch.has_isolated_nodes() @onlyNeighborSampler -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode -def test_homo_neighbor_loader_on_cora(get_dataset, directed): +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +def test_homo_neighbor_loader_on_cora(get_dataset, subgraph_type): dataset = get_dataset(name='Cora') data = dataset[0] - data.n_id = torch.arange(data.num_nodes) - data.edge_weight = torch.rand(data.num_edges) + + mask = data.edge_index[0] < data.edge_index[1] + edge_index = data.edge_index[:, mask] + edge_weight = torch.rand(edge_index.size(1)) + data.edge_index, data.edge_weight = to_undirected(edge_index, edge_weight) split_idx = torch.arange(5, 8) - loader = NeighborLoader(data, num_neighbors=[-1, -1], - batch_size=split_idx.numel(), - input_nodes=split_idx, directed=directed) + loader = NeighborLoader( + data, + num_neighbors=[-1, -1], + batch_size=split_idx.numel(), + input_nodes=split_idx, + subgraph_type=subgraph_type, + ) assert len(loader) == 1 batch = next(iter(loader)) batch_size = batch.batch_size - if not directed: - n_id, _, _, e_mask = k_hop_subgraph(split_idx, num_hops=2, - edge_index=data.edge_index, - num_nodes=data.num_nodes) - - assert n_id.sort()[0].tolist() == batch.n_id.sort()[0].tolist() - assert batch.num_edges == int(e_mask.sum()) - class GNN(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super().__init__() @@ -283,7 +277,7 @@ def __init__(self, in_channels, hidden_channels, out_channels): def forward(self, x, edge_index, edge_weight): x = self.conv1(x, edge_index, edge_weight).relu() - x = self.conv2(x, edge_index, edge_weight).relu() + x = self.conv2(x, edge_index, edge_weight) return x model = GNN(dataset.num_features, 16, dataset.num_classes) @@ -294,55 +288,46 @@ def forward(self, x, edge_index, edge_weight): @onlyNeighborSampler -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode -def test_hetero_neighbor_loader_on_cora(get_dataset, directed): +@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +def test_hetero_neighbor_loader_on_cora(get_dataset, subgraph_type): dataset = get_dataset(name='Cora') data = dataset[0] - data.edge_weight = torch.rand(data.num_edges) hetero_data = HeteroData() hetero_data['paper'].x = data.x - hetero_data['paper'].n_id = torch.arange(data.num_nodes) hetero_data['paper', 'paper'].edge_index = data.edge_index - hetero_data['paper', 'paper'].edge_weight = data.edge_weight split_idx = torch.arange(5, 8) - loader = NeighborLoader(hetero_data, num_neighbors=[-1, -1], - batch_size=split_idx.numel(), - input_nodes=('paper', split_idx), - directed=directed) + loader = NeighborLoader( + hetero_data, + num_neighbors=[-1, -1], + batch_size=split_idx.numel(), + input_nodes=('paper', split_idx), + subgraph_type=subgraph_type, + ) assert len(loader) == 1 hetero_batch = next(iter(loader)) batch_size = hetero_batch['paper'].batch_size - if not directed: - n_id, _, _, e_mask = k_hop_subgraph(split_idx, num_hops=2, - edge_index=data.edge_index, - num_nodes=data.num_nodes) - - n_id = n_id.sort()[0] - assert n_id.tolist() == hetero_batch['paper'].n_id.sort()[0].tolist() - assert hetero_batch['paper', 'paper'].num_edges == int(e_mask.sum()) - class GNN(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super().__init__() self.conv1 = GraphConv(in_channels, 
hidden_channels) self.conv2 = GraphConv(hidden_channels, out_channels) - def forward(self, x, edge_index, edge_weight): - x = self.conv1(x, edge_index, edge_weight).relu() - x = self.conv2(x, edge_index, edge_weight).relu() + def forward(self, x, edge_index): + x = self.conv1(x, edge_index).relu() + x = self.conv2(x, edge_index) return x model = GNN(dataset.num_features, 16, dataset.num_classes) hetero_model = to_hetero(model, hetero_data.metadata()) - out1 = model(data.x, data.edge_index, data.edge_weight)[split_idx] - out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict, - hetero_batch.edge_weight_dict)['paper'][:batch_size] + out1 = model(data.x, data.edge_index)[split_idx] + out2 = hetero_model(hetero_batch.x_dict, + hetero_batch.edge_index_dict)['paper'][:batch_size] assert torch.allclose(out1, out2, atol=1e-6) diff --git a/test/sampler/test_sampler_base.py b/test/sampler/test_sampler_base.py index 9d57d1250508..dc8142176bf6 100644 --- a/test/sampler/test_sampler_base.py +++ b/test/sampler/test_sampler_base.py @@ -103,7 +103,7 @@ def test_heterogeneous_to_bidirectional(): ('v2', 'rev_to', 'v1'): edge_index2[1], ('v1', 'to', 'v1'): edge_index3[1], }, - edge=None, + edge={}, ).to_bidirectional() assert torch.equal( diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 480a6ac4f33b..61d5a5f74861 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -3,6 +3,7 @@ from torch_geometric.data import Data, FeatureStore, GraphStore, HeteroData from torch_geometric.loader.link_loader import LinkLoader from torch_geometric.sampler import NegativeSampling, NeighborSampler +from torch_geometric.sampler.base import SubgraphType from torch_geometric.typing import EdgeType, InputEdges, OptTensor @@ -97,8 +98,16 @@ class LinkNeighborLoader(LinkLoader): to work. (default: :obj:`None`) replace (bool, optional): If set to :obj:`True`, will sample with replacement. (default: :obj:`False`) - directed (bool, optional): If set to :obj:`False`, will include all - edges between all sampled nodes. (default: :obj:`True`) + subgraph_type (SubgraphType or str, optional): The type of the returned + subgraph. + If set to :obj:`"directional"`, the returned subgraph only holds + the sampled (directed) edges which are necessary to compute + representations for the sampled seed nodes. + If set to :obj:`"bidirectional"`, sampled edges are converted to + bidirectional edges. + If set to :obj:`"induced"`, the returned subgraph contains the + induced subgraph of all sampled nodes. + (default: :obj:`"directional"`) disjoint (bool, optional): If set to :obj: `True`, each seed node will create its own disjoint subgraph. If set to :obj:`True`, mini-batch outputs will have a :obj:`batch` @@ -181,7 +190,7 @@ def __init__( edge_label: OptTensor = None, edge_label_time: OptTensor = None, replace: bool = False, - directed: bool = True, + subgraph_type: Union[SubgraphType, str] = 'directional', disjoint: bool = False, temporal_strategy: str = 'uniform', neg_sampling: Optional[NegativeSampling] = None, @@ -192,6 +201,7 @@ def __init__( is_sorted: bool = False, filter_per_worker: bool = False, neighbor_sampler: Optional[NeighborSampler] = None, + directed: bool = True, # Deprecated. 
**kwargs, ): if (edge_label_time is not None) != (time_attr is not None): @@ -207,12 +217,13 @@ def __init__( data, num_neighbors=num_neighbors, replace=replace, - directed=directed, + subgraph_type=subgraph_type, disjoint=disjoint, temporal_strategy=temporal_strategy, time_attr=time_attr, is_sorted=is_sorted, share_memory=kwargs.get('num_workers', 0) > 0, + directed=directed, ) super().__init__( diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 3b3fe0ac8ecd..0c0ab7ffe97e 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -3,6 +3,7 @@ from torch_geometric.data import Data, FeatureStore, GraphStore, HeteroData from torch_geometric.loader.node_loader import NodeLoader from torch_geometric.sampler import NeighborSampler +from torch_geometric.sampler.base import SubgraphType from torch_geometric.typing import EdgeType, InputNodes, OptTensor @@ -125,8 +126,16 @@ class NeighborLoader(NodeLoader): (default: :obj:`None`) replace (bool, optional): If set to :obj:`True`, will sample with replacement. (default: :obj:`False`) - directed (bool, optional): If set to :obj:`False`, will include all - edges between all sampled nodes. (default: :obj:`True`) + subgraph_type (SubgraphType or str, optional): The type of the returned + subgraph. + If set to :obj:`"directional"`, the returned subgraph only holds + the sampled (directed) edges which are necessary to compute + representations for the sampled seed nodes. + If set to :obj:`"bidirectional"`, sampled edges are converted to + bidirectional edges. + If set to :obj:`"induced"`, the returned subgraph contains the + induced subgraph of all sampled nodes. + (default: :obj:`"directional"`) disjoint (bool, optional): If set to :obj: `True`, each seed node will create its own disjoint subgraph. If set to :obj:`True`, mini-batch outputs will have a :obj:`batch` @@ -178,7 +187,7 @@ def __init__( input_nodes: InputNodes = None, input_time: OptTensor = None, replace: bool = False, - directed: bool = True, + subgraph_type: Union[SubgraphType, str] = 'directional', disjoint: bool = False, temporal_strategy: str = 'uniform', time_attr: Optional[str] = None, @@ -187,6 +196,7 @@ def __init__( is_sorted: bool = False, filter_per_worker: bool = False, neighbor_sampler: Optional[NeighborSampler] = None, + directed: bool = True, # Deprecated. 
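(Editor's aside — illustration only, not part of the surrounding patch text.) A minimal sketch combining two loader features added in this stretch of patches: the manual sampling interface from #7197 (calling the loader directly with seed indices) and the `subgraph_type="bidirectional"` option from #7200. The toy graph below is invented for illustration:

```python
import torch
from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader
from torch_geometric.utils import is_undirected

data = Data(
    x=torch.randn(100, 16),
    edge_index=torch.randint(0, 100, (2, 500)),
)

loader = NeighborLoader(
    data,
    num_neighbors=[5, 5],
    batch_size=20,
    subgraph_type='bidirectional',  # sampled edges are converted to bidirectional edges
)

# Manual sampling (#7197): sample a subgraph for explicit seed nodes.
batch = loader([0, 1, 2])
print(batch.n_id[:3])                   # the seed nodes are placed first
print(is_undirected(batch.edge_index))  # True in bidirectional mode
```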
**kwargs, ): if input_time is not None and time_attr is None: @@ -199,12 +209,13 @@ def __init__( data, num_neighbors=num_neighbors, replace=replace, - directed=directed, + subgraph_type=subgraph_type, disjoint=disjoint, temporal_strategy=temporal_strategy, time_attr=time_attr, is_sorted=is_sorted, share_memory=kwargs.get('num_workers', 0) > 0, + directed=directed, ) super().__init__( diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 112871eddd6c..1a2eeb855b25 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -70,7 +70,7 @@ def filter_node_store_(store: NodeStorage, out_store: NodeStorage, def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor, - col: Tensor, index: Tensor, perm: OptTensor = None): + col: Tensor, index: OptTensor, perm: OptTensor = None): # Filters a edge storage object to only hold the edges in `index`, # which represents the new graph as denoted by `(row, col)`: for key, value in store.items(): @@ -84,8 +84,11 @@ def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor, col = col.to(value.device()) edge_attr = value.storage.value() if edge_attr is not None: - index = index.to(edge_attr.device) - edge_attr = index_select(edge_attr, index, dim=0) + if index is not None: + index = index.to(edge_attr.device) + edge_attr = index_select(edge_attr, index, dim=0) + else: + edge_attr = None sparse_sizes = out_store.size()[::-1] # TODO Currently, we set `is_sorted=False`, see: # https://github.com/pyg-team/pytorch_geometric/issues/4346 @@ -94,6 +97,10 @@ def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor, is_sorted=False, trust_data=True) elif store.is_edge_attr(key): + if index is None: + out_store[key] = None + continue + dim = store._parent().__cat_dim__(key, value, store) if isinstance(value, Tensor): index = index.to(value.device) @@ -114,7 +121,7 @@ def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor, def filter_data(data: Data, node: Tensor, row: Tensor, col: Tensor, - edge: Tensor, perm: OptTensor = None) -> Data: + edge: OptTensor, perm: OptTensor = None) -> Data: # Filters a data object to only hold nodes in `node` and edges in `edge`: out = copy.copy(data) filter_node_store_(data._store, out._store, node) @@ -127,7 +134,7 @@ def filter_hetero_data( node_dict: Dict[NodeType, Tensor], row_dict: Dict[EdgeType, Tensor], col_dict: Dict[EdgeType, Tensor], - edge_dict: Dict[EdgeType, Tensor], + edge_dict: Dict[EdgeType, OptTensor], perm_dict: Optional[Dict[EdgeType, OptTensor]] = None, ) -> HeteroData: # Filters a heterogeneous data object to only hold nodes in `node` and @@ -169,7 +176,7 @@ def filter_custom_store( node_dict: Dict[str, Tensor], row_dict: Dict[str, Tensor], col_dict: Dict[str, Tensor], - edge_dict: Dict[str, Tensor], + edge_dict: Dict[str, OptTensor], custom_cls: Optional[HeteroData] = None, ) -> HeteroData: r"""Constructs a `HeteroData` object from a feature store that only holds diff --git a/torch_geometric/sampler/base.py b/torch_geometric/sampler/base.py index d53530c5362d..7933952b6f7e 100644 --- a/torch_geometric/sampler/base.py +++ b/torch_geometric/sampler/base.py @@ -2,6 +2,7 @@ import math import warnings from abc import ABC +from collections import defaultdict from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Optional, Union @@ -37,11 +38,11 @@ def from_data(cls, data: Any): f"(got '{type(data)}')") -class GraphMode(Enum): - r"""The graph 
mode of the returned subgraph.""" +class SubgraphType(Enum): + r"""The type of the returned subgraph.""" directional = 'directional' bidirectional = 'bidirectional' - induced_subgraph = 'induced_subgraph' + induced = 'induced' @dataclass @@ -209,7 +210,7 @@ class HeteroSamplerOutput(CastMixin): node: Dict[NodeType, Tensor] row: Dict[EdgeType, Tensor] col: Dict[EdgeType, Tensor] - edge: Optional[Dict[EdgeType, Tensor]] + edge: Dict[EdgeType, OptTensor] batch: Optional[Dict[NodeType, Tensor]] = None num_sampled_nodes: Optional[Dict[NodeType, List[int]]] = None num_sampled_edges: Optional[Dict[EdgeType, List[int]]] = None @@ -223,7 +224,9 @@ def to_bidirectional(self) -> 'SamplerOutput': out = copy.copy(self) out.row = copy.copy(self.row) out.col = copy.copy(self.col) + out.edge = copy.copy(self.edge) + src_dst_dict = defaultdict(list) edge_types = self.row.keys() edge_types = [k for k in edge_types if not k[1].startswith('rev_')] for edge_type in edge_types: @@ -237,6 +240,8 @@ def to_bidirectional(self) -> 'SamplerOutput': rev_row=self.row[edge_type], rev_col=self.col[edge_type], ) + out.edge[edge_type] = None + elif rev_edge_type in self.row: out.row[edge_type], out.col[edge_type], _ = to_bidirectional( row=self.row[edge_type], @@ -244,14 +249,34 @@ def to_bidirectional(self) -> 'SamplerOutput': rev_row=self.row[rev_edge_type], rev_col=self.col[rev_edge_type], ) + out.edge[edge_type] = None out.row[rev_edge_type] = out.col[edge_type] out.col[rev_edge_type] = out.row[edge_type] - else: - warnings.warn(f"Cannot convert to bidirectional graph since " - f"the edge type {edge_type} does not seem to " - f"have a reverse edge type") + out.edge[rev_edge_type] = None + + else: # Find the reverse edge type (if it is unique): + if len(src_dst_dict) == 0: # Create mapping lazily. 
+ for key in self.row.keys(): + v1, _, v2 = key + src_dst_dict[(v1, v2)].append(key) + + if len(src_dst_dict[(dst, src)]) == 1: + rev_edge_type = src_dst_dict[(dst, src)][0] + row, col, _ = to_bidirectional( + row=self.row[edge_type], + col=self.col[edge_type], + rev_row=self.row[rev_edge_type], + rev_col=self.col[rev_edge_type], + ) + out.row[edge_type] = row + out.col[edge_type] = col + out.edge[edge_type] = None + + else: + warnings.warn(f"Cannot convert to bidirectional graph " + f"since the edge type {edge_type} does not " + f"seem to have a reverse edge type") - out.edge = None out.num_sampled_edges = None return out diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index 7c94eb6f864f..04ca3ca69477 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -24,7 +24,7 @@ NodeSamplerInput, SamplerOutput, ) -from torch_geometric.sampler.base import DataType, NumNeighbors +from torch_geometric.sampler.base import DataType, NumNeighbors, SubgraphType from torch_geometric.sampler.utils import remap_keys, to_csc, to_hetero_csc from torch_geometric.typing import EdgeType, NodeType, OptTensor @@ -38,14 +38,22 @@ def __init__( self, data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], num_neighbors: NumNeighborsType, + subgraph_type: Union[SubgraphType, str] = 'directional', replace: bool = False, - directed: bool = True, disjoint: bool = False, temporal_strategy: str = 'uniform', time_attr: Optional[str] = None, is_sorted: bool = False, share_memory: bool = False, + # Deprecated: + directed: bool = True, ): + if not directed: + subgraph_type = SubgraphType.induced + warnings.warn(f"The usage of the 'directed' argument in " + f"'{self.__class__.__name__}' is deprecated. 
Use " + f"`subgraph_type='induced'` instead.") + if not torch_geometric.typing.WITH_PYG_LIB and sys.platform == 'linux': warnings.warn("Using '{self.__class__.__name__}' without a " "'pyg-lib' installation is deprecated and will be " @@ -138,7 +146,7 @@ def __init__( self.num_neighbors = num_neighbors self.replace = replace - self.directed = directed + self.subgraph_type = SubgraphType(subgraph_type) self.disjoint = disjoint self.temporal_strategy = temporal_strategy @@ -171,7 +179,10 @@ def sample_from_nodes( self, inputs: NodeSamplerInput, ) -> Union[SamplerOutput, HeteroSamplerOutput]: - return node_sample(inputs, self._sample) + out = node_sample(inputs, self._sample) + if self.subgraph_type == SubgraphType.bidirectional: + out = out.to_bidirectional() + return out # Edge-based sampling ##################################################### @@ -179,8 +190,11 @@ def sample_from_edges( self, inputs: EdgeSamplerInput, neg_sampling: Optional[NegativeSampling] = None ) -> Union[SamplerOutput, HeteroSamplerOutput]: - return edge_sample(inputs, self._sample, self.num_nodes, self.disjoint, - self.node_time, neg_sampling) + out = edge_sample(inputs, self._sample, self.num_nodes, self.disjoint, + self.node_time, neg_sampling) + if self.subgraph_type == SubgraphType.bidirectional: + out = out.to_bidirectional() + return out # Other Utilities ######################################################### @@ -197,7 +211,7 @@ def _sample( **kwargs, ) -> Union[SamplerOutput, HeteroSamplerOutput]: r"""Implements neighbor sampling by calling either :obj:`pyg-lib` (if - installed) or :obj:`torch-sparse` sampling routines.""" + installed) or :obj:`torch-sparse` (if installed) sampling routines.""" if isinstance(seed, dict): # Heterogeneous sampling: if torch_geometric.typing.WITH_PYG_LIB: # TODO (matthias) `return_edge_id` if edge features present @@ -217,7 +231,7 @@ def _sample( seed_time, True, # csc self.replace, - self.directed, + self.subgraph_type != SubgraphType.induced, self.disjoint, self.temporal_strategy, True, # return_edge_id @@ -250,7 +264,7 @@ def _sample( self.num_neighbors.get_mapped_values(self.edge_types), self.num_neighbors.num_hops, self.replace, - self.directed, + self.subgraph_type != SubgraphType.induced, ) node, row, col, edge, batch = out + (None, ) num_sampled_nodes = num_sampled_edges = None @@ -288,7 +302,7 @@ def _sample( seed_time, True, # csc self.replace, - self.directed, + self.subgraph_type != SubgraphType.induced, self.disjoint, self.temporal_strategy, True, # return_edge_id @@ -316,7 +330,7 @@ def _sample( seed, # seed self.num_neighbors.get_mapped_values(), self.replace, - self.directed, + self.subgraph_type != SubgraphType.induced, ) node, row, col, edge, batch = out + (None, ) num_sampled_nodes = num_sampled_edges = None From f0c9fed058038a3288185e1f583069e6d98a28b4 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 18 Apr 2023 12:13:08 +0200 Subject: [PATCH 1123/2432] Fix optional `edge_id` handling in `SamplerOutput` (#7202) --- torch_geometric/sampler/base.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/torch_geometric/sampler/base.py b/torch_geometric/sampler/base.py index 7933952b6f7e..2d6d23e1fbe1 100644 --- a/torch_geometric/sampler/base.py +++ b/torch_geometric/sampler/base.py @@ -240,7 +240,8 @@ def to_bidirectional(self) -> 'SamplerOutput': rev_row=self.row[edge_type], rev_col=self.col[edge_type], ) - out.edge[edge_type] = None + if out.edge is not None: + out.edge[edge_type] = None elif rev_edge_type in self.row: 
out.row[edge_type], out.col[edge_type], _ = to_bidirectional( @@ -249,10 +250,11 @@ def to_bidirectional(self) -> 'SamplerOutput': rev_row=self.row[rev_edge_type], rev_col=self.col[rev_edge_type], ) - out.edge[edge_type] = None out.row[rev_edge_type] = out.col[edge_type] out.col[rev_edge_type] = out.row[edge_type] - out.edge[rev_edge_type] = None + if out.edge is not None: + out.edge[edge_type] = None + out.edge[rev_edge_type] = None else: # Find the reverse edge type (if it is unique): if len(src_dst_dict) == 0: # Create mapping lazily. @@ -270,7 +272,8 @@ def to_bidirectional(self) -> 'SamplerOutput': ) out.row[edge_type] = row out.col[edge_type] = col - out.edge[edge_type] = None + if out.edge is not None: + out.edge[edge_type] = None else: warnings.warn(f"Cannot convert to bidirectional graph " From f1f24a0ec9370362cdbfb5d8b0e3c1d42650db56 Mon Sep 17 00:00:00 2001 From: Saurav Maheshkar Date: Tue, 18 Apr 2023 14:04:13 +0100 Subject: [PATCH 1124/2432] refactor: migrate to `Flake8-pyproject` (#7204) Changes proposed by this PR can be summarized as follows: * Drop `setup.cfg` in favour of a `[tool.flake8]` section in `pyproject.toml` for `flake8` configuration. * Add [`Flake8-pyproject`](https://pypi.org/project/Flake8-pyproject/) as an additional dependency in the `pre-commit` config file. * Update docs to reflect the use of the plugin. --- Because we currently we don't employ the use of `flake8` apart from simple ignore flags, the addition of this plugin shouldn't be an issue even for future development. I've skipped making any changes to the `CHANGELOG` in line with older PRs suggesting refactors. --------- Co-authored-by: rusty1s --- .github/CONTRIBUTING.md | 3 ++- .pre-commit-config.yaml | 1 + pyproject.toml | 3 +++ setup.cfg | 9 --------- 4 files changed, 6 insertions(+), 10 deletions(-) delete mode 100644 setup.cfg diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 30ff755e5d91..3f2ca08b52cd 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -90,7 +90,8 @@ PyG uses [GitHub Actions](https://github.com/pyg-team/pytorch_geometric/actions) Everytime you send a Pull Request, your commit will be built and checked against the PyG guidelines: -1. Ensure that your code is formatted correctly by testing against the styleguide of [`flake8`](https://github.com/PyCQA/flake8): +1. Ensure that your code is formatted correctly by testing against the styleguide of [`flake8`](https://github.com/PyCQA/flake8). + We use the [`Flake8-pyproject`](https://pypi.org/project/Flake8-pyproject/) plugin for configuration: ```bash flake8 . 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 886861eb142e..a8fbac80de01 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -53,3 +53,4 @@ repos: hooks: - id: flake8 name: Check PEP8 + additional_dependencies: [Flake8-pyproject] diff --git a/pyproject.toml b/pyproject.toml index 5578ec907de7..2402bfad81f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -167,3 +167,6 @@ exclude_lines = [ "torch.cuda.is_available", "WITH_PT2", ] + +[tool.flake8] +ignore = ["F811", "W503", "W504"] diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 9f1f504b2d23..000000000000 --- a/setup.cfg +++ /dev/null @@ -1,9 +0,0 @@ -[aliases] -test=pytest - -[flake8] -ignore= - # ignore overload redefinition - F811, - # allow line breaks before/after binary operators - W503, W504, From 9236cb700fa90f8a72511622c2c6df409f3c7b96 Mon Sep 17 00:00:00 2001 From: kgajdamo Date: Tue, 18 Apr 2023 15:14:56 +0200 Subject: [PATCH 1125/2432] Save PyTorch profile data to CSV (#7114) Enable to save pytorch profiling results to csv (top 5 most time consuming ops). - The --write-csv argument has been modified so that it can now take three values: **None** (do not write data to csv) **bench** (write benchmark data to csv) **prof** (write pytorch profiler data to csv) - Added new argument --export-chrome-trace, that enables to export chrome trace file (defaults to true). Example command to profile with pytorch and save results to csv: `python ./inference_benchmark.py --profile --write-csv prof --export-chrome-trace False` --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + benchmark/inference/inference_benchmark.py | 40 +++++++++--- benchmark/training/training_benchmark.py | 50 +++++++++++---- benchmark/utils/utils.py | 12 +++- torch_geometric/profile/__init__.py | 8 ++- torch_geometric/profile/profile.py | 72 ++++++++++++++++++++-- 6 files changed, 151 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76530114e643..91cde2ba5848 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added an option to benchmark scripts to write PyTorch profiler results to CSV ([#7114](https://github.com/pyg-team/pytorch_geometric/pull/7114)) - Added subgraph type sampling option with bidirectional edge support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199), [#7200](https://github.com/pyg-team/pytorch_geometric/pull/7200)) - Added support for `"any"`-reductions in `scatter` ([#7198](https://github.com/pyg-team/pytorch_geometric/pull/7198)) - Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197)) diff --git a/benchmark/inference/inference_benchmark.py b/benchmark/inference/inference_benchmark.py index 72f634251749..4567d3ea56a1 100644 --- a/benchmark/inference/inference_benchmark.py +++ b/benchmark/inference/inference_benchmark.py @@ -1,4 +1,5 @@ import argparse +import warnings from collections import defaultdict from contextlib import nullcontext @@ -37,6 +38,10 @@ def full_batch_inference(model, data): def run(args: argparse.ArgumentParser): csv_data = defaultdict(list) + if args.write_csv == 'prof' and not args.profile: + warnings.warn("Cannot write profile data to CSV because profiling is " + "disabled") + # cuda device is not suitable for full batch mode device = torch.device( 'cuda' if not args.full_batch and torch.cuda.is_available() else 'cpu') @@ -170,7 +175,8 @@ def run(args: argparse.ArgumentParser): else: cpu_affinity = nullcontext() profile = torch_profile( - ) if args.profile else nullcontext() + args.export_chrome_trace, csv_data, + args.write_csv) if args.profile else nullcontext() itt = emit_itt( ) if args.vtune_profile else nullcontext() @@ -213,7 +219,7 @@ def run(args: argparse.ArgumentParser): print(f'Mini Batch Test Accuracy: \ {test_acc:.4f}') - if args.profile: + if args.profile and args.export_chrome_trace: rename_profile_file(model_name, dataset_name, str(batch_size), str(layers), str(hidden_channels), @@ -228,13 +234,26 @@ def run(args: argparse.ArgumentParser): print(f'Throughput: {throughput:.3f} samples/s') print(f'Latency: {latency:.3f} ms') - save_benchmark_data(csv_data, batch_size, layers, - num_neighbors, hidden_channels, - total_time, model_name, - dataset_name, - args.use_sparse_tensor) + num_records = 1 + if args.write_csv == 'prof': + # For profiling with PyTorch, we save the top-5 + # most time consuming operations. Therefore, the + # same data should be entered for each of them. + num_records = 5 + for _ in range(num_records): + save_benchmark_data( + csv_data, + batch_size, + layers, + num_neighbors, + hidden_channels, + total_time, + model_name, + dataset_name, + args.use_sparse_tensor, + ) if args.write_csv: - write_to_csv(csv_data) + write_to_csv(csv_data, args.write_csv) if __name__ == '__main__': @@ -274,5 +293,8 @@ def run(args: argparse.ArgumentParser): add('--full-batch', action='http://github.com/store_true', help='Use full batch mode') add('--evaluate', action='http://github.com/store_true') add('--ckpt_path', type=str, help='Checkpoint path for loading a model') - add('--write-csv', action='http://github.com/store_true', help='Write benchmark data to csv') + add('--write-csv', choices=[None, 'bench', 'prof'], default=None, + help='Write benchmark or PyTorch profile data to CSV') + add('--export-chrome-trace', default=True, type=bool, + help='Export chrome trace file. 
Works only with PyTorch profiler') run(argparser.parse_args()) diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index 8878ce9088d8..9e3b7a936e7e 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -82,6 +82,10 @@ def train_hetero(model, loader, optimizer, device, progress_bar=True, desc="", def run(args: argparse.ArgumentParser): csv_data = defaultdict(list) + if args.write_csv == 'prof' and not args.profile: + warnings.warn("Cannot write profile data to CSV because profiling is " + "disabled") + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # If we use a custom number of steps, then we need to use RandomSampler, # which already does shuffle. @@ -242,15 +246,19 @@ def run(args: argparse.ArgumentParser): print(f'Test Accuracy: {test_acc:.4f}') if args.profile: - with torch_profile(): + profile = torch_profile( + args.export_chrome_trace, csv_data, + args.write_csv) + with profile: train(model, subgraph_loader, optimizer, device, progress_bar=progress_bar, desc="Profile training") - rename_profile_file(model_name, dataset_name, - str(batch_size), - str(layers), - str(hidden_channels), - str(num_neighbors)) + if args.export_chrome_trace: + rename_profile_file( + model_name, dataset_name, + str(batch_size), str(layers), + str(hidden_channels), + str(num_neighbors)) total_time = t.duration if args.num_steps != -1: @@ -262,13 +270,26 @@ def run(args: argparse.ArgumentParser): print(f'Throughput: {throughput:.3f} samples/s') print(f'Latency: {latency:.3f} ms') - save_benchmark_data(csv_data, batch_size, layers, - num_neighbors, hidden_channels, - total_time, model_name, - dataset_name, - args.use_sparse_tensor) + num_records = 1 + if args.write_csv == 'prof': + # For profiling with PyTorch, we save the top-5 + # most time consuming operations. Therefore, the + # same data should be entered for each of them. + num_records = 5 + for _ in range(num_records): + save_benchmark_data( + csv_data, + batch_size, + layers, + num_neighbors, + hidden_channels, + total_time, + model_name, + dataset_name, + args.use_sparse_tensor, + ) if args.write_csv: - write_to_csv(csv_data, training=True) + write_to_csv(csv_data, args.write_csv, training=True) if __name__ == '__main__': @@ -309,7 +330,10 @@ def run(args: argparse.ArgumentParser): help='Enable filter-per-worker feature of the dataloader.') add('--measure-load-time', action='http://github.com/store_true') add('--evaluate', action='http://github.com/store_true') - add('--write-csv', action='http://github.com/store_true', help='Write benchmark data to csv') + add('--write-csv', choices=[None, 'bench', 'prof'], default=None, + help='Write benchmark or PyTorch profile data to CSV') + add('--export-chrome-trace', default=True, type=bool, + help='Export chrome trace file. 
Works only with PyTorch profiler') add('--trim', action='http://github.com/store_true', help="Use `trim_to_layer` optimization") args = argparser.parse_args() diff --git a/benchmark/utils/utils.py b/benchmark/utils/utils.py index c8d32b9e68ef..ecb5d16aa5fc 100644 --- a/benchmark/utils/utils.py +++ b/benchmark/utils/utils.py @@ -141,16 +141,22 @@ def save_benchmark_data(csv_data, batch_size, layers, num_neighbors, csv_data['SPARSE'].append(use_sparse_tensor) -def write_to_csv(csv_data, training=False): +def write_to_csv(csv_data, write_csv='bench', training=False): import pandas as pd results_path = osp.join(osp.dirname(osp.realpath(__file__)), '../results/') os.makedirs(results_path, exist_ok=True) name = 'training' if training else 'inference' - csv_path = osp.join(results_path, f'TOTAL_{name}_benchmark.csv') + if write_csv == 'bench': + csv_file_name = f'TOTAL_{name}_benchmark.csv' + else: + csv_file_name = f'TOTAL_prof_{name}_benchmark.csv' + csv_path = osp.join(results_path, csv_file_name) + index_label = 'TEST_ID' if write_csv == 'bench' else 'ID' + with_header = not osp.exists(csv_path) df = pd.DataFrame(csv_data) - df.to_csv(csv_path, mode='a', index_label='TEST_ID', header=with_header) + df.to_csv(csv_path, mode='a', index_label=index_label, header=with_header) @torch.no_grad() diff --git a/torch_geometric/profile/__init__.py b/torch_geometric/profile/__init__.py index 3ac26b1fbd53..9f7340979181 100644 --- a/torch_geometric/profile/__init__.py +++ b/torch_geometric/profile/__init__.py @@ -1,5 +1,10 @@ from .profile import profileit, timeit, get_stats_summary -from .profile import trace_handler, rename_profile_file, torch_profile +from .profile import ( + trace_handler, + print_time_total, + rename_profile_file, + torch_profile, +) from .utils import count_parameters from .utils import get_model_size from .utils import get_data_size @@ -13,6 +18,7 @@ 'timeit', 'get_stats_summary', 'trace_handler', + 'print_time_total', 'rename_profile_file', 'torch_profile', 'count_parameters', diff --git a/torch_geometric/profile/profile.py b/torch_geometric/profile/profile.py index eec20022ee83..fc8f0a887914 100644 --- a/torch_geometric/profile/profile.py +++ b/torch_geometric/profile/profile.py @@ -5,6 +5,7 @@ from typing import Any, List, NamedTuple, Tuple import torch +from torch.autograd.profiler import EventList from torch.profiler import ProfilerActivity, profile from torch_geometric.profile.utils import ( @@ -206,15 +207,19 @@ def read_from_memlab(line_profiler: Any) -> List[float]: # pragma: no cover def trace_handler(p): + print_time_total(p) + profile_dir = str(pathlib.Path.cwd()) + '/' + timeline_file = profile_dir + 'timeline' + '.json' + p.export_chrome_trace(timeline_file) + + +def print_time_total(p): if torch.cuda.is_available(): profile_sort = 'self_cuda_time_total' else: profile_sort = 'self_cpu_time_total' output = p.key_averages().table(sort_by=profile_sort) print(output) - profile_dir = str(pathlib.Path.cwd()) + '/' - timeline_file = profile_dir + 'timeline' + '.json' - p.export_chrome_trace(timeline_file) def rename_profile_file(*args): @@ -227,11 +232,66 @@ def rename_profile_file(*args): @contextmanager -def torch_profile(): +def torch_profile(export_chrome_trace=True, csv_data=None, write_csv=None): + use_cuda = torch.cuda.is_available() + activities = [ProfilerActivity.CPU] - if torch.cuda.is_available(): + if use_cuda: activities.append(ProfilerActivity.CUDA) - with profile(activities=activities, on_trace_ready=trace_handler) as p: + if export_chrome_trace: + 
p_trace_handler = trace_handler + else: + p_trace_handler = print_time_total + + p = profile(activities=activities, on_trace_ready=p_trace_handler) + + with p: yield p.step() + + if csv_data is not None and write_csv == 'prof': + if use_cuda: + profile_sort = 'self_cuda_time_total' + else: + profile_sort = 'self_cpu_time_total' + events = EventList( + sorted( + p.key_averages(), + key=lambda evt: getattr(evt, profile_sort), + reverse=True, + ), use_cuda=use_cuda) + + save_profile_data(csv_data, events, use_cuda) + + +def format_prof_time(time): + # Profile time is in micro seconds, so format it appropriately: + return round(time / 1e6, 3) + + +def save_profile_data(csv_data, events, use_cuda): + sum_self_cpu_time_total = sum( + [event.self_cpu_time_total for event in events]) + sum_cpu_time_total = sum([event.self_cpu_time_total for event in events]) + sum_self_cuda_time_total = sum( + [event.self_cuda_time_total for event in events]) if use_cuda else 0 + + for e in events[:5]: # Save top 5 most time consuming operations: + csv_data['NAME'].append(e.key) + csv_data['SELF CPU %'].append( + round(e.self_cpu_time_total * 100.0 / sum_self_cpu_time_total, 3)) + csv_data['SELF CPU'].append(format_prof_time(e.self_cpu_time_total)) + csv_data['CPU TOTAL %'].append( + round(e.cpu_time_total * 100.0 / sum_cpu_time_total, 3)) + csv_data['CPU TOTAL'].append(format_prof_time(e.cpu_time_total)) + csv_data['CPU TIME AVG'].append(format_prof_time(e.cpu_time_total)) + if use_cuda: + csv_data['SELF CUDA %'].append(e.self_cuda_time_total * 100.0 / + sum_self_cuda_time_total) + csv_data['SELF CUDA'].append( + format_prof_time(e.self_cuda_time_total)) + csv_data['CUDA TOTAL'].append(format_prof_time(e.cpu_time_total)) + csv_data['CUDA TIME AVG'].append(format_prof_time( + e.cpu_time_total)) + csv_data['# OF CALLS'].append(e.count) From 66a841894c423a86217ef97dabddeea930599aed Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Apr 2023 10:28:04 +0200 Subject: [PATCH 1126/2432] Support diverse set of inputs/outputs in `profile.benchmark` (#7207) --- torch_geometric/profile/benchmark.py | 35 +++++++++++++++++++++------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/torch_geometric/profile/benchmark.py b/torch_geometric/profile/benchmark.py index f71ef9c183c9..d2d2d82fd903 100644 --- a/torch_geometric/profile/benchmark.py +++ b/torch_geometric/profile/benchmark.py @@ -1,5 +1,5 @@ import time -from typing import Any, Callable, List, Optional, Tuple +from typing import Any, Callable, List, Optional, Tuple, Union import torch from torch import Tensor @@ -7,9 +7,22 @@ from torch_geometric.utils import is_torch_sparse_tensor +def require_grad(x: Any, requires_grad: bool = True) -> Any: + if (isinstance(x, Tensor) and x.is_floating_point() + and not is_torch_sparse_tensor(x)): + return x.detach().requires_grad_(requires_grad) + elif isinstance(x, list): + return [require_grad(v, requires_grad) for v in x] + elif isinstance(x, tuple): + return tuple(require_grad(v, requires_grad) for v in x) + elif isinstance(x, dict): + return {k: require_grad(v, requires_grad) for k, v in x.items()} + return x + + def benchmark( funcs: List[Callable], - args: Tuple[Any], + args: Union[Tuple[Any], List[Tuple[Any]]], num_steps: int, func_names: Optional[List[str]] = None, num_warmups: int = 10, @@ -20,7 +33,9 @@ def benchmark( Args: funcs ([Callable]): The list of functions to benchmark. - args ((Any, )): The arguments to pass to the functions. + args ((Any, ) or [(Any, )]): The arguments to pass to the functions. 
+ Can be a list of arguments for each function in :obj:`funcs` in + case their headers differ. num_steps (int): The number of steps to run the benchmark. func_names ([str], optional): The names of the functions. If not given, will try to infer the name from the function itself. @@ -47,15 +62,14 @@ def benchmark( raise ValueError(f"Length of 'funcs' (got {len(funcs)}) and " f"'func_names' (got {len(func_names)}) must be equal") + # Zero-copy `args` for each function (if necessary): + args_list = [args] * len(funcs) if isinstance(args, tuple) else args + ts: List[List[str]] = [] - for func, name in zip(funcs, func_names): + for func, args, name in zip(funcs, args_list, func_names): t_forward = t_backward = 0 for i in range(num_warmups + num_steps): - args = [ - arg.detach().requires_grad_(backward) - if isinstance(arg, Tensor) and arg.is_floating_point() - and not is_torch_sparse_tensor(arg) else arg for arg in args - ] + args = require_grad(args, backward) if torch.cuda.is_available(): torch.cuda.synchronize() @@ -69,6 +83,9 @@ def benchmark( t_forward += time.perf_counter() - t_start if backward: + if isinstance(out, dict): # TODO Generalize this logic. + out = torch.cat(list(out.values()), dim=0) + out_grad = torch.randn_like(out) t_start = time.perf_counter() From fadf0409c9e216e20dbc821d88c8eeea2857bf3e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Apr 2023 12:58:54 +0200 Subject: [PATCH 1127/2432] Introduce `type_ptr` argument to `HeteroLayerNorm` (#7208) --- CHANGELOG.md | 1 + test/nn/norm/test_layer_norm.py | 27 +++++++++++++++------ torch_geometric/nn/norm/layer_norm.py | 34 +++++++++++++++++++++++---- 3 files changed, 51 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 91cde2ba5848..7bf92c39e09a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `type_ptr` argument to `HeteroLayerNorm` ([#7208](https://github.com/pyg-team/pytorch_geometric/pull/7208)) - Added an option to benchmark scripts to write PyTorch profiler results to CSV ([#7114](https://github.com/pyg-team/pytorch_geometric/pull/7114)) - Added subgraph type sampling option with bidirectional edge support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199), [#7200](https://github.com/pyg-team/pytorch_geometric/pull/7200)) - Added support for `"any"`-reductions in `scatter` ([#7198](https://github.com/pyg-team/pytorch_geometric/pull/7198)) diff --git a/test/nn/norm/test_layer_norm.py b/test/nn/norm/test_layer_norm.py index 3c17c16708a7..ba3ac977ff9b 100644 --- a/test/nn/norm/test_layer_norm.py +++ b/test/nn/norm/test_layer_norm.py @@ -31,26 +31,39 @@ def test_layer_norm(device, affine, mode): @pytest.mark.parametrize('affine', [False, True]) def test_hetero_layer_norm(device, affine): x = torch.randn((100, 16), device=device) + expected = LayerNorm(16, affine=affine, mode='node').to(device)(x) # Test single type: - norm = LayerNorm(16, affine=affine, mode='node').to(device) - expected = norm(x) - type_vec = torch.zeros(100, dtype=torch.long, device=device) + type_ptr = [0, 100] + norm = HeteroLayerNorm(16, num_types=1, affine=affine).to(device) assert str(norm) == 'HeteroLayerNorm(16, num_types=1)' out = norm(x, type_vec) assert out.size() == (100, 16) - assert torch.allclose(out, expected) + assert torch.allclose(out, expected, atol=1e-3) + assert torch.allclose(norm(out, type_ptr=type_ptr), expected, atol=1e-3) + + mean = out.mean(dim=-1) + std = out.std(unbiased=False, dim=-1) + assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-2) + assert torch.allclose(std, torch.ones_like(std), atol=1e-2) # Test multiple types: - type_vec = torch.randint(5, (100, ), device=device) + type_vec = torch.arange(5, device=device) + type_vec = type_vec.view(-1, 1).repeat(1, 20).view(-1) + type_ptr = [0, 20, 40, 60, 80, 100] + norm = HeteroLayerNorm(16, num_types=5, affine=affine).to(device) + assert str(norm) == 'HeteroLayerNorm(16, num_types=5)' + out = norm(x, type_vec) assert out.size() == (100, 16) + assert torch.allclose(out, expected, atol=1e-3) + assert torch.allclose(norm(out, type_ptr=type_ptr), expected, atol=1e-3) mean = out.mean(dim=-1) std = out.std(unbiased=False, dim=-1) - assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-5) - assert torch.allclose(std, torch.ones_like(std), atol=1e-5) + assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-2) + assert torch.allclose(std, torch.ones_like(std), atol=1e-2) diff --git a/torch_geometric/nn/norm/layer_norm.py b/torch_geometric/nn/norm/layer_norm.py index c40d3cc9cfd9..fdaaeeb760df 100644 --- a/torch_geometric/nn/norm/layer_norm.py +++ b/torch_geometric/nn/norm/layer_norm.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import List, Optional, Union import torch import torch.nn.functional as F @@ -143,6 +143,7 @@ def __init__( mode: str = 'node', ): super().__init__() + assert mode == 'node' self.in_channels = in_channels self.num_types = num_types @@ -164,16 +165,41 @@ def reset_parameters(self): torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) - def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: + def forward( + self, + x: Tensor, + type_vec: OptTensor = None, + type_ptr: Optional[Union[Tensor, List[int]]] = None, + ) -> Tensor: r""" + .. note:: + Either :obj:`type_vec` or :obj:`type_ptr` needs to be specified. 
+ In general, relying on :obj:`type_ptr` is more efficient in case + the input tensor is sorted by types. + Args: x (torch.Tensor): The input features. - type_vec (torch.Tensor): A vector that maps each entry to a type. + type_vec (torch.Tensor, optional): A vector that maps each entry to + a type. (default: :obj:`None`) + type_ptr (torch.Tensor or List[int]): A vector denoting the + boundaries of types. (default: :obj:`None`) """ + if type_vec is None and type_ptr is None: + raise ValueError("Either 'type_vec' or 'type_ptr' must be given") + out = F.layer_norm(x, (self.in_channels, ), None, None, self.eps) if self.affine: - out = out * self.weight[type_vec] + self.bias[type_vec] + # TODO Revisit this logic completely as it performs worse than just + # operating on a dictionary of tensors + # (especially the `type_vec` code path) + if type_ptr is not None: + h = torch.empty_like(out) + for i, (s, e) in enumerate(zip(type_ptr[:-1], type_ptr[1:])): + h[s:e] = out[s:e] * self.weight[i] + self.bias[i] + out = h + else: + out = out * self.weight[type_vec] + self.bias[type_vec] return out From a585296df6edb1c603188baa98acfb3083cbb76a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Apr 2023 15:23:04 +0200 Subject: [PATCH 1128/2432] Added `optimizer_resolver` (#7209) --- CHANGELOG.md | 1 + test/nn/test_resolver.py | 17 +++++++++++++++++ torch_geometric/nn/resolver.py | 15 ++++++++++++--- torch_geometric/resolver.py | 4 ---- 4 files changed, 30 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bf92c39e09a..b35f93d61346 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `optimizer_resolver` ([#7209](https://github.com/pyg-team/pytorch_geometric/pull/7209)) - Added `type_ptr` argument to `HeteroLayerNorm` ([#7208](https://github.com/pyg-team/pytorch_geometric/pull/7208)) - Added an option to benchmark scripts to write PyTorch profiler results to CSV ([#7114](https://github.com/pyg-team/pytorch_geometric/pull/7114)) - Added subgraph type sampling option with bidirectional edge support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199), [#7200](https://github.com/pyg-team/pytorch_geometric/pull/7200)) diff --git a/test/nn/test_resolver.py b/test/nn/test_resolver.py index 46ffd4ed8dcd..cc3dac69e26e 100644 --- a/test/nn/test_resolver.py +++ b/test/nn/test_resolver.py @@ -8,6 +8,7 @@ aggregation_resolver, lr_scheduler_resolver, normalization_resolver, + optimizer_resolver, ) @@ -58,6 +59,22 @@ def test_normalization_resolver(norm_tuple): norm_module) +def test_optimizer_resolver(): + params = [torch.nn.Parameter(torch.Tensor(1))] + + assert isinstance(optimizer_resolver(torch.optim.SGD(params, lr=0.01)), + torch.optim.SGD) + assert isinstance(optimizer_resolver(torch.optim.Adam(params)), + torch.optim.Adam) + assert isinstance(optimizer_resolver(torch.optim.Rprop(params)), + torch.optim.Rprop) + + assert isinstance(optimizer_resolver('sgd', params, lr=0.01), + torch.optim.SGD) + assert isinstance(optimizer_resolver('adam', params), torch.optim.Adam) + assert isinstance(optimizer_resolver('rprop', params), torch.optim.Rprop) + + @pytest.mark.parametrize('scheduler_args', [ ('constant_with_warmup', LambdaLR), ('linear_with_warmup', LambdaLR), diff --git a/torch_geometric/nn/resolver.py b/torch_geometric/nn/resolver.py index ce6a9495848d..099ec3ff736f 100644 --- a/torch_geometric/nn/resolver.py +++ b/torch_geometric/nn/resolver.py @@ -28,7 +28,6 @@ 
def swish(x: Tensor) -> Tensor: def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): - import torch base_cls = torch.nn.Module base_cls_repr = 'Act' acts = [ @@ -47,8 +46,6 @@ def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): def normalization_resolver(query: Union[Any, str], *args, **kwargs): - import torch - import torch_geometric.nn.norm as norm base_cls = torch.nn.Module base_cls_repr = 'Norm' @@ -77,6 +74,18 @@ def aggregation_resolver(query: Union[Any, str], *args, **kwargs): return resolver(aggrs, aggr_dict, query, base_cls, None, *args, **kwargs) +# Optimizer Resolver ########################################################## + + +def optimizer_resolver(query: Union[Any, str], *args, **kwargs): + base_cls = Optimizer + optimizers = [ + optimizer for optimizer in vars(torch.optim).values() + if isinstance(optimizer, type) and issubclass(optimizer, base_cls) + ] + return resolver(optimizers, {}, query, base_cls, None, *args, **kwargs) + + # Learning Rate Scheduler Resolver ############################################ diff --git a/torch_geometric/resolver.py b/torch_geometric/resolver.py index 6b642d5bcae6..d10c36ab4062 100644 --- a/torch_geometric/resolver.py +++ b/torch_geometric/resolver.py @@ -22,9 +22,7 @@ def resolver(classes: List[Any], class_dict: Dict[str, Any], if query_repr == key_repr: if inspect.isclass(cls): obj = cls(*args, **kwargs) - assert callable(obj) return obj - assert callable(cls) return cls for cls in classes: @@ -32,9 +30,7 @@ def resolver(classes: List[Any], class_dict: Dict[str, Any], if query_repr in [cls_repr, cls_repr.replace(base_cls_repr, '')]: if inspect.isclass(cls): obj = cls(*args, **kwargs) - assert callable(obj) return obj - assert callable(cls) return cls choices = set(cls.__name__ for cls in classes) | set(class_dict.keys()) From 5ff9f3f610319d32473bb37e7c472bdb84d99238 Mon Sep 17 00:00:00 2001 From: Bartlomiej Wroblewski Date: Wed, 19 Apr 2023 16:22:33 +0200 Subject: [PATCH 1129/2432] Add `AddRemainingSelfLoops` transform (#7192) This PR adds the `AddRemainingSelfLoops` transform to generate only remaining self-loops instead of generating them on all the vertices like in the `AddSelfLoops` transform. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + .../test_add_remaining_self_loops.py | 72 +++++++++++++++++++ test/transforms/test_add_self_loops.py | 12 +++- torch_geometric/transforms/__init__.py | 2 + .../transforms/add_remaining_self_loops.py | 49 +++++++++++++ 5 files changed, 134 insertions(+), 2 deletions(-) create mode 100644 test/transforms/test_add_remaining_self_loops.py create mode 100644 torch_geometric/transforms/add_remaining_self_loops.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b35f93d61346..560e87b6011b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a `AddRemainingSelfLoops` transform ([#7192](https://github.com/pyg-team/pytorch_geometric/pull/7192)) - Added `optimizer_resolver` ([#7209](https://github.com/pyg-team/pytorch_geometric/pull/7209)) - Added `type_ptr` argument to `HeteroLayerNorm` ([#7208](https://github.com/pyg-team/pytorch_geometric/pull/7208)) - Added an option to benchmark scripts to write PyTorch profiler results to CSV ([#7114](https://github.com/pyg-team/pytorch_geometric/pull/7114)) diff --git a/test/transforms/test_add_remaining_self_loops.py b/test/transforms/test_add_remaining_self_loops.py new file mode 100644 index 000000000000..1de56f5a3fa3 --- /dev/null +++ b/test/transforms/test_add_remaining_self_loops.py @@ -0,0 +1,72 @@ +import torch + +from torch_geometric.data import Data, HeteroData +from torch_geometric.transforms import AddRemainingSelfLoops + + +def test_add_remaining_self_loops(): + assert str(AddRemainingSelfLoops()) == 'AddRemainingSelfLoops()' + + assert len(AddRemainingSelfLoops()(Data())) == 0 + + # No self-loops in `edge_index`. + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + edge_weight = torch.tensor([1, 2, 3, 4]) + edge_attr = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]]) + + data = Data(edge_index=edge_index, num_nodes=3) + data = AddRemainingSelfLoops()(data) + assert len(data) == 2 + assert data.edge_index.tolist() == [[0, 1, 1, 2, 0, 1, 2], + [1, 0, 2, 1, 0, 1, 2]] + assert data.num_nodes == 3 + + # Single self-loop in `edge_index`. + edge_index = torch.tensor([[0, 0, 1, 2], [1, 0, 2, 1]]) + data = Data(edge_index=edge_index, num_nodes=3) + data = AddRemainingSelfLoops()(data) + assert len(data) == 2 + assert data.edge_index.tolist() == [[0, 1, 2, 0, 1, 2], [1, 2, 1, 0, 1, 2]] + assert data.num_nodes == 3 + + data = Data(edge_index=edge_index, edge_weight=edge_weight, num_nodes=3) + data = AddRemainingSelfLoops(attr='edge_weight', fill_value=5)(data) + assert data.edge_index.tolist() == [[0, 1, 2, 0, 1, 2], [1, 2, 1, 0, 1, 2]] + assert data.num_nodes == 3 + assert data.edge_weight.tolist() == [1, 3, 4, 2, 5, 5] + + data = Data(edge_index=edge_index, edge_attr=edge_attr, num_nodes=3) + data = AddRemainingSelfLoops(attr='edge_attr', fill_value='add')(data) + assert data.edge_index.tolist() == [[0, 1, 2, 0, 1, 2], [1, 2, 1, 0, 1, 2]] + assert data.num_nodes == 3 + assert data.edge_attr.tolist() == [[1, 2], [5, 6], [7, 8], [3, 4], [8, 10], + [5, 6]] + + +def test_add_remaining_self_loops_all_loops_exist(): + # All self-loops already exist in the data object. + edge_index = torch.tensor([[0, 1, 2], [0, 1, 2]]) + data = Data(edge_index=edge_index, num_nodes=3) + data = AddRemainingSelfLoops()(data) + assert data.edge_index.tolist() == edge_index.tolist() + + # All self-loops already exist in the data object, some of them appear + # multiple times. 
+ edge_index = torch.tensor([[0, 0, 1, 1, 2], [0, 0, 1, 1, 2]]) + data = Data(edge_index=edge_index, num_nodes=3) + data = AddRemainingSelfLoops()(data) + assert data.edge_index.tolist() == [[0, 1, 2], [0, 1, 2]] + + +def test_hetero_add_remaining_self_loops(): + edge_index = torch.tensor([[0, 0, 1, 2], [1, 0, 2, 1]]) + + data = HeteroData() + data['v'].num_nodes = 3 + data['w'].num_nodes = 3 + data['v', 'v'].edge_index = edge_index + data['v', 'w'].edge_index = edge_index + data = AddRemainingSelfLoops()(data) + assert data['v', 'v'].edge_index.tolist() == [[0, 1, 2, 0, 1, 2], + [1, 2, 1, 0, 1, 2]] + assert data['v', 'w'].edge_index.tolist() == edge_index.tolist() diff --git a/test/transforms/test_add_self_loops.py b/test/transforms/test_add_self_loops.py index 45455bcda870..38a4ed0ffac3 100644 --- a/test/transforms/test_add_self_loops.py +++ b/test/transforms/test_add_self_loops.py @@ -7,12 +7,12 @@ def test_add_self_loops(): assert str(AddSelfLoops()) == 'AddSelfLoops()' + assert len(AddSelfLoops()(Data())) == 0 + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) edge_weight = torch.tensor([1, 2, 3, 4]) edge_attr = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]]) - assert len(AddSelfLoops()(Data())) == 0 - data = Data(edge_index=edge_index, num_nodes=3) data = AddSelfLoops()(data) assert len(data) == 2 @@ -36,6 +36,14 @@ def test_add_self_loops(): [8, 10], [5, 6]] +def test_add_self_loops_with_existing_self_loops(): + edge_index = torch.tensor([[0, 1, 2], [0, 1, 2]]) + data = Data(edge_index=edge_index, num_nodes=3) + data = AddSelfLoops()(data) + assert data.edge_index.tolist() == [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]] + assert data.num_nodes == 3 + + def test_hetero_add_self_loops(): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index f56db80f867e..96612f7cc658 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -18,6 +18,7 @@ from .target_indegree import TargetIndegree from .local_degree_profile import LocalDegreeProfile from .add_self_loops import AddSelfLoops +from .add_remaining_self_loops import AddRemainingSelfLoops from .remove_isolated_nodes import RemoveIsolatedNodes from .remove_duplicated_edges import RemoveDuplicatedEdges from .knn_graph import KNNGraph @@ -82,6 +83,7 @@ 'TargetIndegree', 'LocalDegreeProfile', 'AddSelfLoops', + 'AddRemainingSelfLoops', 'RemoveIsolatedNodes', 'RemoveDuplicatedEdges', 'KNNGraph', diff --git a/torch_geometric/transforms/add_remaining_self_loops.py b/torch_geometric/transforms/add_remaining_self_loops.py new file mode 100644 index 000000000000..74a1470a079d --- /dev/null +++ b/torch_geometric/transforms/add_remaining_self_loops.py @@ -0,0 +1,49 @@ +from typing import Optional, Union + +from torch import Tensor + +from torch_geometric.data import Data, HeteroData +from torch_geometric.data.datapipes import functional_transform +from torch_geometric.transforms import BaseTransform +from torch_geometric.utils import add_remaining_self_loops + + +@functional_transform('add_remaining_self_loops') +class AddRemainingSelfLoops(BaseTransform): + r"""Adds remaining self-loops to the given homogeneous or heterogeneous + graph (functional name: :obj:`add_remaining_self_loops`). + + Args: + attr (str, optional): The name of the attribute of edge weights + or multi-dimensional edge features to pass to + :meth:`torch_geometric.utils.add_remaining_self_loops`. 
+ (default: :obj:`"edge_weight"`) + fill_value (float or Tensor or str, optional): The way to generate + edge features of self-loops (in case :obj:`attr != None`). + If given as :obj:`float` or :class:`torch.Tensor`, edge features of + self-loops will be directly given by :obj:`fill_value`. + If given as :obj:`str`, edge features of self-loops are computed by + aggregating all features of edges that point to the specific node, + according to a reduce operation. (:obj:`"add"`, :obj:`"mean"`, + :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`1.`) + """ + def __init__(self, attr: Optional[str] = 'edge_weight', + fill_value: Union[float, Tensor, str] = 1.0): + self.attr = attr + self.fill_value = fill_value + + def __call__( + self, + data: Union[Data, HeteroData], + ) -> Union[Data, HeteroData]: + for store in data.edge_stores: + if store.is_bipartite() or 'edge_index' not in store: + continue + + store.edge_index, edge_weight = add_remaining_self_loops( + store.edge_index, getattr(store, self.attr, None), + fill_value=self.fill_value, num_nodes=store.size(0)) + + setattr(store, self.attr, edge_weight) + + return data From b2c846f32ed1028cc6fec8c984031e9019d126a2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Apr 2023 16:37:31 +0200 Subject: [PATCH 1130/2432] Update `LabelPropagation` reference (#7210) --- torch_geometric/nn/models/label_prop.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/torch_geometric/nn/models/label_prop.py b/torch_geometric/nn/models/label_prop.py index 41645db3f191..810c5fdb5514 100644 --- a/torch_geometric/nn/models/label_prop.py +++ b/torch_geometric/nn/models/label_prop.py @@ -10,8 +10,8 @@ class LabelPropagation(MessagePassing): - r"""The label propagation operator from the `"Learning from Labeled and - Unlabeled Data with Label Propagation" + r"""The label propagation operator, firstly introduced in the + `"Learning from Labeled and Unlabeled Data with Label Propagation" `_ paper .. math:: @@ -19,6 +19,9 @@ class LabelPropagation(MessagePassing): \mathbf{D}^{-1/2} \mathbf{Y} + (1 - \alpha) \mathbf{Y}, where unlabeled data is inferred by labeled data via propagation. + This concrete implementation here is derived from the `"Combining Label + Propagation And Simple Models Out-performs Graph Neural Networks" + `_ paper. .. 
note:: From b8a03a829ec84e17c24a7f4424b94e4f261cb016 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Apr 2023 16:50:41 +0200 Subject: [PATCH 1131/2432] Add `RandomLinkSplit` test on a dataset of graphs (#7211) --- test/transforms/test_random_link_split.py | 31 ++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/test/transforms/test_random_link_split.py b/test/transforms/test_random_link_split.py index 5c6fe5df412f..764f7b4b0cd6 100644 --- a/test/transforms/test_random_link_split.py +++ b/test/transforms/test_random_link_split.py @@ -2,7 +2,7 @@ import torch from torch_geometric.data import Data, HeteroData -from torch_geometric.testing import get_random_edge_index +from torch_geometric.testing import get_random_edge_index, onlyFullTest from torch_geometric.transforms import RandomLinkSplit from torch_geometric.utils import is_undirected, to_undirected @@ -288,3 +288,32 @@ def test_random_link_split_non_contiguous(): train_data, val_data, test_data = transform(data) assert train_data['p', 'p'].num_edges == 60 assert train_data['p', 'p'].edge_index.is_contiguous() + + +@onlyFullTest +def test_random_link_split_on_dataset(get_dataset): + dataset = get_dataset(name='MUTAG') + + dataset.transform = RandomLinkSplit( + num_val=0.1, + num_test=0.1, + disjoint_train_ratio=0.3, + add_negative_train_samples=False, + ) + + train_dataset, val_dataset, test_dataset = zip(*dataset) + assert len(train_dataset) == len(dataset) + assert len(val_dataset) == len(dataset) + assert len(test_dataset) == len(dataset) + + assert isinstance(train_dataset[0], Data) + assert train_dataset[0].edge_label.min() == 1.0 + assert train_dataset[0].edge_label.max() == 1.0 + + assert isinstance(val_dataset[0], Data) + assert val_dataset[0].edge_label.min() == 0.0 + assert val_dataset[0].edge_label.max() == 1.0 + + assert isinstance(test_dataset[0], Data) + assert test_dataset[0].edge_label.min() == 0.0 + assert test_dataset[0].edge_label.max() == 1.0 From 50c29f70b52b28ead161dd19addf4dd38f1288c5 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 20 Apr 2023 08:03:11 +0200 Subject: [PATCH 1132/2432] Respect empty tensors in `NormalizeFeatures` (#7214) --- torch_geometric/transforms/normalize_features.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/torch_geometric/transforms/normalize_features.py b/torch_geometric/transforms/normalize_features.py index 5b7a0fa8bb68..57ca3237678f 100644 --- a/torch_geometric/transforms/normalize_features.py +++ b/torch_geometric/transforms/normalize_features.py @@ -23,7 +23,8 @@ def __call__( ) -> Union[Data, HeteroData]: for store in data.stores: for key, value in store.items(*self.attrs): - value = value - value.min() - value.div_(value.sum(dim=-1, keepdim=True).clamp_(min=1.)) - store[key] = value + if value.numel() > 0: + value = value - value.min() + value.div_(value.sum(dim=-1, keepdim=True).clamp_(min=1.)) + store[key] = value return data From e3e63d66e52aa9ca4553274f0572f1f066d99c41 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 20 Apr 2023 10:14:20 +0200 Subject: [PATCH 1133/2432] Add `segment_matmul` micro-benchmark (#7215) --- test/nn/dense/test_linear.py | 79 ++++++++++++++++++++++++++++ torch_geometric/profile/benchmark.py | 6 ++- 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index 15919831dab3..4f200049e2cc 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -1,13 +1,18 @@ import copy +import 
warnings +from typing import List import pytest import torch +from torch import Tensor from torch.nn import Linear as PTLinear from torch.nn.parameter import UninitializedParameter import torch_geometric.typing from torch_geometric.nn import HeteroDictLinear, HeteroLinear, Linear +from torch_geometric.profile import benchmark from torch_geometric.testing import withCUDA, withPackage +from torch_geometric.typing import pyg_lib weight_inits = ['glorot', 'kaiming_uniform', None] bias_inits = ['zeros', None] @@ -216,3 +221,77 @@ def test_hetero_linear_sort(type_vec, device): node_type = int(type_vec[i]) expected = x[i] @ lin.weight[node_type] + lin.bias[node_type] assert torch.allclose(out[i], expected, atol=1e-3) + + +if __name__ == '__main__': + import argparse + + import dgl + + warnings.filterwarnings('ignore', '.*API of nested tensors.*') + warnings.filterwarnings('ignore', '.*TypedStorage is deprecated.*') + + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cuda') + parser.add_argument('--backward', action='http://github.com/store_true') + args = parser.parse_args() + + torch.manual_seed(12345) + + def get_xs(mean: float, std: float, num_types: int, + channels: int) -> List[Tensor]: + num_nodes_list = torch.normal( + mean=torch.tensor([mean] * num_types, dtype=torch.float), + std=torch.tensor([std] * num_types, dtype=torch.float), + ).round().to(torch.long).tolist() + + return [ + torch.randn(num_nodes, channels, device=args.device) + for num_nodes in num_nodes_list + ] + + def sequential(xs: List[Tensor], weights: List[Tensor]) -> List[Tensor]: + return [x @ weight for x, weight in zip(xs, weights)] + + def nested(xs: List[Tensor], weights: List[Tensor]) -> List[Tensor]: + x = torch.nested.nested_tensor(xs) + weight = torch.nested.nested_tensor(weights) + return list(torch.matmul(x, weight).unbind(0)) + + def grouped(x: Tensor, ptr: Tensor, weight: Tensor) -> Tensor: + return pyg_lib.ops.segment_matmul(x, ptr, weight) + + def padded(x: Tensor, weight: Tensor) -> Tensor: + return torch.matmul(x, weight) + + def dgl_mm(x: Tensor, count: Tensor, weight: Tensor) -> Tensor: + return dgl.ops.segment_mm(x, weight, count) + + num_nodes, channels = 1_000_000, 64 + + for num_types in [3, 5, 10, 50, 100, 200, 500, 1000]: + print(f'Number of types: {num_types}') + mean = num_nodes // num_types + std = mean // 4 + + xs = get_xs(mean, std, num_types, channels) + count = torch.tensor([x.size(0) for x in xs]) + ptr = torch.tensor([0] + [x.size(0) for x in xs]).cumsum(0) + x = torch.cat(xs, dim=0) + padded_x = torch.nested.nested_tensor(xs).to_padded_tensor(padding=0.0) + weight = torch.randn(num_types, channels, channels, device=args.device) + weights = list(weight.unbind(0)) + + benchmark( + funcs=[sequential, grouped, padded, dgl_mm], + func_names=['Sequential', 'Grouped', 'Padded', 'DGL'], + args=[ + (xs, weights), + (x, ptr, weight), + (padded_x, weight), + (x, count, weight), + ], + num_steps=50 if args.device == 'cpu' else 500, + num_warmups=10 if args.device == 'cpu' else 100, + backward=args.backward, + ) diff --git a/torch_geometric/profile/benchmark.py b/torch_geometric/profile/benchmark.py index d2d2d82fd903..60e6b2b8ea8e 100644 --- a/torch_geometric/profile/benchmark.py +++ b/torch_geometric/profile/benchmark.py @@ -83,7 +83,11 @@ def benchmark( t_forward += time.perf_counter() - t_start if backward: - if isinstance(out, dict): # TODO Generalize this logic. + # TODO Generalize this logic. 
This is also a bit unfair as the + # concatenation leads to incorrectly measured backward speeds. + if isinstance(out, (tuple, list)): + out = torch.cat(out, dim=0) + elif isinstance(out, dict): out = torch.cat(list(out.values()), dim=0) out_grad = torch.randn_like(out) From 2fdf01e0cf451722af2f84cbab548d5c8d0e1b41 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 20 Apr 2023 14:16:18 +0200 Subject: [PATCH 1134/2432] Add `batch_size` argument to `avg_pool_x` and `max_pool_x` (#7216) It can speedup runtime because: 1. We do not need to go through the batch dimension and look for max value. 2. We do not have to read tensor value which is placed on the device. Besides dim_size can be used if a user is using fixed size datasets. --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/nn/pool/test_avg_pool.py | 7 +++++++ test/nn/pool/test_max_pool.py | 7 +++++++ torch_geometric/nn/pool/avg_pool.py | 6 +++++- torch_geometric/nn/pool/max_pool.py | 6 +++++- 5 files changed, 25 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 560e87b6011b..353fb5c5ac9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Added an optional `batch_size` argument to `avg_pool_x` and `max_pool_x` ([#7216](https://github.com/pyg-team/pytorch_geometric/pull/7216)) - Fixed `subgraph` on unordered inputs ([#7187](https://github.com/pyg-team/pytorch_geometric/pull/7187)) - Allow missing node types in `HeteroDictLinear` ([#7185](https://github.com/pyg-team/pytorch_geometric/pull/7185)) - Optimized `from_networkx` memory footprint by reducing unnecessary copies ([#7119](https://github.com/pyg-team/pytorch_geometric/pull/7119)) diff --git a/test/nn/pool/test_avg_pool.py b/test/nn/pool/test_avg_pool.py index c1e0f6dd8962..15c8113df89b 100644 --- a/test/nn/pool/test_avg_pool.py +++ b/test/nn/pool/test_avg_pool.py @@ -23,11 +23,18 @@ def test_avg_pool_x(): out, _ = avg_pool_x(cluster, x, batch, size=2) assert out.tolist() == [[3, 4], [5, 6], [10, 11], [0, 0]] + batch_size = int(batch.max().item()) + 1 + out2, _ = avg_pool_x(cluster, x, batch, batch_size=batch_size, size=2) + assert torch.equal(out, out2) + if is_full_test(): jit = torch.jit.script(avg_pool_x) out, _ = jit(cluster, x, batch, size=2) assert out.tolist() == [[3, 4], [5, 6], [10, 11], [0, 0]] + out2, _ = jit(cluster, x, batch, batch_size=batch_size, size=2) + assert torch.equal(out, out2) + def test_avg_pool(): cluster = torch.tensor([0, 1, 0, 1, 2, 2]) diff --git a/test/nn/pool/test_max_pool.py b/test/nn/pool/test_max_pool.py index a6c4921fbaff..db58de3d91e9 100644 --- a/test/nn/pool/test_max_pool.py +++ b/test/nn/pool/test_max_pool.py @@ -23,11 +23,18 @@ def test_max_pool_x(): out, _ = max_pool_x(cluster, x, batch, size=2) assert out.tolist() == [[5, 6], [7, 8], [11, 12], [0, 0]] + batch_size = int(batch.max().item()) + 1 + out2, _ = max_pool_x(cluster, x, batch, batch_size=batch_size, size=2) + assert torch.equal(out, out2) + if is_full_test(): jit = torch.jit.script(max_pool_x) out, _ = jit(cluster, x, batch, size=2) assert out.tolist() == [[5, 6], [7, 8], [11, 12], [0, 0]] + out2, _ = jit(cluster, x, batch, batch_size=batch_size, size=2) + assert torch.equal(out, out2) + def test_max_pool(): cluster = torch.tensor([0, 1, 0, 1, 2, 2]) diff --git a/torch_geometric/nn/pool/avg_pool.py b/torch_geometric/nn/pool/avg_pool.py index 48ea071dd13e..487ab4139927 100644 --- a/torch_geometric/nn/pool/avg_pool.py +++ 
b/torch_geometric/nn/pool/avg_pool.py @@ -20,6 +20,7 @@ def avg_pool_x( cluster: Tensor, x: Tensor, batch: Tensor, + batch_size: Optional[int] = None, size: Optional[int] = None, ) -> Tuple[Tensor, Optional[Tensor]]: r"""Average pools node features according to the clustering defined in @@ -34,6 +35,8 @@ def avg_pool_x( batch (torch.Tensor): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) size (int, optional): The maximum number of clusters in a single example. (default: :obj:`None`) @@ -41,7 +44,8 @@ def avg_pool_x( :obj:`None`, else :class:`torch.Tensor` """ if size is not None: - batch_size = int(batch.max().item()) + 1 + if batch_size is None: + batch_size = int(batch.max().item()) + 1 return _avg_pool_x(cluster, x, batch_size * size), None cluster, perm = consecutive_cluster(cluster) diff --git a/torch_geometric/nn/pool/max_pool.py b/torch_geometric/nn/pool/max_pool.py index cf849a8cb530..d099455ecacb 100644 --- a/torch_geometric/nn/pool/max_pool.py +++ b/torch_geometric/nn/pool/max_pool.py @@ -20,6 +20,7 @@ def max_pool_x( cluster: Tensor, x: Tensor, batch: Tensor, + batch_size: Optional[int] = None, size: Optional[int] = None, ) -> Tuple[Tensor, Optional[Tensor]]: r"""Max-Pools node features according to the clustering defined in @@ -33,6 +34,8 @@ def max_pool_x( batch (torch.Tensor): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) size (int, optional): The maximum number of clusters in a single example. 
This property is useful to obtain a batch-wise dense representation, *e.g.* for applying FC layers, but should only be @@ -43,7 +46,8 @@ def max_pool_x( :obj:`None`, else :class:`torch.Tensor` """ if size is not None: - batch_size = int(batch.max().item()) + 1 + if batch_size is None: + batch_size = int(batch.max().item()) + 1 return _max_pool_x(cluster, x, batch_size * size), None cluster, perm = consecutive_cluster(cluster) From a67799fb8688175ebedbdcf8365de5b62c83859c Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Fri, 21 Apr 2023 05:45:27 -0700 Subject: [PATCH 1135/2432] Skip `dgl` in benchmark if not available (#7219) In case you don't care to compare to DGL or don't have it available --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/dense/test_linear.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index 4f200049e2cc..cfa20fc404a4 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -225,8 +225,11 @@ def test_hetero_linear_sort(type_vec, device): if __name__ == '__main__': import argparse - - import dgl + try: + import dgl + WITH_DLG = True + except: # noqa + WITH_DGL = False warnings.filterwarnings('ignore', '.*API of nested tensors.*') warnings.filterwarnings('ignore', '.*TypedStorage is deprecated.*') @@ -282,15 +285,19 @@ def dgl_mm(x: Tensor, count: Tensor, weight: Tensor) -> Tensor: weight = torch.randn(num_types, channels, channels, device=args.device) weights = list(weight.unbind(0)) + funcs = [sequential, grouped, padded] + func_names = ['Sequential', 'Grouped', 'Padded'] + args_list = [(xs, weights), (x, ptr, weight), (padded_x, weight)] + + if WITH_DGL: + funcs.append(dgl_mm) + func_names.append('DGL') + args_list.append((x, count, weight)) + benchmark( - funcs=[sequential, grouped, padded, dgl_mm], - func_names=['Sequential', 'Grouped', 'Padded', 'DGL'], - args=[ - (xs, weights), - (x, ptr, weight), - (padded_x, weight), - (x, count, weight), - ], + funcs=funcs, + func_names=func_names, + args=args_list, num_steps=50 if args.device == 'cpu' else 500, num_warmups=10 if args.device == 'cpu' else 100, backward=args.backward, From 4f67c7448a8522bc4e545e1712ad0e809d047c1d Mon Sep 17 00:00:00 2001 From: Viktor Stenby Date: Sat, 22 Apr 2023 14:43:12 +0200 Subject: [PATCH 1136/2432] Test `return_emb` in `MLP` (#7221) There was a bug in the .forward method of the MLP class. If return_emb=False, then it would still return a tuple containing the embeddings. The line with the bug was: ```python return (x, emb) if isinstance(return_emb, bool) else x ``` since `isinstance(False, bool)` is True. 
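A minimal standalone snippet (not part of this patch) that makes the failure mode concrete: `isinstance` checks the *type* of `return_emb`, not its value, so the guard is satisfied by `False` as well as `True`:

```python
# isinstance() is a type check, so it cannot distinguish True from False:
assert isinstance(False, bool) is True
assert isinstance(True, bool) is True

def buggy_return(x, emb, return_emb=False):
    # Old expression from MLP.forward: returns an (x, emb) tuple even when
    # the caller passes return_emb=False, because the type check succeeds.
    return (x, emb) if isinstance(return_emb, bool) else x

assert isinstance(buggy_return('out', 'emb', return_emb=False), tuple)
```

The fix below keeps the tuple return (for TorchScript compatibility, which does not allow value-dependent return types) but only populates `emb` when `return_emb is True`, so callers passing `return_emb=False` now receive `emb=None`, as covered by the new `test_mlp_return_emb` test.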
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/models/test_mlp.py | 20 ++++++++++++++++---- torch_geometric/nn/models/mlp.py | 10 ++++++++-- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/test/nn/models/test_mlp.py b/test/nn/models/test_mlp.py index cb28a89acc42..d830a8ed77e6 100644 --- a/test/nn/models/test_mlp.py +++ b/test/nn/models/test_mlp.py @@ -2,7 +2,6 @@ import torch from torch_geometric.nn import MLP -from torch_geometric.testing import is_full_test @pytest.mark.parametrize('norm', ['batch_norm', None]) @@ -22,9 +21,8 @@ def test_mlp(norm, act_first, plain_last): out = mlp(x) assert out.size() == (4, 64) - if is_full_test(): - jit = torch.jit.script(mlp) - assert torch.allclose(jit(x), out) + jit = torch.jit.script(mlp) + assert torch.allclose(jit(x), out) torch.manual_seed(12345) mlp = MLP( @@ -39,6 +37,20 @@ def test_mlp(norm, act_first, plain_last): assert torch.allclose(mlp(x), out) +def test_mlp_return_emb(): + x = torch.randn(4, 16) + + mlp = MLP([16, 32, 1]) + + out, emb = mlp(x, return_emb=True) + assert out.size() == (4, 1) + assert emb.size() == (4, 32) + + out, emb = mlp(x, return_emb=False) + assert out.size() == (4, 1) + assert emb is None + + @pytest.mark.parametrize('plain_last', [False, True]) def test_fine_grained_mlp(plain_last): mlp = MLP( diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index 402a6c087363..d2be2475e641 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -194,9 +194,14 @@ def forward( Args: x (torch.Tensor): The source tensor. return_emb (bool, optional): If set to :obj:`True`, will - additionally return the embeddings before execution of to the + additionally return the embeddings before execution of the final output layer. (default: :obj:`False`) """ + # `return_emb` is annotated here as `NoneType` to be compatible with + # TorchScript, which does not support different return types based on + # the value of an input argument. + emb: Optional[Tensor] = None + for i, (lin, norm) in enumerate(zip(self.lins, self.norms)): x = lin(x) if self.act is not None and self.act_first: @@ -205,7 +210,8 @@ def forward( if self.act is not None and not self.act_first: x = self.act(x) x = F.dropout(x, p=self.dropout[i], training=self.training) - emb = x + if isinstance(return_emb, bool) and return_emb is True: + emb = x if self.plain_last: x = self.lins[-1](x) From b07a84f53fc131ddc069923f258ee9fc7e3f9f9e Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Sat, 22 Apr 2023 14:16:38 +0100 Subject: [PATCH 1137/2432] `*.explain`: Added feature importance visualization for `HeteroExplanation` + Added example for link prediction explantion (#7096) Changes in this PR 1. Added `HeteroExplanation.visualize_feature_importance` 2. Added an example `captum_explainer_link.py`. That uses Captum to explain a Heterogenous link pred task. 
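For reference, a minimal usage sketch of the new functionality on top of the example's `Explainer` setup (the `data`, `explainer`, and `index` names come from the example below; the argument names mirror the homogeneous `Explanation.visualize_feature_importance` and should be treated as illustrative):

```python
# Sketch only: run the explainer on the selected edge labels and plot the
# aggregated per-feature importance across node types.
explanation = explainer(
    data.x_dict,
    data.edge_index_dict,
    index=index,  # edge labels to explain
    edge_label_index=data['user', 'movie'].edge_label_index,
)
# Show the top-10 most important input features, or pass `path=...`
# to write the bar plot to disk instead of displaying it:
explanation.visualize_feature_importance(top_k=10)
```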
--------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 + .../explain/captum_explainer_hetero_link.py | 120 +++++++++++++++ examples/hetero/hetero_link_pred.py | 6 - test/explain/test_hetero_explanation.py | 26 ++++ torch_geometric/explain/explanation.py | 137 ++++++++++++------ 5 files changed, 244 insertions(+), 47 deletions(-) create mode 100644 examples/explain/captum_explainer_hetero_link.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 353fb5c5ac9d..e0a8404ee8fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added an example for heterogeneous GNN explanation via `CaptumExplainer` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) +- Added `visualize_feature_importance` functionality to `HeteroExplanation` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) - Added a `AddRemainingSelfLoops` transform ([#7192](https://github.com/pyg-team/pytorch_geometric/pull/7192)) - Added `optimizer_resolver` ([#7209](https://github.com/pyg-team/pytorch_geometric/pull/7209)) - Added `type_ptr` argument to `HeteroLayerNorm` ([#7208](https://github.com/pyg-team/pytorch_geometric/pull/7208)) diff --git a/examples/explain/captum_explainer_hetero_link.py b/examples/explain/captum_explainer_hetero_link.py new file mode 100644 index 000000000000..3523d07ba172 --- /dev/null +++ b/examples/explain/captum_explainer_hetero_link.py @@ -0,0 +1,120 @@ +import os.path as osp + +import torch +import torch.nn.functional as F +from torch.nn import Linear + +import torch_geometric.transforms as T +from torch_geometric.datasets import MovieLens +from torch_geometric.explain import CaptumExplainer, Explainer +from torch_geometric.nn import SAGEConv, to_hetero + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data/MovieLens') +dataset = MovieLens(path, model_name='all-MiniLM-L6-v2') +data = dataset[0].to(device) + +# Add user node features for message passing: +data['user'].x = torch.eye(data['user'].num_nodes, device=device) +del data['user'].num_nodes + +# Add a reverse ('movie', 'rev_rates', 'user') relation for message passing: +data = T.ToUndirected()(data) +data['user', 'movie'].edge_label = data['user', + 'movie'].edge_label.to(torch.float) +del data['movie', 'rev_rates', 'user'].edge_label # Remove "reverse" label. 
+ +# Perform a link-level split into training, validation, and test edges: +data, _, _ = T.RandomLinkSplit( + num_val=0.1, + num_test=0.1, + neg_sampling_ratio=0.0, + edge_types=[('user', 'rates', 'movie')], + rev_edge_types=[('movie', 'rev_rates', 'user')], +)(data) + + +class GNNEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.conv1 = SAGEConv((-1, -1), hidden_channels) + self.conv2 = SAGEConv((-1, -1), out_channels) + + def forward(self, x, edge_index): + x = self.conv1(x, edge_index).relu() + x = self.conv2(x, edge_index) + return x + + +class EdgeDecoder(torch.nn.Module): + def __init__(self, hidden_channels): + super().__init__() + self.lin1 = Linear(2 * hidden_channels, hidden_channels) + self.lin2 = Linear(hidden_channels, 1) + + def forward(self, z_dict, edge_label_index): + row, col = edge_label_index + z = torch.cat([z_dict['user'][row], z_dict['movie'][col]], dim=-1) + + z = self.lin1(z).relu() + z = self.lin2(z) + return z.view(-1) + + +class Model(torch.nn.Module): + def __init__(self, hidden_channels): + super().__init__() + self.encoder = GNNEncoder(hidden_channels, hidden_channels) + self.encoder = to_hetero(self.encoder, data.metadata(), aggr='sum') + self.decoder = EdgeDecoder(hidden_channels) + + def forward(self, x_dict, edge_index_dict, edge_label_index): + z_dict = self.encoder(x_dict, edge_index_dict) + return self.decoder(z_dict, edge_label_index) + + +model = Model(hidden_channels=32).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + +for epoch in range(1, 10): + model.train() + optimizer.zero_grad() + pred = model( + data.x_dict, + data.edge_index_dict, + data['user', 'movie'].edge_label_index, + ) + loss = F.mse_loss(pred, data['user', 'movie'].edge_label) + loss.backward() + optimizer.step() + +explainer = Explainer( + model=model, + algorithm=CaptumExplainer('IntegratedGradients'), + explanation_type='model', + model_config=dict( + mode='regression', + task_level='edge', + return_type='raw', + ), + node_mask_type='attributes', + edge_mask_type='object', + threshold_config=dict( + threshold_type='topk', + value=200, + ), +) + +index = torch.tensor([2, 10]) # Explain edge labels with index 2 and 10. 
+explanation = explainer( + data.x_dict, + data.edge_index_dict, + index=index, + edge_label_index=data['user', 'movie'].edge_label_index, +) +print(f'Generated explanations in {explanation.available_explanations}') + +path = 'feature_importance.png' +explanation.visualize_feature_importance(path, top_k=10) +print(f"Feature importance plot has been saved to '{path}'") diff --git a/examples/hetero/hetero_link_pred.py b/examples/hetero/hetero_link_pred.py index d4bdcf8e52cf..b77393c8bab1 100644 --- a/examples/hetero/hetero_link_pred.py +++ b/examples/hetero/hetero_link_pred.py @@ -91,12 +91,6 @@ def forward(self, x_dict, edge_index_dict, edge_label_index): model = Model(hidden_channels=32).to(device) - -# Due to lazy initialization, we need to run one model step so the number -# of parameters can be inferred: -with torch.no_grad(): - model.encoder(train_data.x_dict, train_data.edge_index_dict) - optimizer = torch.optim.Adam(model.parameters(), lr=0.01) diff --git a/test/explain/test_hetero_explanation.py b/test/explain/test_hetero_explanation.py index 838eda2f64e1..f492407ca5ba 100644 --- a/test/explain/test_hetero_explanation.py +++ b/test/explain/test_hetero_explanation.py @@ -1,3 +1,4 @@ +import os.path as osp from typing import Optional, Union import pytest @@ -6,6 +7,7 @@ from torch_geometric.data import HeteroData from torch_geometric.explain import HeteroExplanation from torch_geometric.explain.config import MaskType +from torch_geometric.testing import withPackage def create_random_explanation( @@ -115,3 +117,27 @@ def test_edge_mask(): assert out['paper', 'author'].edge_mask.size() == (1, ) assert torch.equal(out['paper', 'author'].edge_index, torch.tensor([[1], [1]])) + + +@withPackage('matplotlib') +@pytest.mark.parametrize('top_k', [2, None]) +@pytest.mark.parametrize('node_mask_type', [None, 'attributes']) +def test_visualize_feature_importance( + top_k, + node_mask_type, + tmp_path, + hetero_data, +): + explanation = create_random_explanation( + hetero_data, + node_mask_type=node_mask_type, + ) + + path = osp.join(tmp_path, 'feature_importance.png') + + if node_mask_type is None: + with pytest.raises(ValueError, match="node_mask' is not"): + explanation.visualize_feature_importance(path, top_k=top_k) + else: + explanation.visualize_feature_importance(path, top_k=top_k) + assert osp.exists(path) diff --git a/torch_geometric/explain/explanation.py b/torch_geometric/explain/explanation.py index 71fe5073b601..5c2ec411892c 100644 --- a/torch_geometric/explain/explanation.py +++ b/torch_geometric/explain/explanation.py @@ -200,21 +200,18 @@ def visualize_feature_importance( feat_labels: Optional[List[str]] = None, top_k: Optional[int] = None, ): - r"""Creates a bar plot of the node features importance by summing up - :attr:`self.node_mask` across all nodes. + r"""Creates a bar plot of the node feature importances by summing up + the node mask across all nodes. Args: path (str, optional): The path to where the plot is saved. If set to :obj:`None`, will visualize the plot on-the-fly. (default: :obj:`None`) - feat_labels (List[str], optional): Optional labels for features. + feat_labels (List[str], optional): The labels of features. (default :obj:`None`) top_k (int, optional): Top k features to plot. If :obj:`None` plots all features. 
(default: :obj:`None`) """ - import matplotlib.pyplot as plt - import pandas as pd - node_mask = self.get('node_mask') if node_mask is None: raise ValueError(f"The attribute 'node_mask' is not available " @@ -225,44 +222,12 @@ def visualize_feature_importance( f"object-level 'node_mask' " f"(got shape {node_mask.size()})") - feat_importance = node_mask.sum(dim=0).cpu().numpy() - if feat_labels is None: - feat_labels = range(feat_importance.shape[0]) - - if len(feat_labels) != feat_importance.shape[0]: - raise ValueError(f"The '{self.__class__.__name__}' object holds " - f"{feat_importance.numel()} features, but " - f"only {len(feat_labels)} were passed") - - df = pd.DataFrame({'feat_importance': feat_importance}, - index=feat_labels) - df = df.sort_values("feat_importance", ascending=False) - df = df.round(decimals=3) - - if top_k is not None: - df = df.head(top_k) - title = f"Feature importance for top {len(df)} features" - else: - title = f"Feature importance for {len(df)} features" - - ax = df.plot( - kind='barh', - figsize=(10, 7), - title=title, - ylabel='Feature label', - xlim=[0, float(feat_importance.max()) + 0.3], - legend=False, - ) - plt.gca().invert_yaxis() - ax.bar_label(container=ax.containers[0], label_type='edge') + feat_labels = range(node_mask.size(1)) - if path is not None: - plt.savefig(path) - else: - plt.show() + score = node_mask.sum(dim=0) - plt.close() + return _visualize_score(score, feat_labels, path, top_k) def visualize_graph(self, path: Optional[str] = None, backend: Optional[str] = None): @@ -343,3 +308,93 @@ def _apply_masks( out[edge_type][key] = value[edge_mask] return out.subgraph(node_mask_dict) + + def visualize_feature_importance( + self, + path: Optional[str] = None, + feat_labels: Optional[Dict[NodeType, List[str]]] = None, + top_k: Optional[int] = None, + ): + r"""Creates a bar plot of the node feature importances by summing up + node masks across all nodes for each node type. + + Args: + path (str, optional): The path to where the plot is saved. + If set to :obj:`None`, will visualize the plot on-the-fly. + (default: :obj:`None`) + feat_labels (Dict[NodeType, List[str]], optional): The labels of + features for each node type. (default :obj:`None`) + top_k (int, optional): Top k features to plot. If :obj:`None` + plots all features. 
(default: :obj:`None`) + """ + node_mask_dict = self.node_mask_dict + if len(node_mask_dict) == 0: + raise ValueError(f"The attribute 'node_mask' is not available " + f"in '{self.__class__.__name__}' " + f"(got {self.available_explanations})") + for node_mask in node_mask_dict.values(): + if node_mask.dim() != 2 or node_mask.size(1) <= 1: + raise ValueError(f"Cannot compute feature importance for " + f"object-level 'node_mask' " + f"(got shape {node_mask_dict.size()})") + + if feat_labels is None: + feat_labels = {} + for node_type, node_mask in node_mask_dict.items(): + feat_labels[node_type] = range(node_mask.size(1)) + + score = torch.cat( + [node_mask.sum(dim=0) for node_mask in node_mask_dict.values()], + dim=0) + + all_feat_labels = [] + for node_type in node_mask_dict.keys(): + all_feat_labels += [ + f'{node_type}#{label}' for label in feat_labels[node_type] + ] + + return _visualize_score(score, all_feat_labels, path, top_k) + + +def _visualize_score( + score: torch.Tensor, + labels: List[str], + path: Optional[str] = None, + top_k: Optional[int] = None, +): + import matplotlib.pyplot as plt + import pandas as pd + + if len(labels) != score.numel(): + raise ValueError(f"The number of labels (got {len(labels)}) must " + f"match the number of scores (got {score.numel()})") + + score = score.cpu().numpy() + + df = pd.DataFrame({'score': score}, index=labels) + df = df.sort_values('score', ascending=False) + df = df.round(decimals=3) + + if top_k is not None: + df = df.head(top_k) + title = f"Feature importance for top {len(df)} features" + else: + title = f"Feature importance for {len(df)} features" + + ax = df.plot( + kind='barh', + figsize=(10, 7), + title=title, + ylabel='Feature label', + xlim=[0, float(df['score'].max()) + 0.3], + legend=False, + ) + plt.gca().invert_yaxis() + ax.bar_label(container=ax.containers[0], label_type='edge') + + if path is not None: + plt.savefig(path) + else: + plt.show() + + plt.close() From edbf8fcd3d94e2bdb580dc5b901cb53adeffc0f9 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 24 Apr 2023 12:44:46 +0200 Subject: [PATCH 1138/2432] Extend dataset summary to create stats for each node/edge type (#7203) example output for hetero data: ``` FakeHeteroDataset (#graphs=20): +------------+----------+----------+ | | #nodes | #edges | |------------+----------+----------| | mean | 3945.4 | 49149.2 | | std | 247.8 | 3481.2 | | min | 3300 | 40440 | | quantile25 | 3849 | 47016.8 | | median | 3974 | 49820 | | quantile75 | 4129 | 51367.8 | | max | 4207 | 53761 | +------------+----------+----------+ Number of nodes per node type: +------------+--------+--------+--------+--------+ | | #v0 | #v1 | #v2 | #v3 | |------------+--------+--------+--------+--------| | mean | 980.5 | 994.8 | 944.7 | 1025.4 | | std | 140.7 | 123.8 | 123.9 | 123.7 | | min | 765 | 770 | 754 | 753 | | quantile25 | 862.2 | 895 | 863 | 936.2 | | median | 979 | 1005 | 915 | 1067 | | quantile75 | 1068.8 | 1064.8 | 1036 | 1103.2 | | max | 1235 | 1197 | 1201 | 1225 | +------------+--------+--------+--------+--------+ Number of edges per edge type: +------------+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+ | | #('v0', 'e0', 'v2') | #('v2', 'e0', 'v0') | #('v1', 'e0', 'v0') | #('v3', 'e0', 'v0') | #('v1', 'e0', 'v3') | |------------+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------| | mean | 9749.5 | 9400.5 | 9897 | 10203 | 9899.2 | | std | 1395.8 
| 1229.9 | 1230.2 | 1230.6 | 1228.8 | | min | 7613 | 7502 | 7650 | 7498 | 7657 | | quantile25 | 8577.2 | 8589.5 | 8899.8 | 9315.2 | 8901.5 | | median | 9732 | 9108 | 10014 | 10611 | 9978 | | quantile75 | 10624.5 | 10307.8 | 10584.8 | 10971.5 | 10597 | | max | 12278 | 11935 | 11909 | 12186 | 11908 | +------------+-----------------------+-----------------------+-----------------------+-----------------------+-----------------------+ ``` For datasets that haven't `node_types` or `edge_types` defined or calling `from_dataset` method with parameter `per_type_breakdown=False` output will be the same as before the change. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_dataset_summary.py | 85 ++++++++++++++++++++++++------ torch_geometric/data/summary.py | 87 +++++++++++++++++++++++++++---- 3 files changed, 146 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0a8404ee8fc..03eaaec8b044 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Extend dataset summary to create stats for each node/edge type ([#7203](https://github.com/pyg-team/pytorch_geometric/pull/7203)) - Added an optional `batch_size` argument to `avg_pool_x` and `max_pool_x` ([#7216](https://github.com/pyg-team/pytorch_geometric/pull/7216)) - Fixed `subgraph` on unordered inputs ([#7187](https://github.com/pyg-team/pytorch_geometric/pull/7187)) - Allow missing node types in `HeteroDictLinear` ([#7185](https://github.com/pyg-team/pytorch_geometric/pull/7185)) diff --git a/test/data/test_dataset_summary.py b/test/data/test_dataset_summary.py index 4ebee67df95c..5eed8708d9f0 100644 --- a/test/data/test_dataset_summary.py +++ b/test/data/test_dataset_summary.py @@ -1,10 +1,21 @@ import torch +from torch import Tensor -from torch_geometric.data.summary import Summary +from torch_geometric.data.summary import Stats, Summary from torch_geometric.datasets import FakeDataset, FakeHeteroDataset from torch_geometric.testing import withPackage +def check_stats(stats: Stats, expected: Tensor): + assert stats.mean == float(expected.mean()) + assert stats.std == float(expected.std()) + assert stats.min == float(expected.min()) + assert stats.quantile25 == float(expected.quantile(0.25)) + assert stats.median == float(expected.median()) + assert stats.quantile75 == float(expected.quantile(0.75)) + assert stats.max == float(expected.max()) + + def test_dataset_summary(): dataset = FakeDataset(num_graphs=10) num_nodes = torch.Tensor([data.num_nodes for data in dataset]) @@ -15,27 +26,24 @@ def test_dataset_summary(): assert summary.name == 'FakeDataset' assert summary.num_graphs == 10 - assert summary.num_nodes.mean == num_nodes.mean().item() - assert summary.num_nodes.std == num_nodes.std().item() - assert summary.num_nodes.min == num_nodes.min().item() - assert summary.num_nodes.quantile25 == num_nodes.quantile(0.25).item() - assert summary.num_nodes.median == num_nodes.median().item() - assert summary.num_nodes.quantile75 == num_nodes.quantile(0.75).item() - assert summary.num_nodes.max == num_nodes.max().item() + check_stats(summary.num_nodes, num_nodes) + check_stats(summary.num_edges, num_edges) + + +@withPackage('tabulate') +def test_dataset_summary_representation(): + dataset = FakeDataset(num_graphs=10) + + summary1 = Summary.from_dataset(dataset, per_type=False) + summary2 = Summary.from_dataset(dataset, per_type=True) - assert summary.num_edges.mean == num_edges.mean().item() - assert 
summary.num_edges.std == num_edges.std().item() - assert summary.num_edges.min == num_edges.min().item() - assert summary.num_edges.quantile25 == num_edges.quantile(0.25).item() - assert summary.num_edges.median == num_edges.median().item() - assert summary.num_edges.quantile75 == num_edges.quantile(0.75).item() - assert summary.num_edges.max == num_edges.max().item() + assert str(summary1) == str(summary2) @withPackage('tabulate') def test_dataset_summary_hetero(): dataset1 = FakeHeteroDataset(num_graphs=10) - summary1 = Summary.from_dataset(dataset1) + summary1 = Summary.from_dataset(dataset1, per_type=False) dataset2 = [data.to_homogeneous() for data in dataset1] summary2 = Summary.from_dataset(dataset2) @@ -43,3 +51,48 @@ def test_dataset_summary_hetero(): assert summary1 == summary2 assert str(summary1) == str(summary2) + + +@withPackage('tabulate') +def test_dataset_summary_hetero_representation_length(): + dataset = FakeHeteroDataset(num_graphs=10) + summary = Summary.from_dataset(dataset) + num_lines = len(str(summary).splitlines()) + + stats_len = len(Stats.__dataclass_fields__) + len_header_and_border = 5 + num_tables = 3 # general, stats per node type, stats per edge type + + assert num_lines == num_tables * (stats_len + len_header_and_border) + + +def test_dataset_summary_hetero_per_type_check(): + dataset = FakeHeteroDataset(num_graphs=10) + exp_num_nodes = torch.Tensor([data.num_nodes for data in dataset]) + exp_num_edges = torch.Tensor([data.num_edges for data in dataset]) + + summary = dataset.get_summary() + + assert summary.name == 'FakeHeteroDataset' + assert summary.num_graphs == 10 + + check_stats(summary.num_nodes, exp_num_nodes) + check_stats(summary.num_edges, exp_num_edges) + + num_nodes_per_type = {} + for node_type in dataset.node_types: + num_nodes_per_type[node_type] = torch.Tensor( + [data[node_type].num_nodes for data in dataset]) + + assert len(summary.num_nodes_per_type) == len(dataset.node_types) + for node_type, stats in summary.num_nodes_per_type.items(): + check_stats(stats, num_nodes_per_type[node_type]) + + num_edges_per_type = {} + for edge_type in dataset.edge_types: + num_edges_per_type[edge_type] = torch.Tensor( + [data[edge_type].num_edges for data in dataset]) + + assert len(summary.num_edges_per_type) == len(dataset.edge_types) + for edge_type, stats in summary.num_edges_per_type.items(): + check_stats(stats, num_edges_per_type[edge_type]) diff --git a/torch_geometric/data/summary.py b/torch_geometric/data/summary.py index c8570a319073..4e1aa0ebadd1 100644 --- a/torch_geometric/data/summary.py +++ b/torch_geometric/data/summary.py @@ -1,10 +1,11 @@ +from collections import defaultdict from dataclasses import dataclass -from typing import List, Optional, Union +from typing import Dict, List, Optional, Union import torch from tqdm import tqdm -from torch_geometric.data import Dataset +from torch_geometric.data import Dataset, HeteroData @dataclass @@ -40,12 +41,15 @@ class Summary: num_graphs: int num_nodes: Stats num_edges: Stats + num_nodes_per_type: Optional[Dict[str, Stats]] = None + num_edges_per_type: Optional[Dict[str, Stats]] = None @classmethod def from_dataset( cls, dataset: Dataset, progress_bar: Optional[bool] = None, + per_type: bool = True, ): r"""Creates a summary of a :class:`~torch_geometric.data.Dataset` object. @@ -56,6 +60,9 @@ def from_dataset( progress bar during stats computation. If set to :obj:`None`, will automatically decide whether to show a progress bar based on dataset size. 
(default: :obj:`None`) + per_type (bool, optional). If set to :obj:`True`, will separate + statistics per node and edge type (only applicable in + heterogeneous graph datasets). (default: :obj:`True`) """ name = dataset.__class__.__name__ @@ -65,28 +72,86 @@ def from_dataset( if progress_bar: dataset = tqdm(dataset) - num_nodes_list, num_edges_list = [], [] + num_nodes, num_edges = [], [] + num_nodes_per_type = defaultdict(list) + num_edges_per_type = defaultdict(list) + for data in dataset: - num_nodes_list.append(data.num_nodes) - num_edges_list.append(data.num_edges) + num_nodes.append(data.num_nodes) + num_edges.append(data.num_edges) + + if per_type and isinstance(data, HeteroData): + for node_type in data.node_types: + num_nodes_per_type[node_type].append( + data[node_type].num_nodes) + for edge_type in data.edge_types: + num_edges_per_type[edge_type].append( + data[edge_type].num_edges) + + if len(num_nodes_per_type) > 0: + num_nodes_per_type = { + node_type: Stats.from_data(num_nodes_list) + for node_type, num_nodes_list in num_nodes_per_type.items() + } + else: + num_nodes_per_type = None + + if len(num_edges_per_type) > 0: + num_edges_per_type = { + edge_type: Stats.from_data(num_edges_list) + for edge_type, num_edges_list in num_edges_per_type.items() + } + else: + num_edges_per_type = None return cls( name=name, num_graphs=len(dataset), - num_nodes=Stats.from_data(num_nodes_list), - num_edges=Stats.from_data(num_edges_list), + num_nodes=Stats.from_data(num_nodes), + num_edges=Stats.from_data(num_edges), + num_nodes_per_type=num_nodes_per_type, + num_edges_per_type=num_edges_per_type, ) def __repr__(self) -> str: from tabulate import tabulate - prefix = f'{self.name} (#graphs={self.num_graphs}):\n' + body = f'{self.name} (#graphs={self.num_graphs}):\n' content = [['', '#nodes', '#edges']] stats = [self.num_nodes, self.num_edges] for field in Stats.__dataclass_fields__: row = [field] + [f'{getattr(s, field):.1f}' for s in stats] content.append(row) - body = tabulate(content, headers='firstrow', tablefmt='psql') - - return prefix + body + body += tabulate(content, headers='firstrow', tablefmt='psql') + + if self.num_nodes_per_type is not None: + content = [['']] + content[0] += list(self.num_nodes_per_type.keys()) + + for field in Stats.__dataclass_fields__: + row = [field] + [ + f'{getattr(s, field):.1f}' + for s in self.num_nodes_per_type.values() + ] + content.append(row) + body += "\nNumber of nodes per node type:\n" + body += tabulate(content, headers='firstrow', tablefmt='psql') + + if self.num_edges_per_type is not None: + content = [['']] + content[0] += [ + f"({', '.join(edge_type)})" + for edge_type in self.num_edges_per_type.keys() + ] + + for field in Stats.__dataclass_fields__: + row = [field] + [ + f'{getattr(s, field):.1f}' + for s in self.num_edges_per_type.values() + ] + content.append(row) + body += "\nNumber of edges per edge type:\n" + body += tabulate(content, headers='firstrow', tablefmt='psql') + + return body From c9fef62c2f6f509b360dfe85eeda48936d39ec87 Mon Sep 17 00:00:00 2001 From: Berke Kisin Date: Tue, 25 Apr 2023 07:04:14 +0200 Subject: [PATCH 1139/2432] Fix `HGTConv` `edge_type_vec` construction (#7194) This pr fixes the utility function https://github.com/pyg-team/pytorch_geometric/blob/3d4836bc24dbb1b180f29cbbbdbcd18b94116dd7/torch_geometric/nn/conv/hgt_conv.py#L123, which constructs the type_vec of edges wrong and also crashes if some edge_types are not present in the current edge_index_dict. 
Consider the following scenario: ```python # N =2, D=2, H=2 (2 nodes, head_dim 2, 2 heads) k = [ [0,0,1,1], [2,2,3,3] ] ``` after calling this line: https://github.com/pyg-team/pytorch_geometric/blob/3d4836bc24dbb1b180f29cbbbdbcd18b94116dd7/torch_geometric/nn/conv/hgt_conv.py#L141 the matrix k looks like this: ```python k= [ [0,0], [1,1], [2,2], [3,3]] # the type vec should look like this type_vec = [0,1,0,1] # but at current implementation it would look like this type_vec = [0,0,1,1] ``` After the reshape the attention heads are interleaved but the type vector that is currently constructed is sorted. We fixed this issue by constructing interleaved type vec. Alternatively we can transpose the k before the reshape to ensure that we can use sorted type vec. This will also allow us to set `is_sorted=True` for the heterolinear `k_rel` which would be more efficient. Also note that we added a test case for missing edge type in edge_index_dict. --------- Co-authored-by: berke.kisin Co-authored-by: toensoff Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinu Sunil --- CHANGELOG.md | 1 + test/nn/conv/test_hgt_conv.py | 40 ++++++++++++++++- torch_geometric/nn/conv/hgt_conv.py | 69 ++++++++++++++++------------- torch_geometric/nn/dense/linear.py | 2 +- 4 files changed, 79 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03eaaec8b044..be6c4b675b18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed `HGTConv` utility function `_construct_src_node_feat` ([#7194](https://github.com/pyg-team/pytorch_geometric/pull/7194)) - Extend dataset summary to create stats for each node/edge type ([#7203](https://github.com/pyg-team/pytorch_geometric/pull/7203)) - Added an optional `batch_size` argument to `avg_pool_x` and `max_pool_x` ([#7216](https://github.com/pyg-team/pytorch_geometric/pull/7216)) - Fixed `subgraph` on unordered inputs ([#7187](https://github.com/pyg-team/pytorch_geometric/pull/7187)) diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index 02bb7c8e4fc3..a41a755a9e57 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -193,7 +193,45 @@ def test_hgt_conv_missing_dst_node_type(): out_dict = conv(data.x_dict, data.edge_index_dict) assert out_dict['author'].size() == (4, 64) assert out_dict['paper'].size() == (6, 64) - assert out_dict['university'] is None + assert 'university' not in out_dict + + +def test_hgt_conv_missing_input_node_type(): + data = HeteroData() + data['author'].x = torch.randn(4, 16) + data['paper'].x = torch.randn(6, 32) + data['author', 'writes', + 'paper'].edge_index = get_random_edge_index(4, 6, 20) + + # Some nodes from metadata are missing in data. + # This might happen while using NeighborLoader. 
+ metadata = (['author', 'paper', + 'university'], [('author', 'writes', 'paper')]) + conv = HGTConv(-1, 64, metadata, heads=1) + + out_dict = conv(data.x_dict, data.edge_index_dict) + assert out_dict['paper'].size() == (6, 64) + assert 'university' not in out_dict + + +def test_hgt_conv_missing_edge_type(): + data = HeteroData() + data['author'].x = torch.randn(4, 16) + data['paper'].x = torch.randn(6, 32) + data['university'].x = torch.randn(10, 32) + + data['author', 'writes', + 'paper'].edge_index = get_random_edge_index(4, 6, 20) + + metadata = (['author', 'paper', + 'university'], [('author', 'writes', 'paper'), + ('university', 'employs', 'author')]) + conv = HGTConv(-1, 64, metadata, heads=1) + + out_dict = conv(data.x_dict, data.edge_index_dict) + assert out_dict['author'].size() == (4, 64) + assert out_dict['paper'].size() == (6, 64) + assert 'university' not in out_dict if __name__ == '__main__': diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index a97f2121abf3..9287f6501ba6 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ b/torch_geometric/nn/conv/hgt_conv.py @@ -71,6 +71,10 @@ def __init__( self.heads = heads self.node_types = metadata[0] self.edge_types = metadata[1] + self.edge_types_map = { + edge_type: i + for i, edge_type in enumerate(metadata[1]) + } self.dst_node_types = set([key[-1] for key in self.edge_types]) @@ -83,10 +87,10 @@ def __init__( dim = out_channels // heads num_types = heads * len(self.edge_types) - self.k_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, - bias=False) - self.v_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, - bias=False) + self.k_rel = HeteroLinear(dim, dim, num_types, bias=False, + is_sorted=True) + self.v_rel = HeteroLinear(dim, dim, num_types, bias=False, + is_sorted=True) self.skip = ParameterDict({ node_type: Parameter(torch.Tensor(1)) @@ -121,36 +125,40 @@ def _cat(self, x_dict: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, int]]: return torch.cat(outs, dim=0), offset def _construct_src_node_feat( - self, - k_dict: Dict[str, Tensor], - v_dict: Dict[str, Tensor], + self, k_dict: Dict[str, Tensor], v_dict: Dict[str, Tensor], + edge_index_dict: Dict[EdgeType, Adj] ) -> Tuple[Tensor, Tensor, Dict[EdgeType, int]]: """Constructs the source node representations.""" - count = 0 cumsum = 0 + num_edge_types = len(self.edge_types) H, D = self.heads, self.out_channels // self.heads # Flatten into a single tensor with shape [num_edge_types * heads, D]: ks: List[Tensor] = [] vs: List[Tensor] = [] - type_list: List[int] = [] + type_list: List[Tensor] = [] offset: Dict[EdgeType] = {} - for edge_type in self.edge_types: - src, _, _ = edge_type - - ks.append(k_dict[src].reshape(-1, D)) - vs.append(v_dict[src].reshape(-1, D)) - + for edge_type in edge_index_dict.keys(): + src = edge_type[0] N = k_dict[src].size(0) - for _ in range(H): - type_list.append(torch.full((N, ), count, dtype=torch.long)) - count += 1 offset[edge_type] = cumsum cumsum += N - type_vec = torch.cat(type_list, dim=0) - k = self.k_rel(torch.cat(ks, dim=0), type_vec).view(-1, H, D) - v = self.v_rel(torch.cat(vs, dim=0), type_vec).view(-1, H, D) + # construct type_vec for curr edge_type with shape [H, D] + edge_type_offset = self.edge_types_map[edge_type] + type_vec = torch.arange(H, dtype=torch.long).view(-1, 1).repeat( + 1, N) * num_edge_types + edge_type_offset + + type_list.append(type_vec) + ks.append(k_dict[src]) + vs.append(v_dict[src]) + + ks = torch.cat(ks, dim=0).transpose(0, 1).reshape(-1, D) + vs = torch.cat(vs, 
dim=0).transpose(0, 1).reshape(-1, D) + type_vec = torch.cat(type_list, dim=1).flatten() + + k = self.k_rel(ks, type_vec).view(H, -1, D).transpose(0, 1) + v = self.v_rel(vs, type_vec).view(H, -1, D).transpose(0, 1) return k, v, offset @@ -184,12 +192,14 @@ def forward( # Compute K, Q, V over node types: kqv_dict = self.kqv_lin(x_dict) for key, val in kqv_dict.items(): - k_dict[key] = val[:, :F].view(-1, H, D) - q_dict[key] = val[:, F:2 * F].view(-1, H, D) - v_dict[key] = val[:, 2 * F:].view(-1, H, D) + k, q, v = torch.tensor_split(val, 3, dim=1) + k_dict[key] = k.view(-1, H, D) + q_dict[key] = q.view(-1, H, D) + v_dict[key] = v.view(-1, H, D) q, dst_offset = self._cat(q_dict) - k, v, src_offset = self._construct_src_node_feat(k_dict, v_dict) + k, v, src_offset = self._construct_src_node_feat( + k_dict, v_dict, edge_index_dict) edge_index, edge_attr = construct_bipartite_edge_index( edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel) @@ -200,7 +210,8 @@ def forward( # Reconstruct output node embeddings dict: for node_type, start_offset in dst_offset.items(): end_offset = start_offset + q_dict[node_type].size(0) - out_dict[node_type] = out[start_offset:end_offset] + if node_type in self.dst_node_types: + out_dict[node_type] = out[start_offset:end_offset] # Transform output node embeddings: a_dict = self.out_lin({ @@ -210,11 +221,7 @@ def forward( # Iterate over node types: for node_type, out in out_dict.items(): - if node_type not in self.dst_node_types: - out_dict[node_type] = None - continue - else: - out = a_dict[node_type] + out = a_dict[node_type] if out.size(-1) == x_dict[node_type].size(-1): alpha = self.skip[node_type].sigmoid() diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index e042abb9ed6a..b13c474bd67c 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -387,7 +387,7 @@ def forward( biases.append(lin.bias) biases = None if biases[0] is None else biases outs = pyg_lib.ops.grouped_matmul(xs, weights, biases) - for key, out in zip(self.lins.keys(), outs): + for key, out in zip(x_dict.keys(), outs): if key in x_dict: out_dict[key] = out else: From 0c4ea3a43b7b2b9a95afc9ec41ea26140bb8480d Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Tue, 25 Apr 2023 04:34:19 -0700 Subject: [PATCH 1140/2432] minor fix for `qm9_pretrained_schnet.py` (#7228) ``` root@9135585df2bb:/workspace# python3 examples/qm9_pretrained_schnet.py Traceback (most recent call last): File "examples/qm9_pretrained_schnet.py", line 17, in dataset = QM9(osp.join()) TypeError: join() missing 1 required positional argument: 'a' ``` after this minor fix and pip installing ase and schnet==1.0.0 i get: ``` Traceback (most recent call last): File "examples/qm9_pretrained_schnet.py", line 22, in model, datasets = SchNet.from_qm9_pretrained(path, dataset, target) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/models/schnet.py", line 252, in from_qm9_pretrained net.readout = 'mean' if mean is True else 'add' File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1685, in __setattr__ raise TypeError("cannot assign '{}' as child module '{}' " TypeError: cannot assign 'str' as child module 'readout' (torch.nn.Module or None expected) ``` if i set net.readout = None then it works and I get to the part of the schnet that relies on torch cluster (which i will eventually make a part of pyg-lib along with a bunch of other torch-* functionalities that are needed to be moved to pyg-lib) ``` File 
"examples/qm9_pretrained_schnet.py", line 32, in pred = model(data.z, data.pos, data.batch) File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1533, in _call_impl return forward_call(*args, **kwargs) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/models/schnet.py", line 284, in forward edge_index, edge_weight = self.interaction_graph(pos, batch) File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1533, in _call_impl return forward_call(*args, **kwargs) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/models/schnet.py", line 352, in forward edge_index = radius_graph(pos, r=self.cutoff, batch=batch, File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/pool/__init__.py", line 210, in radius_graph return torch_cluster.radius_graph(x, r, batch, loop, max_num_neighbors, AttributeError: 'NoneType' object has no attribute 'radius_graph' ``` note that w/ schnet >= 2.0: ``` Traceback (most recent call last): File "examples/qm9_pretrained_schnet.py", line 22, in model, datasets = SchNet.from_qm9_pretrained(path, dataset, target) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/models/schnet.py", line 219, in from_qm9_pretrained state = torch.load(path, map_location='cpu') File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 817, in load return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args) File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 1045, in _legacy_load result = unpickler.load() File "/usr/lib/python3.8/pickle.py", line 1212, in load dispatch[key[0]](self) File "/usr/lib/python3.8/pickle.py", line 1528, in load_global klass = self.find_class(module, name) File "/usr/local/lib/python3.8/dist-packages/torch/serialization.py", line 850, in find_class return super().find_class(mod_name, name) File "/usr/local/lib/python3.8/dist-packages/pytorch_lightning/_graveyard/legacy_import_unpickler.py", line 24, in find_class return super().find_class(new_module, name) File "/usr/lib/python3.8/pickle.py", line 1579, in find_class __import__(module, level=0) ModuleNotFoundError: No module named 'schnetpack.atomistic.model' ``` --- examples/qm9_pretrained_schnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/qm9_pretrained_schnet.py b/examples/qm9_pretrained_schnet.py index da91f1db093a..61d6c31bf222 100644 --- a/examples/qm9_pretrained_schnet.py +++ b/examples/qm9_pretrained_schnet.py @@ -14,7 +14,7 @@ args = parser.parse_args() path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'QM9') -dataset = QM9(osp.join()) +dataset = QM9(path) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') From adcf75631d7519add5ff1ee4e6a27079883abeb7 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 26 Apr 2023 16:52:21 +0200 Subject: [PATCH 1141/2432] Update `to_hetero` error message (#7238) --- test/profile/test_profile_utils.py | 2 +- torch_geometric/nn/to_hetero_transformer.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/profile/test_profile_utils.py b/test/profile/test_profile_utils.py index c9215be68425..4f38aa21a24f 100644 --- a/test/profile/test_profile_utils.py +++ b/test/profile/test_profile_utils.py @@ -24,7 +24,7 @@ def test_count_parameters(): def test_get_model_size(): model_size = get_model_size(Linear(32, 128, bias=False)) - assert model_size >= 32 * 128 * 4 and model_size < 32 * 128 * 4 + 1000 + assert model_size >= 32 * 128 * 
4 and model_size < 32 * 128 * 4 + 2000 def test_get_data_size(): diff --git a/torch_geometric/nn/to_hetero_transformer.py b/torch_geometric/nn/to_hetero_transformer.py index 219c05cdf104..e6c4b3a6e02d 100644 --- a/torch_geometric/nn/to_hetero_transformer.py +++ b/torch_geometric/nn/to_hetero_transformer.py @@ -401,7 +401,11 @@ def _recurse(value: Any) -> Any: self.find_by_name(f'{value.name}__{key2str(key[-1])}'), ) else: - raise NotImplementedError + raise ValueError(f"Cannot generate a graph node '{node}' " + f"for type '{key}' since it does not " + f"exist. Please make sure that all " + f"node types get updated during message " + f"passing.") elif isinstance(value, dict): return {k: _recurse(v) for k, v in value.items()} elif isinstance(value, list): From 5778c65686b0c6af40e745a3ec449e7b27628ead Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 26 Apr 2023 16:52:40 +0200 Subject: [PATCH 1142/2432] Add `batch_size` and `max_num_nodes` argument in `MemPooling` layer (#7239) It can be used to avoid additional calculations if a user is using fixed-size batch. --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/nn/pool/mem_pool.py | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be6c4b675b18..163647b09ef0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050)) - Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) +- Added an optional `batch_size` and `max_num_nodes` arguments to `MemPooling` layer ([#7239](https://github.com/pyg-team/pytorch_geometric/pull/7239)) ### Removed diff --git a/torch_geometric/nn/pool/mem_pool.py b/torch_geometric/nn/pool/mem_pool.py index 446916b60f49..00ea93808900 100644 --- a/torch_geometric/nn/pool/mem_pool.py +++ b/torch_geometric/nn/pool/mem_pool.py @@ -78,8 +78,14 @@ def kl_loss(S: Tensor) -> Tensor: loss = KLDivLoss(reduction='batchmean', log_target=False) return loss(S.clamp(EPS).log(), P.clamp(EPS)) - def forward(self, x: Tensor, batch: Optional[Tensor] = None, - mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: + def forward( + self, + x: Tensor, + batch: Optional[Tensor] = None, + mask: Optional[Tensor] = None, + max_num_nodes: Optional[int] = None, + batch_size: Optional[int] = None, + ) -> Tuple[Tensor, Tensor]: r""" Args: x (torch.Tensor): The node feature tensor of shape @@ -97,9 +103,15 @@ def forward(self, x: Tensor, batch: Optional[Tensor] = None, node features of shape :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`. (default: :obj:`None`) + max_num_nodes (int, optional): The size of the :math:`B` node + dimension. Automatically calculated if not given. + (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. 
(default: :obj:`None`) """ if x.dim() <= 2: - x, mask = to_dense_batch(x, batch) + x, mask = to_dense_batch(x, batch, max_num_nodes=max_num_nodes, + batch_size=batch_size) elif mask is None: mask = x.new_ones((x.size(0), x.size(1)), dtype=torch.bool) From 086e31de60ac9196d5847fcc7a41408ac5f98596 Mon Sep 17 00:00:00 2001 From: Ramona Bendias Date: Thu, 27 Apr 2023 14:36:13 +0200 Subject: [PATCH 1143/2432] Add link to MovieLens Regression Colab Notebook (#7243) --- docs/source/get_started/colabs.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/get_started/colabs.rst b/docs/source/get_started/colabs.rst index 46a20acbf4c4..9ad7726d2a60 100644 --- a/docs/source/get_started/colabs.rst +++ b/docs/source/get_started/colabs.rst @@ -16,6 +16,7 @@ We have prepared a list of :colab:`Colab` notebooks that practically introduces 8. `Node Classification Instrumented with `__ :wandb:`null` `Weights&Biases `__ 9. `Graph Classification Instrumented with `__ :wandb:`null` `Weights&Biases `__ 10. `Link Prediction on MovieLens `__ +11. `Link Regression on MovieLens `__ All :colab:`Colab` notebooks are released under the MIT license. From 259cfa7fb220d9cb504ab9de52bcd9dc5267befe Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 27 Apr 2023 16:13:39 +0200 Subject: [PATCH 1144/2432] Drop `object` inheritance specification (#7247) --- benchmark/kernel/datasets.py | 2 +- benchmark/runtime/dgl/hidden.py | 2 +- docs/source/tutorial/load_csv.rst | 6 +++--- examples/attentive_fp.py | 2 +- examples/colors_topk_pool.py | 2 +- examples/hetero/load_csv.py | 6 +++--- examples/proteins_diff_pool.py | 2 +- examples/qm9_nn_conv.py | 4 ++-- examples/triangles_sag_pool.py | 2 +- torch_geometric/data/batch.py | 2 +- torch_geometric/data/data.py | 2 +- torch_geometric/data/view.py | 2 +- torch_geometric/datasets/entities.py | 2 +- torch_geometric/graphgym/logger.py | 2 +- torch_geometric/nn/conv/utils/inspector.py | 2 +- torch_geometric/nn/fx.py | 2 +- torch_geometric/nn/models/re_net.py | 2 +- torch_geometric/nn/models/tgn.py | 2 +- 18 files changed, 23 insertions(+), 23 deletions(-) diff --git a/benchmark/kernel/datasets.py b/benchmark/kernel/datasets.py index e7accdb553d6..0033db2e59f3 100644 --- a/benchmark/kernel/datasets.py +++ b/benchmark/kernel/datasets.py @@ -7,7 +7,7 @@ from torch_geometric.utils import degree -class NormalizedDegree(object): +class NormalizedDegree: def __init__(self, mean, std): self.mean = mean self.std = std diff --git a/benchmark/runtime/dgl/hidden.py b/benchmark/runtime/dgl/hidden.py index d38590e8cc93..69e19bd6e9e0 100644 --- a/benchmark/runtime/dgl/hidden.py +++ b/benchmark/runtime/dgl/hidden.py @@ -5,7 +5,7 @@ warnings.filterwarnings('ignore') -class HiddenPrint(object): +class HiddenPrint: def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') diff --git a/docs/source/tutorial/load_csv.rst b/docs/source/tutorial/load_csv.rst index 57281859ce2c..22ff4402ad72 100644 --- a/docs/source/tutorial/load_csv.rst +++ b/docs/source/tutorial/load_csv.rst @@ -118,7 +118,7 @@ For this, we make use of the excellent `sentence-transformers Any: raise NotImplementedError diff --git a/torch_geometric/data/view.py b/torch_geometric/data/view.py index 75e1b2c0252a..d03b96723451 100644 --- a/torch_geometric/data/view.py +++ b/torch_geometric/data/view.py @@ -2,7 +2,7 @@ from typing import Iterable, List -class MappingView(object): +class MappingView: def __init__(self, mapping: Mapping, *args: List[str]): self._mapping = mapping self._args = args diff --git 
a/torch_geometric/datasets/entities.py b/torch_geometric/datasets/entities.py index 52fcc0981178..210d27864cde 100644 --- a/torch_geometric/datasets/entities.py +++ b/torch_geometric/datasets/entities.py @@ -204,7 +204,7 @@ def __repr__(self) -> str: return f'{self.name.upper()}{self.__class__.__name__}()' -class hide_stdout(object): +class hide_stdout: def __enter__(self): self.level = logging.getLogger().level logging.getLogger().setLevel(logging.ERROR) diff --git a/torch_geometric/graphgym/logger.py b/torch_geometric/graphgym/logger.py index ac753b63e7d0..38c04ecefb90 100644 --- a/torch_geometric/graphgym/logger.py +++ b/torch_geometric/graphgym/logger.py @@ -35,7 +35,7 @@ def set_printing(): logging.basicConfig(**logging_cfg) -class Logger(object): +class Logger: def __init__(self, name='train', task_type=None): self.name = name self.task_type = task_type diff --git a/torch_geometric/nn/conv/utils/inspector.py b/torch_geometric/nn/conv/utils/inspector.py index 538db7988a8f..e5d42b77aa17 100644 --- a/torch_geometric/nn/conv/utils/inspector.py +++ b/torch_geometric/nn/conv/utils/inspector.py @@ -6,7 +6,7 @@ from torch_geometric.nn.conv.utils.typing import parse_types -class Inspector(object): +class Inspector: def __init__(self, base_class: Any): self.base_class: Any = base_class self.params: Dict[str, Dict[str, Any]] = {} diff --git a/torch_geometric/nn/fx.py b/torch_geometric/nn/fx.py index faf3f6314a37..573e3f2bc506 100644 --- a/torch_geometric/nn/fx.py +++ b/torch_geometric/nn/fx.py @@ -10,7 +10,7 @@ GraphModule, Graph, Node = 'GraphModule', 'Graph', 'Node' -class Transformer(object): +class Transformer: r"""A :class:`Transformer` executes an FX graph node-by-node, applies transformations to each node, and produces a new :class:`torch.nn.Module`. It exposes a :func:`transform` method that returns the transformed diff --git a/torch_geometric/nn/models/re_net.py b/torch_geometric/nn/models/re_net.py index d15f173a097d..cb4ec8e2bcc8 100644 --- a/torch_geometric/nn/models/re_net.py +++ b/torch_geometric/nn/models/re_net.py @@ -94,7 +94,7 @@ def pre_transform(seq_len: int) -> Callable: of a :class:`torch_geometric.datasets.icews.EventDataset` with :math:`k` denoting the sequence length :obj:`seq_len`. """ - class PreTransform(object): + class PreTransform: def __init__(self, seq_len: int): self.seq_len = seq_len self.inc = 5000 diff --git a/torch_geometric/nn/models/tgn.py b/torch_geometric/nn/models/tgn.py index 9026fb4e8c8c..d224f9b06209 100644 --- a/torch_geometric/nn/models/tgn.py +++ b/torch_geometric/nn/models/tgn.py @@ -220,7 +220,7 @@ def forward(self, t: Tensor) -> Tensor: return self.lin(t.view(-1, 1)).cos() -class LastNeighborLoader(object): +class LastNeighborLoader: def __init__(self, num_nodes: int, size: int, device=None): self.size = size From d534de378b9f0dffcf46cbf9653c8c058c31cef1 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Thu, 27 Apr 2023 19:57:13 +0530 Subject: [PATCH 1145/2432] Create `CODEOWNERS` (#7245) These are just an initial version of codeowners. Will keep updating and adding new codeowners in the near term. Code owners automatically get requested for a review. 
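For reference, each `CODEOWNERS` line pairs a path pattern with the handles whose review is requested when matching files change; if multiple patterns match a file, the last matching entry takes precedence. A hypothetical entry (the actual file added in the diff below is authoritative):

```
# Changes under torch_geometric/nn/ would request a review from @example-maintainer
/torch_geometric/nn/ @example-maintainer
```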
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CODEOWNERS | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000000..ac674f04e8a2 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,34 @@ +# About code owners +# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners +# +# Code owners recieve review requests when parts of code they "own" change. + +*.py @rusty1s @wsad1 + +/torch_geometric/nn/ @EdisonLeeeee + +/torch_geometric/explain/ @RexYing @RBendias @dufourc1 + +/torch_geometric/data/ @mananshah99 + +/torch_geometric/loader/ @mananshah99 @pyg-team/intel-team + +/torch_geometric/sampler/ @mananshah99 @pyg-team/intel-team + +/torch_geometric/transforms/ @wsad1 + +/torch_geometric/utils/ @wsad1 + +/torch_geometric/datasets/ @wsad1 + +/torch_geometric/graphgym/ @JiaxuanYou + +/test/ @wsad1 + +/docs/ @rusty1s + +/graphgym/ @JiaxuanYou + +/examples/ @wsad1 + +/benchmark/ @pyg-team/intel-team From e919aa1930bf262a120544ddc1685ecec29f2a64 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 27 Apr 2023 19:30:37 +0200 Subject: [PATCH 1146/2432] Introduce `InMemoryDataset.save` and `InMemoryDataset.load` (#7250) --- CHANGELOG.md | 1 + test/data/test_dataset.py | 64 ++++++++++++++++++++++- test/data/test_hetero_data.py | 14 +++++ torch_geometric/data/hetero_data.py | 24 +++++++-- torch_geometric/data/in_memory_dataset.py | 29 +++++++++- torch_geometric/data/storage.py | 8 ++- 6 files changed, 133 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 163647b09ef0..abc96f64859a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250)) - Added an example for heterogeneous GNN explanation via `CaptumExplainer` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) - Added `visualize_feature_importance` functionality to `HeteroExplanation` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) - Added a `AddRemainingSelfLoops` transform ([#7192](https://github.com/pyg-team/pytorch_geometric/pull/7192)) diff --git a/test/data/test_dataset.py b/test/data/test_dataset.py index 067a055a12f1..d82cb09bdf42 100644 --- a/test/data/test_dataset.py +++ b/test/data/test_dataset.py @@ -10,10 +10,24 @@ class MyTestDataset(InMemoryDataset): def __init__(self, data_list, transform=None): - super().__init__('/tmp/MyTestDataset', transform=transform) + super().__init__(None, transform=transform) self.data, self.slices = self.collate(data_list) +class MyStoredTestDataset(InMemoryDataset): + def __init__(self, root, data_list, transform=None): + self.data_list = data_list + super().__init__(root, transform=transform) + self.load(self.processed_paths[0], data_cls=data_list[0].__class__) + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def process(self): + self.save(self.data_list, self.processed_paths[0]) + + def test_in_memory_dataset(): x1 = torch.Tensor([[1], [1], [1]]) x2 = torch.Tensor([[2], [2], [2]]) @@ -57,6 +71,54 @@ def test_in_memory_dataset(): assert torch.equal(dataset[1:].x, x2) +def test_stored_in_memory_dataset(tmp_path): + x1 = torch.Tensor([[1], [1], [1]]) + x2 = torch.Tensor([[2], [2], [2], [2]]) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + + data1 = Data(x1, edge_index, num_nodes=3, test_int=1, test_str='1') + data2 = Data(x2, edge_index, num_nodes=4, test_int=2, test_str='2') + + dataset = MyStoredTestDataset(tmp_path, [data1, data2]) + assert dataset._data.num_nodes == 7 + assert dataset._data._num_nodes == [3, 4] + + assert torch.equal(dataset[0].x, x1) + assert torch.equal(dataset[0].edge_index, edge_index) + assert dataset[0].num_nodes == 3 + assert torch.equal(dataset[0].test_int, torch.tensor([1])) + assert dataset[0].test_str == '1' + + assert torch.equal(dataset[1].x, x2) + assert torch.equal(dataset[1].edge_index, edge_index) + assert dataset[1].num_nodes == 4 + assert torch.equal(dataset[1].test_int, torch.tensor([2])) + assert dataset[1].test_str == '2' + + +def test_stored_hetero_in_memory_dataset(tmp_path): + x1 = torch.Tensor([[1], [1], [1]]) + x2 = torch.Tensor([[2], [2], [2], [2]]) + + data1 = HeteroData() + data1['paper'].x = x1 + data1['paper'].num_nodes = 3 + + data2 = HeteroData() + data2['paper'].x = x2 + data2['paper'].num_nodes = 4 + + dataset = MyStoredTestDataset(tmp_path, [data1, data2]) + assert dataset._data['paper'].num_nodes == 7 + assert dataset._data['paper']._num_nodes == [3, 4] + + assert torch.equal(dataset[0]['paper'].x, x1) + assert dataset[0]['paper'].num_nodes == 3 + + assert torch.equal(dataset[1]['paper'].x, x2) + assert dataset[1]['paper'].num_nodes == 4 + + def test_in_memory_num_classes(): dataset = MyTestDataset([Data(), Data()]) assert dataset.num_classes == 0 diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 1ea3c79bbe99..e0be1dd69f94 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -90,6 +90,20 @@ def test_init_hetero_data(): assert len(data.edge_items()) == 3 +def test_hetero_data_to_from_dict(): + data = 
HeteroData() + data.global_id = '1' + data['v1'].x = torch.randn(5, 16) + data['v2'].y = torch.randn(4, 16) + data['v1', 'v2'].edge_index = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 3]]) + + out = HeteroData.from_dict(data.to_dict()) + assert out.global_id == data.global_id + assert torch.equal(out['v1'].x, data['v1'].x) + assert torch.equal(out['v2'].y, data['v2'].y) + assert torch.equal(out['v1', 'v2'].edge_index, data['v1', 'v2'].edge_index) + + def test_hetero_data_functions(): data = HeteroData() data['paper'].x = x_paper diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 7a9144e51873..abca8fd99d10 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -125,6 +125,23 @@ def __init__(self, _mapping: Optional[Dict[str, Any]] = None, **kwargs): else: setattr(self, key, value) + @classmethod + def from_dict(cls, mapping: Dict[str, Any]) -> 'HeteroData': + r"""Creates a :class:`~torch_geometric.data.HeteroData` object from a + Python dictionary.""" + out = cls() + for key, value in mapping.items(): + if key == '_global_store': + out.__dict__['_global_store'] = BaseStorage( + _parent=out, **value) + elif isinstance(key, str): + out._node_store_dict[key] = NodeStorage( + _parent=out, _key=key, **value) + else: + out._edge_store_dict[key] = EdgeStorage( + _parent=out, _key=key, **value) + return out + def __getattr__(self, key: str) -> Any: # `data.*_dict` => Link to node and edge stores. # `data.*` => Link to the `_global_store`. @@ -258,11 +275,12 @@ def edge_items(self) -> List[Tuple[EdgeType, EdgeStorage]]: return list(self._edge_store_dict.items()) def to_dict(self) -> Dict[str, Any]: - out = self._global_store.to_dict() + out_dict: Dict[str, Any] = {} + out_dict['_global_store'] = self._global_store.to_dict() for key, store in chain(self._node_store_dict.items(), self._edge_store_dict.items()): - out[key] = store.to_dict() - return out + out_dict[key] = store.to_dict() + return out_dict def to_namedtuple(self) -> NamedTuple: field_names = list(self._global_store.keys()) diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index 1e1b3ee19f67..9d54cf1b70fd 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -2,12 +2,24 @@ import warnings from abc import ABC from collections.abc import Mapping, Sequence -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union - +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Tuple, + Type, + Union, +) + +import torch from torch import Tensor from torch_geometric.data import Batch, Data from torch_geometric.data.collate import collate +from torch_geometric.data.data import BaseData from torch_geometric.data.dataset import Dataset, IndexType from torch_geometric.data.separate import separate @@ -93,6 +105,19 @@ def get(self, idx: int) -> Data: return data + @classmethod + def save(cls, data_list: List[BaseData], path: str): + r"""Saves a list of data objects to the file path :obj:`path`.""" + data, slices = cls.collate(data_list) + torch.save((data.to_dict(), slices), path) + + def load(self, path: str, data_cls: Type[BaseData] = Data): + r"""Loads the dataset from the file path :obj:`path`.""" + data, self.slices = torch.load(path) + if isinstance(data, dict): # Backward compatibility. 
+ data = data_cls.from_dict(data) + self.data = data + @staticmethod def collate( data_list: List[Data]) -> Tuple[Data, Optional[Dict[str, Tensor]]]: diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index 41a68c699665..f43cf1254354 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -191,7 +191,13 @@ def get(self, key: str, value: Optional[Any] = None) -> Any: def to_dict(self) -> Dict[str, Any]: r"""Returns a dictionary of stored key/value pairs.""" - return copy.copy(self._mapping) + out_dict = copy.copy(self._mapping) + # Needed to preserve individual `num_nodes` attributes when calling + # `BaseData.collate`. + # TODO (matthias) Try to make this more generic. + if '_num_nodes' in self.__dict__: + out_dict['_num_nodes'] = self.__dict__['_num_nodes'] + return out_dict def to_namedtuple(self) -> NamedTuple: r"""Returns a :obj:`NamedTuple` of stored key/value pairs.""" From 64f6512788d4e514ca5c94b373aaf5fc0231e87e Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Fri, 28 Apr 2023 14:35:44 +0530 Subject: [PATCH 1147/2432] Update `HGTConv` doc (#7255) Removed reference to `FastHGTConv`. It was "merged" with `HGTConv` [here](https://github.com/pyg-team/pytorch_geometric/pull/7117) --- torch_geometric/nn/conv/hgt_conv.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index 9287f6501ba6..97071cbe42a8 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ b/torch_geometric/nn/conv/hgt_conv.py @@ -25,11 +25,6 @@ class HGTConv(MessagePassing): `_. - .. note:: - - For a faster alternative, use :class:`FastHGTConv` which does not - iterate over individual node and edge types. - Args: in_channels (int or Dict[str, int]): Size of each input sample of every node type, or :obj:`-1` to derive the size from the first input(s) From cc6256e17581c7af86f19300973d7ff77516e013 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Fri, 28 Apr 2023 03:42:35 -0700 Subject: [PATCH 1148/2432] Deprecate `NeighborSampler` and `SplineConv` in examples (#7152) edited them to get them working, lmk if anything seems off --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/cora.py | 4 + examples/faust.py | 4 + examples/infomax_inductive.py | 53 +++++------ examples/mnist_graclus.py | 6 ++ examples/mnist_voxel_grid.py | 4 + examples/multi_gpu/data_parallel.py | 23 ++++- examples/ogbn_products_gat.py | 96 +++++++++----------- examples/ogbn_products_sage.py | 94 +++++++++---------- examples/point_transformer_classification.py | 14 ++- examples/point_transformer_segmentation.py | 7 +- examples/pointnet2_classification.py | 4 + examples/pointnet2_segmentation.py | 4 + examples/randlanet_classification.py | 4 + examples/randlanet_segmentation.py | 4 + torch_geometric/loader/utils.py | 7 +- torch_geometric/typing.py | 10 ++ 17 files changed, 197 insertions(+), 142 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index abc96f64859a..b30be2af51a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Updated examples to use `NeighborLoader` instead of `NeighborSampler` ([#7152](https://github.com/pyg-team/pytorch_geometric/pull/7152)) - Fixed `HGTConv` utility function `_construct_src_node_feat` ([#7194](https://github.com/pyg-team/pytorch_geometric/pull/7194)) - Extend dataset summary to create stats for each node/edge type ([#7203](https://github.com/pyg-team/pytorch_geometric/pull/7203)) - Added an optional `batch_size` argument to `avg_pool_x` and `max_pool_x` ([#7216](https://github.com/pyg-team/pytorch_geometric/pull/7216)) diff --git a/examples/cora.py b/examples/cora.py index 76f6142bd705..d30a0200f2d5 100644 --- a/examples/cora.py +++ b/examples/cora.py @@ -6,6 +6,10 @@ import torch_geometric.transforms as T from torch_geometric.datasets import Planetoid from torch_geometric.nn import SplineConv +from torch_geometric.typing import WITH_TORCH_SPLINE_CONV + +if not WITH_TORCH_SPLINE_CONV: + quit("This example requires 'torch-spline-conv'") dataset = 'Cora' transform = T.Compose([ diff --git a/examples/faust.py b/examples/faust.py index 0d63e2049ddd..899c6a1969bb 100644 --- a/examples/faust.py +++ b/examples/faust.py @@ -7,6 +7,10 @@ from torch_geometric.datasets import FAUST from torch_geometric.loader import DataLoader from torch_geometric.nn import SplineConv +from torch_geometric.typing import WITH_TORCH_SPLINE_CONV + +if not WITH_TORCH_SPLINE_CONV: + quit("This example requires 'torch-spline-conv'") path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'FAUST') pre_transform = T.Compose([T.FaceToEdge(), T.Constant(value=1)]) diff --git a/examples/infomax_inductive.py b/examples/infomax_inductive.py index 74a5354dd787..73e96e13734f 100644 --- a/examples/infomax_inductive.py +++ b/examples/infomax_inductive.py @@ -5,20 +5,18 @@ from tqdm import tqdm from torch_geometric.datasets import Reddit -from torch_geometric.loader import NeighborSampler +from torch_geometric.loader import NeighborLoader from torch_geometric.nn import DeepGraphInfomax, SAGEConv +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Reddit') dataset = Reddit(path) -data = dataset[0] - -train_loader = NeighborSampler(data.edge_index, node_idx=None, - sizes=[10, 10, 25], batch_size=256, - shuffle=True, num_workers=12) +data = dataset[0].to(device, 'x', 'edge_index') -test_loader = NeighborSampler(data.edge_index, node_idx=None, - sizes=[10, 10, 25], batch_size=256, - shuffle=False, num_workers=12) +train_loader = NeighborLoader(data, num_neighbors=[10, 10, 25], batch_size=256, + shuffle=True, num_workers=12) +test_loader = NeighborLoader(data, num_neighbors=[10, 10, 25], batch_size=256, + num_workers=12) class Encoder(nn.Module): @@ -37,19 +35,17 @@ def __init__(self, in_channels, hidden_channels): nn.PReLU(hidden_channels) ]) - def forward(self, x, adjs): - for i, (edge_index, _, size) in enumerate(adjs): - x_target = x[:size[1]] # Target nodes are always placed first. 
- x = self.convs[i]((x, x_target), edge_index) - x = self.activations[i](x) - return x + def forward(self, x, edge_index, batch_size): + for conv, act in zip(self.convs, self.activations): + x = conv(x, edge_index) + x = act(x) + return x[:batch_size] -def corruption(x, edge_index): - return x[torch.randperm(x.size(0))], edge_index +def corruption(x, edge_index, batch_size): + return x[torch.randperm(x.size(0))], edge_index, batch_size -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = DeepGraphInfomax( hidden_channels=512, encoder=Encoder(dataset.num_features, 512), summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)), @@ -58,20 +54,15 @@ def corruption(x, edge_index): model = model.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) -x, y = data.x.to(device), data.y.to(device) - def train(epoch): model.train() total_loss = total_examples = 0 - for batch_size, n_id, adjs in tqdm(train_loader, - desc=f'Epoch {epoch:02d}'): - # `adjs` holds a list of `(edge_index, e_id, size)` tuples. - adjs = [adj.to(device) for adj in adjs] - + for batch in tqdm(train_loader, desc=f'Epoch {epoch:02d}'): optimizer.zero_grad() - pos_z, neg_z, summary = model(x[n_id], adjs) + pos_z, neg_z, summary = model(batch.x, batch.edge_index, + batch.batch_size) loss = model.loss(pos_z, neg_z, summary) loss.backward() optimizer.step() @@ -86,13 +77,13 @@ def test(): model.eval() zs = [] - for i, (batch_size, n_id, adjs) in enumerate(test_loader): - adjs = [adj.to(device) for adj in adjs] - zs.append(model(x[n_id], adjs)[0]) + for batch in tqdm(test_loader, desc='Evaluating'): + pos_z, _, _ = model(batch.x, batch.edge_index, batch.batch_size) + zs.append(pos_z.cpu()) z = torch.cat(zs, dim=0) train_val_mask = data.train_mask | data.val_mask - acc = model.test(z[train_val_mask], y[train_val_mask], z[data.test_mask], - y[data.test_mask], max_iter=10000) + acc = model.test(z[train_val_mask], data.y[train_val_mask], + z[data.test_mask], data.y[data.test_mask], max_iter=10000) return acc diff --git a/examples/mnist_graclus.py b/examples/mnist_graclus.py index 2c1a85677e45..17e026f97264 100644 --- a/examples/mnist_graclus.py +++ b/examples/mnist_graclus.py @@ -13,8 +13,14 @@ max_pool, max_pool_x, ) +from torch_geometric.typing import WITH_TORCH_CLUSTER, WITH_TORCH_SPLINE_CONV from torch_geometric.utils import normalized_cut +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") +if not WITH_TORCH_SPLINE_CONV: + quit("This example requires 'torch-spline-conv'") + path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'MNIST') transform = T.Cartesian(cat=False) train_dataset = MNISTSuperpixels(path, True, transform=transform) diff --git a/examples/mnist_voxel_grid.py b/examples/mnist_voxel_grid.py index dc2a9195934f..62bad025cd23 100644 --- a/examples/mnist_voxel_grid.py +++ b/examples/mnist_voxel_grid.py @@ -7,6 +7,10 @@ from torch_geometric.datasets import MNISTSuperpixels from torch_geometric.loader import DataLoader from torch_geometric.nn import SplineConv, max_pool, max_pool_x, voxel_grid +from torch_geometric.typing import WITH_TORCH_SPLINE_CONV + +if not WITH_TORCH_SPLINE_CONV: + quit("This example requires 'torch-spline-conv'") path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'MNIST') transform = T.Cartesian(cat=False) diff --git a/examples/multi_gpu/data_parallel.py b/examples/multi_gpu/data_parallel.py index 42eb40feae22..e78fbafd1016 100644 --- a/examples/multi_gpu/data_parallel.py +++ 
b/examples/multi_gpu/data_parallel.py @@ -2,11 +2,18 @@ import torch import torch.nn.functional as F +from torch.nn import Linear, ReLU, Sequential import torch_geometric.transforms as T from torch_geometric.datasets import MNISTSuperpixels from torch_geometric.loader import DataListLoader -from torch_geometric.nn import DataParallel, SplineConv, global_mean_pool +from torch_geometric.nn import ( + DataParallel, + NNConv, + SplineConv, + global_mean_pool, +) +from torch_geometric.typing import WITH_TORCH_SPLINE_CONV path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data', 'MNIST') dataset = MNISTSuperpixels(path, transform=T.Cartesian()).shuffle() @@ -16,8 +23,18 @@ class Net(torch.nn.Module): def __init__(self): super().__init__() - self.conv1 = SplineConv(dataset.num_features, 32, dim=2, kernel_size=5) - self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5) + if WITH_TORCH_SPLINE_CONV: + self.conv1 = SplineConv(dataset.num_features, 32, dim=2, + kernel_size=5) + self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5) + else: + nn1 = Sequential(Linear(2, 25), ReLU(), + Linear(25, dataset.num_features * 32)) + self.conv1 = NNConv(dataset.num_features, 32, nn1, aggr='mean') + + nn2 = Sequential(Linear(2, 25), ReLU(), Linear(25, 32 * 64)) + self.conv2 = NNConv(32, 64, nn2, aggr='mean') + self.lin1 = torch.nn.Linear(64, 128) self.lin2 = torch.nn.Linear(128, dataset.num_classes) diff --git a/examples/ogbn_products_gat.py b/examples/ogbn_products_gat.py index 4688e8b61036..93a596178abc 100644 --- a/examples/ogbn_products_gat.py +++ b/examples/ogbn_products_gat.py @@ -8,22 +8,33 @@ from torch.nn import Linear as Lin from tqdm import tqdm -from torch_geometric.loader import NeighborSampler +from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GATConv +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products') dataset = PygNodePropPredDataset('ogbn-products', root) split_idx = dataset.get_idx_split() evaluator = Evaluator(name='ogbn-products') -data = dataset[0] - -train_idx = split_idx['train'] -train_loader = NeighborSampler(data.edge_index, node_idx=train_idx, - sizes=[10, 10, 10], batch_size=512, - shuffle=True, num_workers=12) -subgraph_loader = NeighborSampler(data.edge_index, node_idx=None, sizes=[-1], - batch_size=1024, shuffle=False, - num_workers=12) +data = dataset[0].to(device, 'x', 'y') + +train_loader = NeighborLoader( + data, + input_nodes=split_idx['train'], + num_neighbors=[10, 10, 5], + batch_size=512, + shuffle=True, + num_workers=12, + persistent_workers=True, +) +subgraph_loader = NeighborLoader( + data, + input_nodes=None, + num_neighbors=[-1], + batch_size=2048, + num_workers=12, + persistent_workers=True, +) class GAT(torch.nn.Module): @@ -56,21 +67,13 @@ def reset_parameters(self): for skip in self.skips: skip.reset_parameters() - def forward(self, x, adjs): - # `train_loader` computes the k-hop neighborhood of a batch of nodes, - # and returns, for each layer, a bipartite graph object, holding the - # bipartite edges `edge_index`, the index `e_id` of the original edges, - # and the size/shape `size` of the bipartite graph. - # Target nodes are also included in the source nodes so that one can - # easily apply skip-connections or add self-loops. - for i, (edge_index, _, size) in enumerate(adjs): - x_target = x[:size[1]] # Target nodes are always placed first. 
- x = self.convs[i]((x, x_target), edge_index) - x = x + self.skips[i](x_target) + def forward(self, x, edge_index): + for i, (conv, skip) in enumerate(zip(self.convs, self.skips)): + x = conv(x, edge_index) + skip(x) if i != self.num_layers - 1: x = F.elu(x) x = F.dropout(x, p=0.5, training=self.training) - return x.log_softmax(dim=-1) + return x def inference(self, x_all): pbar = tqdm(total=x_all.size(0) * self.num_layers) @@ -79,22 +82,18 @@ def inference(self, x_all): # Compute representations of nodes layer by layer, using *all* # available edges. This leads to faster computation in contrast to # immediately computing the final representations of each batch. - total_edges = 0 for i in range(self.num_layers): xs = [] - for batch_size, n_id, adj in subgraph_loader: - edge_index, _, size = adj.to(device) - total_edges += edge_index.size(1) - x = x_all[n_id].to(device) - x_target = x[:size[1]] - x = self.convs[i]((x, x_target), edge_index) - x = x + self.skips[i](x_target) - + for batch in subgraph_loader: + x = x_all[batch.n_id].to(device) + edge_index = batch.edge_index.to(device) + x = self.convs[i](x, edge_index) + self.skips[i](x) + x = x[:batch.batch_size] if i != self.num_layers - 1: x = F.elu(x) xs.append(x.cpu()) - pbar.update(batch_size) + pbar.update(batch.batch_size) x_all = torch.cat(xs, dim=0) @@ -103,40 +102,33 @@ def inference(self, x_all): return x_all -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = GAT(dataset.num_features, 128, dataset.num_classes, num_layers=3, - heads=4) -model = model.to(device) - -x = data.x.to(device) -y = data.y.squeeze().to(device) + heads=4).to(device) def train(epoch): model.train() - pbar = tqdm(total=train_idx.size(0)) + pbar = tqdm(total=split_idx['train'].size(0)) pbar.set_description(f'Epoch {epoch:02d}') total_loss = total_correct = 0 - for batch_size, n_id, adjs in train_loader: - # `adjs` holds a list of `(edge_index, e_id, size)` tuples. 
- adjs = [adj.to(device) for adj in adjs] - + for batch in train_loader: optimizer.zero_grad() - out = model(x[n_id], adjs) - loss = F.nll_loss(out, y[n_id[:batch_size]]) + out = model(batch.x, batch.edge_index.to(device))[:batch.batch_size] + y = batch.y[:batch.batch_size].squeeze() + loss = F.cross_entropy(out, y) loss.backward() optimizer.step() total_loss += float(loss) - total_correct += int(out.argmax(dim=-1).eq(y[n_id[:batch_size]]).sum()) - pbar.update(batch_size) + total_correct += int(out.argmax(dim=-1).eq(y).sum()) + pbar.update(batch.batch_size) pbar.close() loss = total_loss / len(train_loader) - approx_acc = total_correct / train_idx.size(0) + approx_acc = total_correct / split_idx['train'].size(0) return loss, approx_acc @@ -145,9 +137,9 @@ def train(epoch): def test(): model.eval() - out = model.inference(x) + out = model.inference(data.x) - y_true = y.cpu().unsqueeze(-1) + y_true = data.y.cpu() y_pred = out.argmax(dim=-1, keepdim=True) train_acc = evaluator.eval({ @@ -168,9 +160,7 @@ def test(): test_accs = [] for run in range(1, 11): - print('') - print(f'Run {run:02d}:') - print('') + print(f'\nRun {run:02d}:\n') model.reset_parameters() optimizer = torch.optim.Adam(model.parameters(), lr=0.001) diff --git a/examples/ogbn_products_sage.py b/examples/ogbn_products_sage.py index 0ff2406d63ad..7dbb96b481c2 100644 --- a/examples/ogbn_products_sage.py +++ b/examples/ogbn_products_sage.py @@ -7,22 +7,33 @@ from ogb.nodeproppred import Evaluator, PygNodePropPredDataset from tqdm import tqdm -from torch_geometric.loader import NeighborSampler +from torch_geometric.loader import NeighborLoader from torch_geometric.nn import SAGEConv +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products') dataset = PygNodePropPredDataset('ogbn-products', root) split_idx = dataset.get_idx_split() evaluator = Evaluator(name='ogbn-products') -data = dataset[0] - -train_idx = split_idx['train'] -train_loader = NeighborSampler(data.edge_index, node_idx=train_idx, - sizes=[15, 10, 5], batch_size=1024, - shuffle=True, num_workers=12) -subgraph_loader = NeighborSampler(data.edge_index, node_idx=None, sizes=[-1], - batch_size=4096, shuffle=False, - num_workers=12) +data = dataset[0].to(device, 'x', 'y') + +train_loader = NeighborLoader( + data, + input_nodes=split_idx['train'], + num_neighbors=[15, 10, 5], + batch_size=1024, + shuffle=True, + num_workers=12, + persistent_workers=True, +) +subgraph_loader = NeighborLoader( + data, + input_nodes=None, + num_neighbors=[-1], + batch_size=4096, + num_workers=12, + persistent_workers=True, +) class SAGE(torch.nn.Module): @@ -41,20 +52,13 @@ def reset_parameters(self): for conv in self.convs: conv.reset_parameters() - def forward(self, x, adjs): - # `train_loader` computes the k-hop neighborhood of a batch of nodes, - # and returns, for each layer, a bipartite graph object, holding the - # bipartite edges `edge_index`, the index `e_id` of the original edges, - # and the size/shape `size` of the bipartite graph. - # Target nodes are also included in the source nodes so that one can - # easily apply skip-connections or add self-loops. - for i, (edge_index, _, size) in enumerate(adjs): - x_target = x[:size[1]] # Target nodes are always placed first. 
- x = self.convs[i]((x, x_target), edge_index) + def forward(self, x, edge_index): + for i, conv in enumerate(self.convs): + x = conv(x, edge_index) if i != self.num_layers - 1: - x = F.relu(x) + x = x.relu() x = F.dropout(x, p=0.5, training=self.training) - return x.log_softmax(dim=-1) + return x def inference(self, x_all): pbar = tqdm(total=x_all.size(0) * self.num_layers) @@ -63,20 +67,18 @@ def inference(self, x_all): # Compute representations of nodes layer by layer, using *all* # available edges. This leads to faster computation in contrast to # immediately computing the final representations of each batch. - total_edges = 0 for i in range(self.num_layers): xs = [] - for batch_size, n_id, adj in subgraph_loader: - edge_index, _, size = adj.to(device) - total_edges += edge_index.size(1) - x = x_all[n_id].to(device) - x_target = x[:size[1]] - x = self.convs[i]((x, x_target), edge_index) + for batch in subgraph_loader: + x = x_all[batch.n_id].to(device) + edge_index = batch.edge_index.to(device) + x = self.convs[i](x, edge_index) + x = x[:batch.batch_size] if i != self.num_layers - 1: - x = F.relu(x) + x = x.relu() xs.append(x.cpu()) - pbar.update(batch_size) + pbar.update(batch.batch_size) x_all = torch.cat(xs, dim=0) @@ -85,39 +87,33 @@ def inference(self, x_all): return x_all -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = SAGE(dataset.num_features, 256, dataset.num_classes, num_layers=3) model = model.to(device) -x = data.x.to(device) -y = data.y.squeeze().to(device) - def train(epoch): model.train() - pbar = tqdm(total=train_idx.size(0)) + pbar = tqdm(total=split_idx['train'].size(0)) pbar.set_description(f'Epoch {epoch:02d}') total_loss = total_correct = 0 - for batch_size, n_id, adjs in train_loader: - # `adjs` holds a list of `(edge_index, e_id, size)` tuples. 
- adjs = [adj.to(device) for adj in adjs] - + for batch in train_loader: optimizer.zero_grad() - out = model(x[n_id], adjs) - loss = F.nll_loss(out, y[n_id[:batch_size]]) + out = model(batch.x, batch.edge_index.to(device))[:batch.batch_size] + y = batch.y[:batch.batch_size].squeeze() + loss = F.cross_entropy(out, y) loss.backward() optimizer.step() total_loss += float(loss) - total_correct += int(out.argmax(dim=-1).eq(y[n_id[:batch_size]]).sum()) - pbar.update(batch_size) + total_correct += int(out.argmax(dim=-1).eq(y).sum()) + pbar.update(batch.batch_size) pbar.close() loss = total_loss / len(train_loader) - approx_acc = total_correct / train_idx.size(0) + approx_acc = total_correct / split_idx['train'].size(0) return loss, approx_acc @@ -126,9 +122,9 @@ def train(epoch): def test(): model.eval() - out = model.inference(x) + out = model.inference(data.x) - y_true = y.cpu().unsqueeze(-1) + y_true = data.y.cpu() y_pred = out.argmax(dim=-1, keepdim=True) train_acc = evaluator.eval({ @@ -149,9 +145,7 @@ def test(): test_accs = [] for run in range(1, 11): - print('') - print(f'Run {run:02d}:') - print('') + print(f'\nRun {run:02d}:\n') model.reset_parameters() optimizer = torch.optim.Adam(model.parameters(), lr=0.003) diff --git a/examples/point_transformer_classification.py b/examples/point_transformer_classification.py index 8556249c4a6e..9a22e6a2c490 100644 --- a/examples/point_transformer_classification.py +++ b/examples/point_transformer_classification.py @@ -3,14 +3,24 @@ import torch import torch.nn.functional as F from torch.nn import Linear as Lin -from torch_cluster import fps, knn_graph import torch_geometric.transforms as T from torch_geometric.datasets import ModelNet from torch_geometric.loader import DataLoader -from torch_geometric.nn import MLP, PointTransformerConv, global_mean_pool, knn +from torch_geometric.nn import ( + MLP, + PointTransformerConv, + fps, + global_mean_pool, + knn, + knn_graph, +) +from torch_geometric.typing import WITH_TORCH_CLUSTER from torch_geometric.utils import scatter +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") + path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data/ModelNet10') pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024) train_dataset = ModelNet(path, '10', True, transform, pre_transform) diff --git a/examples/point_transformer_segmentation.py b/examples/point_transformer_segmentation.py index 1d00268639d3..feeb247fe189 100644 --- a/examples/point_transformer_segmentation.py +++ b/examples/point_transformer_segmentation.py @@ -3,15 +3,18 @@ import torch import torch.nn.functional as F from point_transformer_classification import TransformerBlock, TransitionDown -from torch_cluster import knn_graph from torchmetrics.functional import jaccard_index import torch_geometric.transforms as T from torch_geometric.datasets import ShapeNet from torch_geometric.loader import DataLoader -from torch_geometric.nn import MLP, knn_interpolate +from torch_geometric.nn import MLP, knn_graph, knn_interpolate +from torch_geometric.typing import WITH_TORCH_CLUSTER from torch_geometric.utils import scatter +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") + category = 'Airplane' # Pass in `None` to train on all categories. 
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') transform = T.Compose([ diff --git a/examples/pointnet2_classification.py b/examples/pointnet2_classification.py index 4bae3b395532..fa19754912af 100644 --- a/examples/pointnet2_classification.py +++ b/examples/pointnet2_classification.py @@ -7,6 +7,10 @@ from torch_geometric.datasets import ModelNet from torch_geometric.loader import DataLoader from torch_geometric.nn import MLP, PointNetConv, fps, global_max_pool, radius +from torch_geometric.typing import WITH_TORCH_CLUSTER + +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") class SAModule(torch.nn.Module): diff --git a/examples/pointnet2_segmentation.py b/examples/pointnet2_segmentation.py index 1c575b40352d..5ced25b060c3 100644 --- a/examples/pointnet2_segmentation.py +++ b/examples/pointnet2_segmentation.py @@ -9,8 +9,12 @@ from torch_geometric.datasets import ShapeNet from torch_geometric.loader import DataLoader from torch_geometric.nn import MLP, knn_interpolate +from torch_geometric.typing import WITH_TORCH_CLUSTER from torch_geometric.utils import scatter +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") + category = 'Airplane' # Pass in `None` to train on all categories. path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') transform = T.Compose([ diff --git a/examples/randlanet_classification.py b/examples/randlanet_classification.py index 9d9031e05181..f654070595b4 100644 --- a/examples/randlanet_classification.py +++ b/examples/randlanet_classification.py @@ -20,8 +20,12 @@ from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.pool import knn_graph from torch_geometric.nn.pool.decimation import decimation_indices +from torch_geometric.typing import WITH_TORCH_CLUSTER from torch_geometric.utils import softmax +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") + # Default activation and batch norm parameters used by RandLA-Net: lrelu02_kwargs = {'negative_slope': 0.2} bn099_kwargs = {'momentum': 0.01, 'eps': 1e-6} diff --git a/examples/randlanet_segmentation.py b/examples/randlanet_segmentation.py index cc5af07335bd..39602da37045 100644 --- a/examples/randlanet_segmentation.py +++ b/examples/randlanet_segmentation.py @@ -16,8 +16,12 @@ from torch_geometric.datasets import ShapeNet from torch_geometric.loader import DataLoader from torch_geometric.nn import knn_interpolate +from torch_geometric.typing import WITH_TORCH_CLUSTER from torch_geometric.utils import scatter +if not WITH_TORCH_CLUSTER: + quit("This example requires 'torch-cluster'") + category = 'Airplane' # Pass in `None` to train on all categories. 
category_num_classes = 4 # 4 for Airplane - see ShapeNet for details path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ShapeNet') diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 1a2eeb855b25..00eca9c7090b 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -7,6 +7,7 @@ import torch from torch import Tensor +import torch_geometric.typing from torch_geometric.data import ( Data, FeatureStore, @@ -41,7 +42,11 @@ def index_select(value: FeatureTensorType, index: Tensor, size = list(value.shape) size[dim] = index.numel() numel = math.prod(size) - storage = value.storage()._new_shared(numel) + if torch_geometric.typing.WITH_PT2: + storage = value.untyped_storage()._new_shared( + numel * value.element_size()) + else: + storage = value.storage()._new_shared(numel) out = value.new(storage).view(size) return torch.index_select(value, dim, index, out=out) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index a05fd2bd09a2..c0bc45a183c1 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -42,6 +42,16 @@ f"Disabling its usage. Stacktrace: {e}") WITH_TORCH_CLUSTER = False +try: + import torch_spline_conv # noqa + WITH_TORCH_SPLINE_CONV = True +except (ImportError, OSError) as e: + if isinstance(e, OSError): + warnings.warn( + f"An issue occurred while importing 'torch-spline-conv'. " + f"Disabling its usage. Stacktrace: {e}") + WITH_TORCH_SPLINE_CONV = False + try: import torch_sparse # noqa from torch_sparse import SparseStorage, SparseTensor From 8b37ad571b6e08d700f344cd9965724939f4bd4c Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Fri, 28 Apr 2023 06:05:05 -0700 Subject: [PATCH 1149/2432] Added support for `torch.sparse.Tensor` in `DataLoader` (#7252) this implementation isnt working yet, it currently fails with shape mismatch on a Linear layer but passes the collate part example repro: `cd /opt/pyg; pip uninstall -y torch-geometric torch-sparse; rm -rf pytorch_geometric; git clone -b collate_fix https://github.com/pyg-team/pytorch_geometric.git; cd /opt/pyg/pytorch_geometric; pip install .; python3 examples/gcn2_ppi.py` ``` e_idxs_to_cat.size()= [torch.Size([2, 48146]), torch.Size([2, 88335])] value.size()= torch.Size([4693, 2815]) Traceback (most recent call last): File "examples/gcn2_ppi.py", line 93, in loss = train() File "examples/gcn2_ppi.py", line 70, in train loss = criterion(model(data.x, data.adj_t), data.y) File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1533, in _call_impl return forward_call(*args, **kwargs) File "examples/gcn2_ppi.py", line 46, in forward h = conv(h, x_0, adj_t) File "/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py", line 1533, in _call_impl return forward_call(*args, **kwargs) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/conv/gcn2_conv.py", line 138, in forward x = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=None) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/conv/message_passing.py", line 437, in propagate out = self.message_and_aggregate(edge_index, **msg_aggr_kwargs) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/nn/conv/gcn2_conv.py", line 159, in message_and_aggregate return spmm(adj_t, x, reduce=self.aggr) File "/usr/local/lib/python3.8/dist-packages/torch_geometric/utils/spmm.py", line 80, in spmm return torch.sparse.mm(src, other) ``` (just remove the check that triggers `This example requires 'torch-sparse'`) 
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/gcn2_ppi.py | 4 -- test/data/test_batch.py | 33 +++++++++++-- torch_geometric/data/collate.py | 11 +++-- torch_geometric/data/data.py | 4 +- torch_geometric/data/separate.py | 3 +- torch_geometric/utils/__init__.py | 4 +- torch_geometric/utils/sparse.py | 79 ++++++++++++++++++++++++++++++- 8 files changed, 124 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b30be2af51a1..87903de705bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added support for `torch.sparse.Tensor` in `DataLoader` ([#7252](https://github.com/pyg-team/pytorch_geometric/pull/7252)) - Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250)) - Added an example for heterogeneous GNN explanation via `CaptumExplainer` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) - Added `visualize_feature_importance` functionality to `HeteroExplanation` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) diff --git a/examples/gcn2_ppi.py b/examples/gcn2_ppi.py index 87fcbb4ce5da..a22cc5ffa64c 100644 --- a/examples/gcn2_ppi.py +++ b/examples/gcn2_ppi.py @@ -9,10 +9,6 @@ from torch_geometric.datasets import PPI from torch_geometric.loader import DataLoader from torch_geometric.nn import GCN2Conv -from torch_geometric.typing import WITH_TORCH_SPARSE - -if not WITH_TORCH_SPARSE: - quit("This example requires 'torch-sparse'") path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'GCN2_PPI') pre_transform = T.Compose([T.GCNNorm(), T.ToSparseTensor()]) diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 76dc187b625f..ca5f46324ede 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -1,12 +1,14 @@ import os.path as osp import numpy as np +import pytest import torch import torch_geometric from torch_geometric.data import Batch, Data, HeteroData from torch_geometric.testing import get_random_edge_index, withPackage from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_edge_index, to_torch_sparse_tensor def test_batch_basic(): @@ -466,12 +468,10 @@ def tr(n, m): d4 = Data(xs=[tr(4, 3), tr(16, 4), tr(1, 2)], a={"aa": tr(8, 3)}, x=tr(8, 5)) - # Dataset data_list = [d1, d2, d3, d4] batch = Batch.from_data_list(data_list, follow_batch=['xs', 'a']) - # assert shapes assert batch.xs[0].shape == (19, 3) assert batch.xs[1].shape == (56, 4) assert batch.xs[2].shape == (7, 2) @@ -480,7 +480,6 @@ def tr(n, m): assert len(batch.xs_batch) == 3 assert len(batch.a_batch) == 1 - # assert _batch assert batch.xs_batch[0].tolist() == \ [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3] assert batch.xs_batch[1].tolist() == \ @@ -490,3 +489,31 @@ def tr(n, m): assert batch.a_batch['aa'].tolist() == \ [0] * 11 + [1] * 2 + [2] * 4 + [3] * 8 + + +@withPackage('torch>=2.0.0') +@pytest.mark.parametrize('layout', [ + torch.sparse_coo, + torch.sparse_csr, + torch.sparse_csc, +]) +def test_torch_sparse_batch(layout): + x_dense = torch.randn(3, 4) + x = x_dense.to_sparse(layout=layout) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + edge_attr = torch.rand(4) + adj = to_torch_sparse_tensor(edge_index, edge_attr, layout=layout) + + data = Data(x=x, adj=adj) + + batch = Batch.from_data_list([data, data]) 
+ + assert batch.x.size() == (6, 4) + assert batch.x.layout == layout + assert torch.equal(batch.x.to_dense(), torch.cat([x_dense, x_dense], 0)) + + assert batch.adj.size() == (6, 6) + assert batch.adj.layout == layout + out = to_edge_index(batch.adj.to_sparse(layout=torch.sparse_csr)) + assert torch.equal(out[0], torch.cat([edge_index, edge_index + 3], 1)) + assert torch.equal(out[1], torch.cat([edge_attr, edge_attr], 0)) diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index 3d76bc194de8..a9896916dc91 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -9,6 +9,8 @@ from torch_geometric.data.data import BaseData from torch_geometric.data.storage import BaseStorage, NodeStorage from torch_geometric.typing import SparseTensor, torch_sparse +from torch_geometric.utils import is_sparse, is_torch_sparse_tensor +from torch_geometric.utils.sparse import cat def collate( @@ -122,7 +124,7 @@ def _collate( elem = values[0] - if isinstance(elem, Tensor): + if isinstance(elem, Tensor) and not is_sparse(elem): # Concatenate a list of `torch.Tensor` along the `cat_dim`. # NOTE: We need to take care of incrementing elements appropriately. key = str(key) @@ -160,7 +162,7 @@ def _collate( value = torch.cat(values, dim=cat_dim or 0, out=out) return value, slices, incs - elif isinstance(elem, SparseTensor) and increment: + elif is_sparse(elem) and increment: # Concatenate a list of `SparseTensor` along the `cat_dim`. # NOTE: `cat_dim` may return a tuple to allow for diagonal stacking. key = str(key) @@ -168,7 +170,10 @@ def _collate( cat_dims = (cat_dim, ) if isinstance(cat_dim, int) else cat_dim repeats = [[value.size(dim) for dim in cat_dims] for value in values] slices = cumsum(repeats) - value = torch_sparse.cat(values, dim=cat_dim) + if is_torch_sparse_tensor(elem): + value = cat(values, dim=cat_dim) + else: + value = torch_sparse.cat(values, dim=cat_dim) return value, slices, None elif isinstance(elem, (int, float)): diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 33ca67e781c5..e620a5520251 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -37,7 +37,7 @@ OptTensor, SparseTensor, ) -from torch_geometric.utils import select, subgraph +from torch_geometric.utils import is_sparse, select, subgraph class BaseData: @@ -518,7 +518,7 @@ def update(self, data: Union['Data', Dict[str, Any]]) -> 'Data': return self def __cat_dim__(self, key: str, value: Any, *args, **kwargs) -> Any: - if isinstance(value, SparseTensor) and 'adj' in key: + if is_sparse(value) and 'adj' in key: return (0, 1) elif 'index' in key or key == 'face': return -1 diff --git a/torch_geometric/data/separate.py b/torch_geometric/data/separate.py index 5fcb98b9203c..c16e94cbeafa 100644 --- a/torch_geometric/data/separate.py +++ b/torch_geometric/data/separate.py @@ -65,7 +65,8 @@ def _separate( start, end = int(slices[idx]), int(slices[idx + 1]) value = narrow(value, cat_dim or 0, start, end - start) value = value.squeeze(0) if cat_dim is None else value - if decrement and (incs.dim() > 1 or int(incs[idx]) != 0): + if (decrement and incs is not None + and (incs.dim() > 1 or int(incs[idx]) != 0)): value = value - incs[idx].to(value.device) return value diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index f1a602a652f2..552a83c363cc 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -27,7 +27,8 @@ from .nested import to_nested_tensor, 
from_nested_tensor from .sparse import (dense_to_sparse, is_sparse, is_torch_sparse_tensor, to_torch_coo_tensor, to_torch_csr_tensor, - to_torch_csc_tensor, to_edge_index) + to_torch_csc_tensor, to_torch_sparse_tensor, + to_edge_index) from .spmm import spmm from .unbatch import unbatch, unbatch_edge_index from .one_hot import one_hot @@ -99,6 +100,7 @@ 'to_torch_coo_tensor', 'to_torch_csr_tensor', 'to_torch_csc_tensor', + 'to_torch_sparse_tensor', 'to_edge_index', 'spmm', 'unbatch', diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py index d05d91730802..ada6d2e6a51a 100644 --- a/torch_geometric/utils/sparse.py +++ b/torch_geometric/utils/sparse.py @@ -1,4 +1,4 @@ -from typing import Any, Optional, Tuple, Union +from typing import Any, List, Optional, Tuple, Union import torch from torch import Tensor @@ -239,6 +239,44 @@ def to_torch_csc_tensor( return adj +def to_torch_sparse_tensor( + edge_index: Tensor, + edge_attr: Optional[Tensor] = None, + size: Optional[Union[int, Tuple[int, int]]] = None, + is_coalesced: bool = False, + layout: torch.layout = torch.sparse_coo, +): + r"""Converts a sparse adjacency matrix defined by edge indices and edge + attributes to a :class:`torch.sparse.Tensor` with custom :obj:`layout`. + See :meth:`~torch_geometric.utils.to_edge_index` for the reverse operation. + + Args: + edge_index (LongTensor): The edge indices. + edge_attr (Tensor, optional): The edge attributes. + (default: :obj:`None`) + size (int or (int, int), optional): The size of the sparse matrix. + If given as an integer, will create a quadratic sparse matrix. + If set to :obj:`None`, will infer a quadratic sparse matrix based + on :obj:`edge_index.max() + 1`. (default: :obj:`None`) + is_coalesced (bool): If set to :obj:`True`, will assume that + :obj:`edge_index` is already coalesced and thus avoids expensive + computation. (default: :obj:`False`) + layout (torch.layout, optional): The layout of the output sparse tensor + (:obj:`torch.sparse_coo`, :obj:`torch.sparse_csr`, + :obj:`torch.sparse_csc`). (default: :obj:`torch.sparse_coo`) + + :rtype: :class:`torch.sparse.Tensor` + """ + if layout == torch.sparse_coo: + return to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) + if layout == torch.sparse_csr: + return to_torch_csr_tensor(edge_index, edge_attr, size, is_coalesced) + if layout == torch.sparse_csc: + return to_torch_csc_tensor(edge_index, edge_attr, size, is_coalesced) + + raise ValueError(f"Unexpected sparse tensor layout (got '{layout}')") + + def to_edge_index(adj: Union[Tensor, SparseTensor]) -> Tuple[Tensor, Tensor]: r"""Converts a :class:`torch.sparse.Tensor` or a :class:`torch_sparse.SparseTensor` to edge indices and edge attributes. @@ -341,3 +379,42 @@ def ptr2index(ptr: Tensor) -> Tensor: def index2ptr(index: Tensor, size: int) -> Tensor: return torch._convert_indices_from_coo_to_csr( index, size, out_int32=index.dtype == torch.int32) + + +def cat(tensors: List[Tensor], dim: Union[int, Tuple[int, int]]) -> Tensor: + # TODO (matthias) We can make this more efficient by directly operating on + # the individual sparse tensor layouts. 
+ assert dim in {0, 1, (0, 1)} + + size = [0, 0] + edge_indices = [] + edge_attrs = [] + for tensor in tensors: + assert is_torch_sparse_tensor(tensor) + edge_index, edge_attr = to_edge_index(tensor) + edge_index = edge_index.clone() + + if dim == 0: + edge_index[0] += size[0] + size[0] += tensor.size(0) + size[1] = max(size[1], tensor.size(1)) + elif dim == 1: + edge_index[1] += size[1] + size[0] = max(size[0], tensor.size(0)) + size[1] += tensor.size(1) + else: + edge_index[0] += size[0] + edge_index[1] += size[1] + size[0] += tensor.size(0) + size[1] += tensor.size(1) + + edge_indices.append(edge_index) + edge_attrs.append(edge_attr) + + return to_torch_sparse_tensor( + edge_index=torch.cat(edge_indices, dim=1), + edge_attr=torch.cat(edge_attrs, dim=0), + size=size, + is_coalesced=dim == (0, 1), + layout=tensors[0].layout, + ) From a0d94b1d929803761769aed4fc4f654407fc932a Mon Sep 17 00:00:00 2001 From: Saurav Maheshkar Date: Mon, 1 May 2023 22:27:16 +0100 Subject: [PATCH 1150/2432] gh: move `CODEOWNERS` inside the `.github/` dir (#7267) This PR aims to move the `CODEOWNERS` file within the `.github/` dir. Github looks for a code of conduct file in the following order: `.github dir > project root > docs dir` ([source](https://docs.github.com/en/communities/setting-up-your-project-for-healthy-contributions/setting-guidelines-for-repository-contributors)). Moving the file to the `.github/` dir leads to a minimal project structure without any functional change. Similar to https://github.com/pyg-team/pytorch_geometric/pull/6768, therefore skipping changelog --- CODEOWNERS => .github/CODEOWNERS | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename CODEOWNERS => .github/CODEOWNERS (100%) diff --git a/CODEOWNERS b/.github/CODEOWNERS similarity index 100% rename from CODEOWNERS rename to .github/CODEOWNERS From 11f0b2239ad3ef572f4d744d71f239b5582d7730 Mon Sep 17 00:00:00 2001 From: Amund Vedal <22004000+vedal@users.noreply.github.com> Date: Tue, 2 May 2023 13:42:33 +0200 Subject: [PATCH 1151/2432] Minor equation update in documentation (#7272) Node index here should probably be "i" rather than "v" --------- Co-authored-by: Jintang Li --- torch_geometric/nn/conv/gcn_conv.py | 2 +- torch_geometric/nn/conv/pdn_conv.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_geometric/nn/conv/gcn_conv.py b/torch_geometric/nn/conv/gcn_conv.py index cc61dfc0b05f..7ad729ae9cde 100644 --- a/torch_geometric/nn/conv/gcn_conv.py +++ b/torch_geometric/nn/conv/gcn_conv.py @@ -124,7 +124,7 @@ class GCNConv(MessagePassing): .. math:: \mathbf{x}^{\prime}_i = \mathbf{\Theta}^{\top} \sum_{j \in - \mathcal{N}(v) \cup \{ i \}} \frac{e_{j,i}}{\sqrt{\hat{d}_j + \mathcal{N}(i) \cup \{ i \}} \frac{e_{j,i}}{\sqrt{\hat{d}_j \hat{d}_i}} \mathbf{x}_j with :math:`\hat{d}_i = 1 + \sum_{j \in \mathcal{N}(i)} e_{j,i}`, where diff --git a/torch_geometric/nn/conv/pdn_conv.py b/torch_geometric/nn/conv/pdn_conv.py index bc274059683b..bdb00f42ee0e 100644 --- a/torch_geometric/nn/conv/pdn_conv.py +++ b/torch_geometric/nn/conv/pdn_conv.py @@ -15,7 +15,7 @@ class PDNConv(MessagePassing): `_ paper .. 
math:: - \mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(v) \cup + \mathbf{x}^{\prime}_i = \sum_{j \in \mathcal{N}(i) \cup \{i\}}f_{\Theta}(\textbf{e}_{(j,i)}) \cdot f_{\Omega}(\mathbf{x}_{j}) where :math:`z_{i,j}` denotes the edge feature vector from source node From 483ef6aa0cbec6fc26108d24465c2edfebc5a02e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 2 May 2023 14:00:09 +0200 Subject: [PATCH 1152/2432] Fix TorchScript error in case `torch-sparse` is not installed (#7273) --- torch_geometric/typing.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index c0bc45a183c1..372739b93743 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -63,7 +63,21 @@ WITH_TORCH_SPARSE = False class SparseStorage: - def __init__(*args, **kwargs): + def __init__( + self, + row: Optional[Tensor] = None, + rowptr: Optional[Tensor] = None, + col: Optional[Tensor] = None, + value: Optional[Tensor] = None, + sparse_sizes: Optional[Tuple[Optional[int], Optional[int]]] = None, + rowcount: Optional[Tensor] = None, + colptr: Optional[Tensor] = None, + colcount: Optional[Tensor] = None, + csr2csc: Optional[Tensor] = None, + csc2csr: Optional[Tensor] = None, + is_sorted: bool = False, + trust_data: bool = False, + ): raise ImportError("'SparseStorage' requires 'torch-sparse'") class SparseTensor: From f0d5b225435db67e6746165e4c98864c4e2f4399 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 2 May 2023 14:00:25 +0200 Subject: [PATCH 1153/2432] Fix out-dated GraphGym doc on CPU usage (#7274) --- docs/source/advanced/graphgym.rst | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/source/advanced/graphgym.rst b/docs/source/advanced/graphgym.rst index 22d879ebd8f4..344898fab9b3 100644 --- a/docs/source/advanced/graphgym.rst +++ b/docs/source/advanced/graphgym.rst @@ -107,12 +107,7 @@ To use GraphGym, you need to clone :pyg:`PyG` from :github:`GitHub`, then change bash run_batch.sh # run a batch of experiments #. **Run GraphGym with CPU backend:** - GraphGym supports CPU backend as well -- you only need to add the line :obj:`device: cpu` to the :obj:`*.yaml` file. - Here we provide an example: - - .. code-block:: bash - - bash run_single_cpu.sh # run a single experiment using CPU backend + GraphGym supports CPU backend as well -- you only need to add the line :obj:`accelerator: cpu` to the :obj:`*.yaml` file. In-Depth Usage -------------- From 84ae27d9a9c0c65312b518e00683c48bfece74ce Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 2 May 2023 14:12:27 +0200 Subject: [PATCH 1154/2432] Option to preserve directed graphs in `CitationFull` datasets (#7275) --- CHANGELOG.md | 1 + torch_geometric/datasets/amazon.py | 12 ++++++++---- torch_geometric/datasets/citation_full.py | 16 ++++++++++++---- torch_geometric/datasets/coauthor.py | 12 ++++++++---- torch_geometric/io/npz.py | 12 ++++++++---- 5 files changed, 37 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87903de705bc..f7634c095f7e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275)) - Added support for `torch.sparse.Tensor` in `DataLoader` ([#7252](https://github.com/pyg-team/pytorch_geometric/pull/7252)) - Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250)) - Added an example for heterogeneous GNN explanation via `CaptumExplainer` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) diff --git a/torch_geometric/datasets/amazon.py b/torch_geometric/datasets/amazon.py index 6b3ebc3a36d7..176c8d08a626 100644 --- a/torch_geometric/datasets/amazon.py +++ b/torch_geometric/datasets/amazon.py @@ -54,9 +54,13 @@ class Amazon(InMemoryDataset): url = 'https://github.com/shchur/gnn-benchmark/raw/master/data/npz/' - def __init__(self, root: str, name: str, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None): + def __init__( + self, + root: str, + name: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): self.name = name.lower() assert self.name in ['computers', 'photo'] super().__init__(root, transform, pre_transform) @@ -82,7 +86,7 @@ def download(self): download_url(http://github.com/self.url%20+%20self.raw_file_names,%20self.raw_dir) def process(self): - data = read_npz(self.raw_paths[0]) + data = read_npz(self.raw_paths[0], to_undirected=True) data = data if self.pre_transform is None else self.pre_transform(data) data, slices = self.collate([data]) torch.save((data, slices), self.processed_paths[0]) diff --git a/torch_geometric/datasets/citation_full.py b/torch_geometric/datasets/citation_full.py index c3ec045e4f54..70c2d6f51b76 100644 --- a/torch_geometric/datasets/citation_full.py +++ b/torch_geometric/datasets/citation_full.py @@ -27,6 +27,8 @@ class CitationFull(InMemoryDataset): an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before being saved to disk. (default: :obj:`None`) + to_undirected (bool, optional): Whether the original graph is + converted to an undirected one. 
(default: :obj:`True`) **STATS:** @@ -68,10 +70,16 @@ class CitationFull(InMemoryDataset): url = 'https://github.com/abojchevski/graph2gauss/raw/master/data/%7B%7D.npz' - def __init__(self, root: str, name: str, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None): + def __init__( + self, + root: str, + name: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + to_undirected: bool = True, + ): self.name = name.lower() + self.to_undirected = to_undirected assert self.name in ['cora', 'cora_ml', 'citeseer', 'dblp', 'pubmed'] super().__init__(root, transform, pre_transform) self.data, self.slices = torch.load(self.processed_paths[0]) @@ -96,7 +104,7 @@ def download(self): download_url(http://github.com/self.url.format(self.name), self.raw_dir) def process(self): - data = read_npz(self.raw_paths[0]) + data = read_npz(self.raw_paths[0], to_undirected=self.to_undirected) data = data if self.pre_transform is None else self.pre_transform(data) data, slices = self.collate([data]) torch.save((data, slices), self.processed_paths[0]) diff --git a/torch_geometric/datasets/coauthor.py b/torch_geometric/datasets/coauthor.py index e6f015b60964..39db63e10570 100644 --- a/torch_geometric/datasets/coauthor.py +++ b/torch_geometric/datasets/coauthor.py @@ -53,9 +53,13 @@ class Coauthor(InMemoryDataset): url = 'https://github.com/shchur/gnn-benchmark/raw/master/data/npz/' - def __init__(self, root: str, name: str, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None): + def __init__( + self, + root: str, + name: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): assert name.lower() in ['cs', 'physics'] self.name = 'CS' if name.lower() == 'cs' else 'Physics' super().__init__(root, transform, pre_transform) @@ -81,7 +85,7 @@ def download(self): download_url(http://github.com/self.url%20+%20self.raw_file_names,%20self.raw_dir) def process(self): - data = read_npz(self.raw_paths[0]) + data = read_npz(self.raw_paths[0], to_undirected=True) data = data if self.pre_transform is None else self.pre_transform(data) data, slices = self.collate([data]) torch.save((data, slices), self.processed_paths[0]) diff --git a/torch_geometric/io/npz.py b/torch_geometric/io/npz.py index 075b9190093b..3e7ffd07f82d 100644 --- a/torch_geometric/io/npz.py +++ b/torch_geometric/io/npz.py @@ -1,17 +1,20 @@ +from typing import Any, Dict + import numpy as np import scipy.sparse as sp import torch from torch_geometric.data import Data -from torch_geometric.utils import remove_self_loops, to_undirected +from torch_geometric.utils import remove_self_loops +from torch_geometric.utils import to_undirected as to_undirected_fn -def read_npz(path): +def read_npz(path: str, to_undirected: bool = True) -> Data: with np.load(path) as f: return parse_npz(f) -def parse_npz(f): +def parse_npz(f: Dict[str, Any], to_undirected: bool = True) -> Data: x = sp.csr_matrix((f['attr_data'], f['attr_indices'], f['attr_indptr']), f['attr_shape']).todense() x = torch.from_numpy(x).to(torch.float) @@ -23,7 +26,8 @@ def parse_npz(f): col = torch.from_numpy(adj.col).to(torch.long) edge_index = torch.stack([row, col], dim=0) edge_index, _ = remove_self_loops(edge_index) - edge_index = to_undirected(edge_index, num_nodes=x.size(0)) + if to_undirected: + edge_index = to_undirected_fn(edge_index, num_nodes=x.size(0)) y = torch.from_numpy(f['labels']).to(torch.long) From 
5feee57127b5f42d1095630cff915ed6515ecead Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 2 May 2023 14:22:21 +0200 Subject: [PATCH 1155/2432] use `pyg_lib.random_walk` implementation in `Node2Vec` (if applicable) (#7276) --- examples/node2vec.py | 20 ++++++++++++-------- test/nn/models/test_node2vec.py | 16 +++++++++++++--- torch_geometric/nn/models/node2vec.py | 25 +++++++++++++++---------- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/examples/node2vec.py b/examples/node2vec.py index e54dabc88afb..7c2d4ee35433 100644 --- a/examples/node2vec.py +++ b/examples/node2vec.py @@ -7,10 +7,6 @@ from torch_geometric.datasets import Planetoid from torch_geometric.nn import Node2Vec -from torch_geometric.typing import WITH_TORCH_CLUSTER - -if not WITH_TORCH_CLUSTER: - quit("This example requires 'torch-cluster'") def main(): @@ -20,9 +16,17 @@ def main(): data = dataset[0] device = 'cuda' if torch.cuda.is_available() else 'cpu' - model = Node2Vec(data.edge_index, embedding_dim=128, walk_length=20, - context_size=10, walks_per_node=10, - num_negative_samples=1, p=1, q=1, sparse=True).to(device) + model = Node2Vec( + data.edge_index, + embedding_dim=128, + walk_length=20, + context_size=10, + walks_per_node=10, + num_negative_samples=1, + p=1, + q=1, + sparse=True, + ).to(device) num_workers = 0 if sys.platform.startswith('win') else 4 loader = model.loader(batch_size=128, shuffle=True, @@ -52,7 +56,7 @@ def test(): for epoch in range(1, 101): loss = train() acc = test() - print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Acc: {acc:.4f}') + print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Acc: {acc:.4f}') @torch.no_grad() def plot_points(colors): diff --git a/test/nn/models/test_node2vec.py b/test/nn/models/test_node2vec.py index 77d195656253..0ce285ed543d 100644 --- a/test/nn/models/test_node2vec.py +++ b/test/nn/models/test_node2vec.py @@ -1,3 +1,4 @@ +import pytest import torch from torch_geometric.nn import Node2Vec @@ -5,12 +6,21 @@ @withCUDA +@withPackage('pyg_lib') @withPackage('torch_cluster') -def test_node2vec(device): +@pytest.mark.parametrize('p', [1.0]) +@pytest.mark.parametrize('q', [1.0, 0.5]) +def test_node2vec(device, p, q): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device=device) - model = Node2Vec(edge_index, embedding_dim=16, walk_length=2, - context_size=2).to(device) + model = Node2Vec( + edge_index, + embedding_dim=16, + walk_length=2, + context_size=2, + p=p, + q=q, + ).to(device) assert str(model) == 'Node2Vec(3, 16)' assert model(torch.arange(3, device=device)).size() == (3, 16) diff --git a/torch_geometric/nn/models/node2vec.py b/torch_geometric/nn/models/node2vec.py index ae91b4a49095..37ece5297797 100644 --- a/torch_geometric/nn/models/node2vec.py +++ b/torch_geometric/nn/models/node2vec.py @@ -5,16 +5,11 @@ from torch.nn import Embedding from torch.utils.data import DataLoader +from torch_geometric.typing import WITH_PYG_LIB, WITH_TORCH_CLUSTER from torch_geometric.utils import sort_edge_index from torch_geometric.utils.num_nodes import maybe_num_nodes from torch_geometric.utils.sparse import index2ptr -try: - import torch_cluster # noqa - random_walk = torch.ops.torch_cluster.random_walk -except ImportError: - random_walk = None - class Node2Vec(torch.nn.Module): r"""The Node2Vec model from the @@ -63,8 +58,18 @@ def __init__( ): super().__init__() - if random_walk is None: - raise ImportError('`Node2Vec` requires `torch-cluster`.') + if WITH_PYG_LIB and p == 1.0 and q == 1.0: + self.random_walk_fn = torch.ops.pyg.random_walk + elif 
WITH_TORCH_CLUSTER: + self.random_walk_fn = torch.ops.torch_cluster.random_walk + else: + if p == 1.0 and q == 1.0: + raise ImportError(f"'{self.__class__.__name__}' " + f"requires either the 'pyg-lib' or " + f"'torch-cluster' package") + else: + raise ImportError(f"'{self.__class__.__name__}' " + f"requires the 'torch-cluster' package") self.num_nodes = maybe_num_nodes(edge_index, num_nodes) @@ -103,8 +108,8 @@ def loader(self, **kwargs) -> DataLoader: @torch.jit.export def pos_sample(self, batch: Tensor) -> Tensor: batch = batch.repeat(self.walks_per_node) - rw = random_walk(self.rowptr, self.col, batch, self.walk_length, - self.p, self.q) + rw = self.random_walk_fn(self.rowptr, self.col, batch, + self.walk_length, self.p, self.q) if not isinstance(rw, Tensor): rw = rw[0] From 33ec8cef787d802fbdb2e5c6966494112bd29154 Mon Sep 17 00:00:00 2001 From: Remy Liu <36778645+RemyLau@users.noreply.github.com> Date: Wed, 3 May 2023 10:13:07 -0400 Subject: [PATCH 1156/2432] Fix GraphGym two-class multilabel classification logic (#7284) --- torch_geometric/graphgym/model_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index 90e7237fadf4..246ccfde5c1c 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -74,7 +74,7 @@ def create_model(to_device=True, dim_in=None, dim_out=None) -> GraphGymModule: dim_in = cfg.share.dim_in if dim_in is None else dim_in dim_out = cfg.share.dim_out if dim_out is None else dim_out # binary classification, output dim = 1 - if 'classification' in cfg.dataset.task_type and dim_out == 2: + if 'classification' == cfg.dataset.task_type and dim_out == 2: dim_out = 1 model = GraphGymModule(dim_in, dim_out, cfg) From 1b6c40f1bd6ed668a131f0f6e0dcf10f9bf54cb9 Mon Sep 17 00:00:00 2001 From: Gleb Bazhenov <43088667+gvbazhenov@users.noreply.github.com> Date: Wed, 3 May 2023 17:16:04 +0300 Subject: [PATCH 1157/2432] [Transforms] Add `NodePropertySplit` transform (#6894) New functional for creating node-level data splits, which allows to induce distributional shifts in graph and conduct experiments with graph models in more challenging setups. It is implemented via the `NodePropertySplit` transform and allows to use the class itself or its static method `mask_nodes_by_property` (the latter is for users who already knows their structural node property and just need to perform data splitting). Test coverage is provided and passed successfully. I decided to edit `transforms.rst` in `docs/source/modules` to make the static method `mask_nodes_by_property` visible in documentation, but not sure if this is the right way. --------- Co-authored-by: Jintang Li Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/transforms/test_node_property_split.py | 39 +++++ torch_geometric/transforms/__init__.py | 2 + .../transforms/node_property_split.py | 165 ++++++++++++++++++ 4 files changed, 207 insertions(+) create mode 100644 test/transforms/test_node_property_split.py create mode 100644 torch_geometric/transforms/node_property_split.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f7634c095f7e..e5f57ed66e6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894)) - Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275)) - Added support for `torch.sparse.Tensor` in `DataLoader` ([#7252](https://github.com/pyg-team/pytorch_geometric/pull/7252)) - Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250)) diff --git a/test/transforms/test_node_property_split.py b/test/transforms/test_node_property_split.py new file mode 100644 index 000000000000..27686fbc3059 --- /dev/null +++ b/test/transforms/test_node_property_split.py @@ -0,0 +1,39 @@ +import pytest +import torch + +from torch_geometric.datasets import graph_generator +from torch_geometric.testing import withPackage +from torch_geometric.transforms import NodePropertySplit + + +@withPackage('networkx') +@pytest.mark.parametrize('property_name', [ + 'popularity', + 'locality', + 'density', +]) +def test_node_property_split(property_name): + ratios = [0.3, 0.1, 0.1, 0.2, 0.3] + + transform = NodePropertySplit(property_name, ratios) + assert str(transform) == f'NodePropertySplit({property_name})' + + data = graph_generator.ERGraph(num_nodes=100, edge_prob=0.4)() + data = transform(data) + + node_ids = [] + for name, ratio in zip([ + 'id_train_mask', + 'id_val_mask', + 'id_test_mask', + 'ood_val_mask', + 'ood_test_mask', + ], ratios): + assert data[name].dtype == torch.bool + assert data[name].size() == (100, ) + assert int(data[name].sum()) == 100 * ratio + node_ids.extend(data[name].nonzero().view(-1).tolist()) + + # Check that masks are non-intersecting and cover all nodes: + node_ids = torch.tensor(node_ids) + assert node_ids.numel() == torch.unique(node_ids).numel() == 100 diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index 96612f7cc658..aef8b7913eb9 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -10,6 +10,7 @@ from .remove_training_classes import RemoveTrainingClasses from .random_node_split import RandomNodeSplit from .random_link_split import RandomLinkSplit +from .node_property_split import NodePropertySplit from .mask import IndexToMask, MaskToIndex from .pad import Pad @@ -72,6 +73,7 @@ 'RemoveTrainingClasses', 'RandomNodeSplit', 'RandomLinkSplit', + 'NodePropertySplit', 'IndexToMask', 'MaskToIndex', 'Pad', diff --git a/torch_geometric/transforms/node_property_split.py b/torch_geometric/transforms/node_property_split.py new file mode 100644 index 000000000000..df4a313a58bf --- /dev/null +++ b/torch_geometric/transforms/node_property_split.py @@ -0,0 +1,165 @@ +from typing import Any, Dict, List + +import torch +from torch import Tensor + +from torch_geometric.data import Data +from torch_geometric.data.datapipes import functional_transform +from torch_geometric.transforms import BaseTransform +from torch_geometric.utils import to_networkx + + +@functional_transform('node_property_split') +class NodePropertySplit(BaseTransform): + r"""Creates a node-level split with distributional shift based on a given + node property, as proposed in the `"Evaluating Robustness and Uncertainty + of Graph Models Under Structural Distributional Shifts" + `__ paper + (functional name: :obj:`node_property_split`). 
+ + It splits the nodes in a given graph into five non-intersecting parts + based on their structural properties. + This can be used for transductive node prediction tasks with distributional + shifts. + It considers the in-distribution (ID) and out-of-distribution (OOD) subsets + of nodes. + The ID subset includes training, validation and testing parts, while + the OOD subset includes validation and testing parts. + As a result, it creates five associated node mask vectors for each graph, + three which are for the ID nodes (:obj:`id_train_mask`, + :obj:`id_val_mask`, :obj:`id_test_mask`), and two which are for the OOD + nodes (:obj:`ood_val_mask`, :obj:`ood_test_mask`). + + This class implements three particular strategies for inducing + distributional shifts in a graph — based on **popularity**, **locality** + or **density**. + + Args: + property_name (str): The name of the node property to be used + (:obj:`"popularity"`, :obj:`"locality"`, :obj:`"density"`). + ratios ([float]): A list of five ratio values for ID training, + ID validation, ID test, OOD validation and OOD test parts. + The values must sum to :obj:`1.0`. + ascending (bool, optional): Whether to sort nodes in ascending order + of the node property, so that nodes with greater values of the + property are considered to be OOD (default: :obj:`True`) + + Example: + + .. code-block:: python + + from torch_geometric.transforms import NodePropertySplit + from torch_geometric.datasets.graph_generator import ERGraph + + data = ERGraph(num_nodes=1000, edge_prob=0.4)() + + property_name = 'popularity' + ratios = [0.3, 0.1, 0.1, 0.3, 0.2] + tranaform = NodePropertySplit(property_name, ratios) + + data = transform(data) + """ + def __init__( + self, + property_name: str, + ratios: List[float], + ascending: bool = True, + ): + if property_name not in {'popularity', 'locality', 'density'}: + raise ValueError(f"Unexpected 'property_name' " + f"(got '{property_name}')") + + if len(ratios) != 5: + raise ValueError(f"'ratios' must contain 5 values " + f"(got {len(ratios)})") + + if sum(ratios) != 1.0: + raise ValueError(f"'ratios' must sum to 1.0 (got {sum(ratios)})") + + self.property_name = property_name + self.compute_fn = _property_name_to_compute_fn[property_name] + self.ratios = ratios + self.ascending = ascending + + def __call__(self, data: Data) -> Data: + + G = to_networkx(data, to_undirected=True, remove_self_loops=True) + property_values = self.compute_fn(G, self.ascending) + mask_dict = self._mask_nodes_by_property(property_values, self.ratios) + + for key, mask in mask_dict.items(): + data[key] = mask + + return data + + @staticmethod + def _compute_popularity_property(G: Any, ascending: bool = True) -> Tensor: + import networkx.algorithms as A + + property_values = torch.tensor(list(A.pagerank(G).values())) + property_values *= -1 if ascending else 1 + return property_values + + @staticmethod + def _compute_locality_property(G: Any, ascending: bool = True) -> Tensor: + import networkx.algorithms as A + + pagerank_values = torch.tensor(list(A.pagerank(G).values())) + + num_nodes = G.number_of_nodes() + personalization = dict(zip(range(num_nodes), [0.0] * num_nodes)) + personalization[int(pagerank_values.argmax())] = 1.0 + + property_values = torch.tensor( + list(A.pagerank(G, personalization=personalization).values())) + property_values *= -1 if ascending else 1 + return property_values + + @staticmethod + def _compute_density_property(G: Any, ascending: bool = True) -> Tensor: + import networkx.algorithms as A + + 
property_values = torch.tensor(list(A.clustering(G).values())) + property_values *= -1 if ascending else 1 + return property_values + + @staticmethod + def _mask_nodes_by_property( + property_values: Tensor, + ratios: List[float], + ) -> Dict[str, Tensor]: + + num_nodes = property_values.size(0) + sizes = (num_nodes * torch.tensor(ratios)).round().long() + sizes[-1] -= sizes.sum() - num_nodes + + perm = torch.randperm(num_nodes) + id_size = int(sizes[:3].sum()) + perm = perm[property_values[perm].argsort()] + perm[:id_size] = perm[:id_size][torch.randperm(id_size)] + + node_splits = perm.split(sizes.tolist()) + names = [ + 'id_train_mask', + 'id_val_mask', + 'id_test_mask', + 'ood_val_mask', + 'ood_test_mask', + ] + + split_masks = {} + for name, node_split in zip(names, node_splits): + split_mask = torch.zeros(num_nodes, dtype=torch.bool) + split_mask[node_split] = True + split_masks[name] = split_mask + return split_masks + + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.property_name})' + + +_property_name_to_compute_fn = { + 'popularity': NodePropertySplit._compute_popularity_property, + 'locality': NodePropertySplit._compute_locality_property, + 'density': NodePropertySplit._compute_density_property, +} From 356a62ac8039e55fd049b322b4dc98f87fe53647 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 4 May 2023 09:28:19 +0200 Subject: [PATCH 1158/2432] Fix `readthedocs` build (#7291) --- readthedocs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/readthedocs.yml b/readthedocs.yml index aae9ffd433e1..35620da86a33 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -1,11 +1,11 @@ version: 2 build: - image: latest + os: ubuntu-22.04 + tools: + python: "3.8" python: - version: 3.8 - system_packages: true install: - requirements: docs/requirements.txt - method: pip From fb1d855d5e3d12092ffaa8d456e3a46252cf9723 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 4 May 2023 14:07:32 +0200 Subject: [PATCH 1159/2432] Allow tuples as keys in `ModuleDict`/`ParameterDict` (#7294) --- CHANGELOG.md | 1 + test/nn/test_module_dict.py | 23 ++++++++++++ test/nn/test_parameter_dict.py | 54 +++++++++++++++++++--------- torch_geometric/nn/module_dict.py | 41 +++++++++++++-------- torch_geometric/nn/parameter_dict.py | 43 ++++++++++++++-------- 5 files changed, 117 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e5f57ed66e6a..73052b80ba94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added support for tuples as keys in `ModuleDict`/`ParameterDict` ([#7294](https://github.com/pyg-team/pytorch_geometric/pull/7294)) - Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894)) - Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275)) - Added support for `torch.sparse.Tensor` in `DataLoader` ([#7252](https://github.com/pyg-team/pytorch_geometric/pull/7252)) diff --git a/test/nn/test_module_dict.py b/test/nn/test_module_dict.py index 16fced0bc19d..6dbfc9fc6223 100644 --- a/test/nn/test_module_dict.py +++ b/test/nn/test_module_dict.py @@ -7,9 +7,13 @@ def test_internal_external_key_conversion(): assert ModuleDict.to_internal_key('a.b') == 'a#b' assert ModuleDict.to_internal_key('ab') == 'ab' assert ModuleDict.to_internal_key('a.b.c') == 'a#b#c' + assert ModuleDict.to_internal_key(('a', 'b')) == '' + assert ModuleDict.to_internal_key(('a.b', 'c')) == '' assert ModuleDict.to_external_key('a#b') == 'a.b' assert ModuleDict.to_external_key('a#b#c') == 'a.b.c' + assert ModuleDict.to_external_key('') == ('a', 'b') + assert ModuleDict.to_external_key('') == ('a.b', 'c') def test_dot_syntax_keys(): @@ -26,5 +30,24 @@ def test_dot_syntax_keys(): for key in expected_keys: assert key in module_dict + assert 'model.lin2' in module_dict del module_dict['model.lin2'] assert 'model.lin2' not in module_dict + + +def test_tuple_keys(): + module_dict = ModuleDict({ + ('a', 'b'): torch.nn.Linear(16, 16), + ('a.b', 'c'): torch.nn.Linear(8, 8), + }) + + expected_keys = {('a', 'b'), ('a.b', 'c')} + assert set(module_dict.keys()) == expected_keys + assert set([key for key, _ in module_dict.items()]) == expected_keys + + for key in expected_keys: + assert key in module_dict + + assert ('a', 'b') in module_dict + del module_dict['a', 'b'] + assert ('a', 'b') not in module_dict diff --git a/test/nn/test_parameter_dict.py b/test/nn/test_parameter_dict.py index 34cde8c5406b..29d44f926667 100644 --- a/test/nn/test_parameter_dict.py +++ b/test/nn/test_parameter_dict.py @@ -1,33 +1,55 @@ -from typing import Mapping - import torch -from torch.nn import Parameter from torch_geometric.nn.parameter_dict import ParameterDict def test_internal_external_key_conversion(): - assert ParameterDict.to_internal_key("a.b") == "a#b" - assert ParameterDict.to_internal_key("ab") == "ab" - assert ParameterDict.to_internal_key("a.b.c") == "a#b#c" + assert ParameterDict.to_internal_key('a.b') == 'a#b' + assert ParameterDict.to_internal_key('ab') == 'ab' + assert ParameterDict.to_internal_key('a.b.c') == 'a#b#c' + assert ParameterDict.to_internal_key(('a', 'b')) == '' + assert ParameterDict.to_internal_key(('a.b', 'c')) == '' - assert ParameterDict.to_external_key("a#b") == "a.b" - assert ParameterDict.to_external_key("a#b#c") == "a.b.c" + assert ParameterDict.to_external_key('a#b') == 'a.b' + assert ParameterDict.to_external_key('a#b#c') == 'a.b.c' + assert ParameterDict.to_external_key('') == ('a', 'b') + assert ParameterDict.to_external_key('') == ('a.b', 'c') def test_dot_syntax_keys(): - parameters: Mapping[str, Parameter] = { - "param1": Parameter(torch.Tensor(16, 16)), - "model.param2": Parameter(torch.Tensor(8, 8)), - "model.sub_model.param3": Parameter(torch.Tensor(4, 4)), + parameter_dict = { + 'param1': torch.nn.Parameter(torch.Tensor(16, 16)), + 'model.param2': torch.nn.Parameter(torch.Tensor(8, 8)), + 
'model.sub_model.param3': torch.nn.Parameter(torch.Tensor(4, 4)), + } + parameter_dict = ParameterDict(parameter_dict) + + expected_keys = {'param1', 'model.param2', 'model.sub_model.param3'} + assert set(parameter_dict.keys()) == expected_keys + assert set([key for key, _ in parameter_dict.items()]) == expected_keys + + for key in expected_keys: + assert key in parameter_dict + + assert 'model.param2' in parameter_dict + del parameter_dict['model.param2'] + assert 'model.param2' not in parameter_dict + + +def test_tuple_keys(): + parameter_dict = { + ('a', 'b'): torch.nn.Parameter(torch.Tensor(16, 16)), + ('a.b', 'c'): torch.nn.Parameter(torch.Tensor(8, 8)), } - parameter_dict = ParameterDict(parameters) + parameter_dict = ParameterDict(parameter_dict) - expected_keys = {"param1", "model.param2", "model.sub_model.param3"} + expected_keys = {('a', 'b'), ('a.b', 'c')} assert set(parameter_dict.keys()) == expected_keys + assert set([key for key, _ in parameter_dict.items()]) == expected_keys for key in expected_keys: assert key in parameter_dict - del parameter_dict["model.param2"] - assert "model.param2" not in parameter_dict + assert ('a', 'b') in parameter_dict + del parameter_dict['a', 'b'] + assert ('a', 'b') not in parameter_dict diff --git a/torch_geometric/nn/module_dict.py b/torch_geometric/nn/module_dict.py index 09c3cab8c0ef..122e03a30acf 100644 --- a/torch_geometric/nn/module_dict.py +++ b/torch_geometric/nn/module_dict.py @@ -1,15 +1,20 @@ -from typing import Iterable, Mapping, Optional, Tuple +from typing import Iterable, Mapping, Optional, Tuple, Union import torch from torch.nn import Module +Key = Union[str, Tuple[str, ...]] + # `torch.nn.ModuleDict` doesn't allow `.` to be used in key names. # This `ModuleDict` will support it by converting the `.` to `#` in the # internal representation and converts it back to `.` in the external -# representation. +# representation. It also allows passing tuples as keys. 
class ModuleDict(torch.nn.ModuleDict): - def __init__(self, modules: Optional[Mapping[str, Module]] = None): + def __init__( + self, + modules: Optional[Mapping[Union[str, Tuple[str, ...]], Module]] = None, + ): if modules is not None: # Replace the keys in modules: modules = { self.to_internal_key(key): module @@ -18,28 +23,34 @@ def __init__(self, modules: Optional[Mapping[str, Module]] = None): super().__init__(modules) @staticmethod - def to_internal_key(key: str) -> str: - return key.replace(".", "#") + def to_internal_key(key: Key) -> str: + if isinstance(key, tuple): + assert len(key) > 1 + key = f"<{'___'.join(key)}>" + assert isinstance(key, str) + return key.replace('.', '#') @staticmethod - def to_external_key(key: str) -> str: - return key.replace("#", ".") + def to_external_key(key: str) -> Key: + key = key.replace('#', '.') + if key.startswith('<') and key.endswith('>') and '___' in key: + key = tuple(key[1:-1].split('___')) + return key - def __getitem__(self, key: str) -> Module: + def __getitem__(self, key: Key) -> Module: return super().__getitem__(self.to_internal_key(key)) - def __setitem__(self, key: str, module: Module): + def __setitem__(self, key: Key, module: Module): return super().__setitem__(self.to_internal_key(key), module) - def __delitem__(self, key: str): + def __delitem__(self, key: Key): return super().__delitem__(self.to_internal_key(key)) - def __contains__(self, key: str) -> bool: + def __contains__(self, key: Key) -> bool: return super().__contains__(self.to_internal_key(key)) - def keys(self) -> Iterable[str]: + def keys(self) -> Iterable[Key]: return [self.to_external_key(key) for key in super().keys()] - def items(self) -> Iterable[Tuple[str, Module]]: - return [(self.to_external_key(key), value) - for key, value in super().items()] + def items(self) -> Iterable[Tuple[Key, Module]]: + return [(self.to_external_key(k), v) for k, v in super().items()] diff --git a/torch_geometric/nn/parameter_dict.py b/torch_geometric/nn/parameter_dict.py index 1d8ebc59b67d..99d887bd1572 100644 --- a/torch_geometric/nn/parameter_dict.py +++ b/torch_geometric/nn/parameter_dict.py @@ -1,42 +1,57 @@ -from typing import Iterable, Mapping, Optional +from typing import Iterable, Mapping, Optional, Tuple, Union import torch from torch.nn import Parameter +Key = Union[str, Tuple[str, ...]] + # `torch.nn.ParameterDict` doesn't allow `.` to be used in key names. # This `ParameterDict` will support it by converting the `.` to `#` in the # internal representation and converts it back to `.` in the external -# representation. +# representation. It also allows passing tuples as keys. class ParameterDict(torch.nn.ParameterDict): - def __init__(self, parameters: Optional[Mapping[str, Parameter]] = None): + def __init__( + self, + parameters: Optional[Mapping[Key, Parameter]] = None, + ): # Replace the keys in modules. 
if parameters: parameters = { - self.to_internal_key(key): module - for key, module in parameters.items() + self.to_internal_key(key): parameter + for key, parameter in parameters.items() } super().__init__(parameters) @staticmethod - def to_internal_key(key: str) -> str: - return key.replace(".", "#") + def to_internal_key(key: str) -> Key: + if isinstance(key, tuple): + assert len(key) > 1 + key = f"<{'___'.join(key)}>" + assert isinstance(key, str) + return key.replace('.', '#') @staticmethod - def to_external_key(key: str) -> str: - return key.replace("#", ".") + def to_external_key(key: str) -> Key: + key = key.replace('#', '.') + if key.startswith('<') and key.endswith('>') and '___' in key: + key = tuple(key[1:-1].split('___')) + return key - def __getitem__(self, key: str) -> Parameter: + def __getitem__(self, key: Key) -> Parameter: return super().__getitem__(self.to_internal_key(key)) - def __setitem__(self, key: str, parameter: Parameter): + def __setitem__(self, key: Key, parameter: Parameter): return super().__setitem__(self.to_internal_key(key), parameter) - def __delitem__(self, key: str): + def __delitem__(self, key: Key): return super().__delitem__(self.to_internal_key(key)) - def __contains__(self, key: str) -> bool: + def __contains__(self, key: Key) -> bool: return super().__contains__(self.to_internal_key(key)) - def keys(self) -> Iterable[str]: + def keys(self) -> Iterable[Key]: return [self.to_external_key(key) for key in super().keys()] + + def items(self) -> Iterable[Tuple[Key, Parameter]]: + return [(self.to_external_key(k), v) for k, v in super().items()] From d7daf0a74ad685a8d51efaac1706a7a7f3cae6c6 Mon Sep 17 00:00:00 2001 From: Abdullah <37012364+Saydemr@users.noreply.github.com> Date: Sun, 7 May 2023 15:14:55 +0300 Subject: [PATCH 1160/2432] Update quick-start.html (#7313) Update the warning when PyTorch 2.0.* version is chosen along Cuda 11.6. Warning was `# PyTorch 1.13.* binaries do not support CUDA 11.6` though 2.0.* version is selected. --- CHANGELOG.md | 1 + docs/source/install/quick-start.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73052b80ba94..271fb5e51131 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) - Added an optional `batch_size` and `max_num_nodes` arguments to `MemPooling` layer ([#7239](https://github.com/pyg-team/pytorch_geometric/pull/7239)) +- Fixed the installation warning message when `PyTorch 2.0.*` is chosen along `Cuda 11.6`. 
([#7313](https://github.com/pyg-team/pytorch_geometric/pull/7313)) ### Removed diff --git a/docs/source/install/quick-start.html b/docs/source/install/quick-start.html index 570383d063b6..ec6d723def7e 100644 --- a/docs/source/install/quick-start.html +++ b/docs/source/install/quick-start.html @@ -117,7 +117,7 @@ } else if (torch == "torch-2.0.0" && cuda == "cu116") { - $("#command pre").text('# PyTorch 1.13.* binaries do not support CUDA 11.6'); + $("#command pre").text('# PyTorch 2.0.* binaries do not support CUDA 11.6'); } else if (os == "windows" && package == "conda") { From 8826a544cbe7d56ae889bf3225101a544bceac44 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 8 May 2023 07:26:34 +0200 Subject: [PATCH 1161/2432] Fix `GraphStore` conversion in `Data` for isolated nodes (#7316) --- CHANGELOG.md | 1 - torch_geometric/data/data.py | 3 +++ torch_geometric/data/hetero_data.py | 4 ++++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 271fb5e51131..73052b80ba94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,7 +52,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) - Added an optional `batch_size` and `max_num_nodes` arguments to `MemPooling` layer ([#7239](https://github.com/pyg-team/pytorch_geometric/pull/7239)) -- Fixed the installation warning message when `PyTorch 2.0.*` is chosen along `Cuda 11.6`. ([#7313](https://github.com/pyg-team/pytorch_geometric/pull/7313)) ### Removed diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index e620a5520251..846c105dee6f 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -905,6 +905,9 @@ def _put_edge_index(self, edge_index: EdgeTensorType, return True def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: + if edge_attr.size is None: + edge_attr.size = self.size() # Modify in-place. + if edge_attr.layout == EdgeLayout.COO and 'edge_index' in self: row, col = self.edge_index return row, col diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index abca8fd99d10..a65d3c8b82bd 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -968,6 +968,10 @@ def _put_edge_index(self, edge_index: EdgeTensorType, def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: r"""Gets an edge index from edge storage, in the specified layout.""" store = self[edge_attr.edge_type] + + if edge_attr.size is None: + edge_attr.size = store.size() # Modify in-place. + if edge_attr.layout == EdgeLayout.COO and 'edge_index' in store: row, col = store.edge_index return row, col From c566f5cee90bf319cbe9e86808a60b1f1d283c94 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Mon, 8 May 2023 13:37:06 +0530 Subject: [PATCH 1162/2432] `Pooling`: Update base classes (#7307) Towards #6455. This PR makes the following changes 1. Removes the `Pooling` base class. The current implementation isn't flexible enough to support all pooling operators. Instead pooling operators will implement their own forward method, using `Select` , `Connect` and `Aggregate` operators. 2. Updated the `Select` operator. 
It now returns `SelectOutput` which contains `node_index`, `cluster_index`, `num_clusters` and `weight`. Where `weight` is the weight given to a node assignment to a cluster. --------- Co-authored-by: rusty1s --- test/nn/pool/test_pooling_base.py | 52 ------------ torch_geometric/nn/pool/base.py | 94 --------------------- torch_geometric/nn/pool/connect/__init__.py | 3 +- torch_geometric/nn/pool/connect/base.py | 76 ++++++++++++++++- torch_geometric/nn/pool/select/__init__.py | 3 +- torch_geometric/nn/pool/select/base.py | 90 ++++++++++++++------ 6 files changed, 142 insertions(+), 176 deletions(-) delete mode 100644 test/nn/pool/test_pooling_base.py delete mode 100644 torch_geometric/nn/pool/base.py diff --git a/test/nn/pool/test_pooling_base.py b/test/nn/pool/test_pooling_base.py deleted file mode 100644 index fc93935188a4..000000000000 --- a/test/nn/pool/test_pooling_base.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from torch_geometric.nn import MaxAggregation -from torch_geometric.nn.pool.base import Pooling, PoolingOutput -from torch_geometric.nn.pool.connect import Connect -from torch_geometric.nn.pool.select import Select -from torch_geometric.utils import scatter - - -class DummySelect(Select): - def forward(self, x, edge_index, edge_attr, batch): - # Pool into a single node for each graph. - if batch is None: - return edge_index.new_zeros(x.size(0), dtype=torch.long), 1 - return batch, int(batch.max()) + 1 - - -class DummyConnect(Connect): - def forward(self, x, edge_index, edge_attr, batch): - # Return empty graph connection: - if edge_attr is not None: - edge_attr = edge_attr.new_empty((0, ) + edge_attr.size()[1:]) - return edge_index.new_empty(2, 0), edge_attr - - -def test_pooling(): - pool = Pooling(DummySelect(), MaxAggregation(), DummyConnect()) - pool.reset_parameters() - assert str(pool) == ('Pooling(\n' - ' select=DummySelect(),\n' - ' reduce=MaxAggregation(),\n' - ' connect=DummyConnect(),\n' - ')') - - x = torch.randn(10, 8) - edge_index = torch.empty((2, 0), dtype=torch.long) - edge_attr = torch.empty(0, 4) - batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) - - out = pool(x, edge_index) - assert isinstance(out, PoolingOutput) - assert torch.allclose(out.x, x.max(dim=0, keepdim=True)[0]) - assert out.edge_index.size() == (2, 0) - assert out.edge_attr is None - assert out.batch is None - - out = pool(x, edge_index, edge_attr, batch) - assert isinstance(out, PoolingOutput) - assert torch.allclose(out.x, scatter(x, batch, reduce='max')) - assert out.edge_index.size() == (2, 0) - assert out.edge_attr.size() == (0, 4) - assert out.batch.tolist() == [0, 1] diff --git a/torch_geometric/nn/pool/base.py b/torch_geometric/nn/pool/base.py deleted file mode 100644 index 103728056481..000000000000 --- a/torch_geometric/nn/pool/base.py +++ /dev/null @@ -1,94 +0,0 @@ -from dataclasses import dataclass -from typing import Optional - -import torch -from torch import Tensor - -from torch_geometric.nn.aggr import Aggregation -from torch_geometric.nn.pool.connect import Connect -from torch_geometric.nn.pool.select import Select -from torch_geometric.utils.mixin import CastMixin - - -@dataclass -class PoolingOutput(CastMixin): - r"""The pooling output of a :class:`torch_geometric.nn.pool.Pooling` - module. - - Args: - x (torch.Tensor): The pooled node features. - edge_index (torch.Tensor): The coarsened edge indices. - edge_attr (torch.Tensor, optional): The edge features of the coarsened - graph. 
(default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector of the pooled nodes. - """ - x: Tensor - edge_index: Tensor - edge_attr: Optional[Tensor] = None - batch: Optional[Tensor] = None - - -class Pooling(torch.nn.Module): - r"""A base class for pooling layers based on the - `"Understanding Pooling in Graph Neural Networks" - `_ paper. - - :class:`Pooling` decomposes a pooling layer into three components: - - #. :class:`Select` defines how input nodes map to supernodes. - - #. :class:`Reduce` defines how input node features are aggregated. - - #. :class:`Connect` decides how the supernodes are connected to each other. - - Args: - select (Select): The node selection operator. - reduce (Select): The node feature aggregation operator. - connect (Connect): The edge connection operator. - """ - def __init__(self, select: Select, reduce: Aggregation, connect: Connect): - super().__init__() - self.select = select - self.reduce = reduce - self.connect = connect - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.select.reset_parameters() - self.reduce.reset_parameters() - self.connect.reset_parameters() - - def forward( - self, - x: torch.Tensor, - edge_index: torch.Tensor, - edge_attr: Optional[torch.Tensor] = None, - batch: Optional[torch.Tensor] = None, - ) -> PoolingOutput: - r""" - Args: - x (torch.Tensor): The input node features. - edge_index (torch.Tensor): The edge indices. - edge_attr (torch.Tensor, optional): The edge features. - (default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each node to a specific graph. (default: :obj:`None`) - """ - cluster, num_clusters = self.select(x, edge_index, edge_attr, batch) - x = self.reduce(x, cluster, dim_size=num_clusters) - edge_index, edge_attr = self.connect(cluster, edge_index, edge_attr, - batch) - - if batch is not None: - batch = (torch.arange(num_clusters, device=x.device)).scatter_( - 0, cluster, batch) - - return PoolingOutput(x, edge_index, edge_attr, batch) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(\n' - f' select={self.select},\n' - f' reduce={self.reduce},\n' - f' connect={self.connect},\n' - f')') diff --git a/torch_geometric/nn/pool/connect/__init__.py b/torch_geometric/nn/pool/connect/__init__.py index aabde59575b4..52b45639fc63 100644 --- a/torch_geometric/nn/pool/connect/__init__.py +++ b/torch_geometric/nn/pool/connect/__init__.py @@ -1,5 +1,6 @@ -from .base import Connect +from .base import Connect, ConnectOutput __all__ = [ 'Connect', + 'ConnectOutput', ] diff --git a/torch_geometric/nn/pool/connect/base.py b/torch_geometric/nn/pool/connect/base.py index 36c02425768e..1b528e29a9be 100644 --- a/torch_geometric/nn/pool/connect/base.py +++ b/torch_geometric/nn/pool/connect/base.py @@ -1,16 +1,64 @@ +from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import Tensor +from torch_geometric.nn.pool.select import SelectOutput +from torch_geometric.utils.mixin import CastMixin + + +@dataclass(init=False) +class ConnectOutput(CastMixin): + r"""The output of the :class:`Connect` method, which holds the coarsened + graph structure, and optional pooled edge features and batch vectors. + + Args: + edge_index (torch.Tensor): The edge indices of the cooarsened graph. + edge_attr (torch.Tensor, optional): The pooled edge features of the + coarsened graph. 
(default: :obj:`None`) + batch (torch.Tensor, optional): The pooled batch vector of the + coarsened graph. (default: :obj:`None`) + """ + edge_index: Tensor + edge_attr: Optional[Tensor] = None + batch: Optional[Tensor] = None + + def __init__( + self, + edge_index: Tensor, + edge_attr: Optional[Tensor] = None, + batch: Optional[Tensor] = None, + ): + if edge_index.dim() != 2: + raise ValueError(f"Expected 'edge_index' to be two-dimensional " + f"(got {edge_index.dim()} dimensions)") + + if edge_index.size(0) != 2: + raise ValueError(f"Expected 'edge_index' to have size '2' in the " + f"first dimension (got '{edge_index.size(0)}')") + + if edge_attr is not None and edge_attr.size(0) != edge_index.size(1): + raise ValueError(f"Expected 'edge_index' and 'edge_attr' to " + f"hold the same number of edges (got " + f"{edge_index.size(1)} and {edge_attr.size(0)} " + f"edges)") + + self.edge_index = edge_index + self.edge_attr = edge_attr + self.batch = batch + class Connect(torch.nn.Module): - r"""An abstract base class implementing custom edge connection operators. + r"""An abstract base class for implementing custom edge connection + operators as described in the `"Understanding Pooling in Graph Neural + Networks" `_ paper. Specifically, :class:`Connect` determines for each pair of supernodes the presence or abscene of an edge based on the existing edges between the nodes in the two supernodes. - The operator also computes new coarsened edge features (if present). + The operator also computes pooled edge features and batch vectors + (if present). """ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" @@ -18,14 +66,14 @@ def reset_parameters(self): def forward( self, - cluster: Tensor, + select_output: SelectOutput, edge_index: Tensor, edge_attr: Optional[Tensor] = None, batch: Optional[Tensor] = None, ) -> Tuple[Tensor, Optional[Tensor]]: r""" Args: - cluster (torch.Tensor): The mapping from nodes to supernodes. + select_output (SelectOutput): The output of :class:`Select`. edge_index (torch.Tensor): The edge indices. edge_attr (torch.Tensor, optional): The edge features. (default: :obj:`None`) @@ -35,5 +83,25 @@ def forward( """ raise NotImplementedError + @staticmethod + def get_pooled_batch( + select_output: SelectOutput, + batch: Optional[Tensor], + ) -> Optional[Tensor]: + r"""Returns the batch vector of the coarsened graph. + + Args: + select_output (SelectOutput): The output of :class:`Select`. + batch (torch.Tensor, optional): The batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns + each element to a specific example. 
(default: :obj:`None`) + """ + if batch is None: + return batch + + out = torch.arange(select_output.num_clusters, device=batch.device) + return out.scatter_(0, select_output.cluster_index, + batch[select_output.node_index]) + def __repr__(self) -> str: return f'{self.__class__.__name__}()' diff --git a/torch_geometric/nn/pool/select/__init__.py b/torch_geometric/nn/pool/select/__init__.py index fe695d7563db..2a7278a33954 100644 --- a/torch_geometric/nn/pool/select/__init__.py +++ b/torch_geometric/nn/pool/select/__init__.py @@ -1,5 +1,6 @@ -from .base import Select +from .base import Select, SelectOutput __all__ = [ 'Select', + 'SelectOutput', ] diff --git a/torch_geometric/nn/pool/select/base.py b/torch_geometric/nn/pool/select/base.py index c23293ca20d9..ee40f17f055f 100644 --- a/torch_geometric/nn/pool/select/base.py +++ b/torch_geometric/nn/pool/select/base.py @@ -1,38 +1,80 @@ -from typing import Optional, Tuple +from dataclasses import dataclass +from typing import Optional import torch from torch import Tensor +from torch_geometric.utils.mixin import CastMixin + + +@dataclass(init=False) +class SelectOutput(CastMixin): + r"""The output of the :class:`Select` method, which holds an assignment + from selected nodes to their respective cluster(s). + + Args: + node_index (torch.Tensor): The indices of the selected nodes. + cluster_index (torch.Tensor): The indices of the clusters each node in + :obj:`node_index` is assigned to. + num_clusters (int): The number of clusters. + weight (torch.Tensor, optional): A weight vector, denoting the strength + of the assignment of a node to its cluster. (default: :obj:`None`) + """ + node_index: Tensor + cluster_index: Tensor + num_clusters: int + weight: Optional[Tensor] = None + + def __init__( + self, + node_index: Tensor, + cluster_index: Tensor, + num_clusters: int, + weight: Optional[Tensor] = None, + ): + if node_index.dim() != 1: + raise ValueError(f"Expected 'node_index' to be one-dimensional " + f"(got {node_index.dim()} dimensions)") + + if cluster_index.dim() != 1: + raise ValueError(f"Expected 'cluster_index' to be one-dimensional " + f"(got {cluster_index.dim()} dimensions)") + + if node_index.numel() != cluster_index.numel(): + raise ValueError(f"Expected 'node_index' and 'cluster_index' to " + f"hold the same number of values (got " + f"{node_index.numel()} and " + f"{cluster_index.numel()} values)") + + if weight is not None and weight.dim() != 1: + raise ValueError(f"Expected 'weight' vector to be one-dimensional " + f"(got {weight.dim()} dimensions)") + + if weight is not None and weight.numel() != node_index.numel(): + raise ValueError(f"Expected 'weight' to hold {node_index.numel()} " + f"values (got {weight.numel()} values)") + + self.node_index = node_index + self.cluster_index = cluster_index + self.num_clusters = num_clusters + self.weight = weight + class Select(torch.nn.Module): - r"""An abstract base class implementing custom node selections that map the - nodes of an input graph to supernodes of the pooled one. + r"""An abstract base class for implementing custom node selections as + described in the `"Understanding Pooling in Graph Neural Networks" + `_ paper, which maps the nodes of an + input graph to supernodes in the coarsened graph. - Specifically, :class:`Select` returns a mapping - :math:`\mathbf{c} \in {\{ -1, \ldots, C - 1\}}^N`, which assigns each node - to one of :math:`C` super nodes. - In addition, :class:`Select` returns the number of super nodes. 
+ Specifically, :class:`Select` returns a :class:`SelectOutput` output, which + holds a (sparse) mapping :math:`\mathbf{C} \in {[0, 1]}^{N \times C}` that + assigns selected nodes to one or more of :math:`C` super nodes. """ def reset_parameters(self): + r"""Resets all learnable parameters of the module.""" pass - def forward( - self, - x: Tensor, - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - batch: Optional[Tensor] = None, - ) -> Tuple[Tensor, int]: - r""" - Args: - x (torch.Tensor): The input node features. - edge_index (torch.Tensor): The edge indices. - edge_attr (torch.Tensor, optional): The edge features. - (default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each node to a specific graph. (default: :obj:`None`) - """ + def forward(self, *args, **kwargs) -> SelectOutput: raise NotImplementedError def __repr__(self) -> str: From a43fc5644308ba52cdbc317010446a52bb294678 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Mon, 8 May 2023 15:29:19 +0530 Subject: [PATCH 1163/2432] Refactor `TopkPooling` into `SelectTopK` (#7308) Towards #6455. Do not review before #7307 is merged. --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/nn/pool/select/test_select_topk.py | 57 +++++++++ test/nn/pool/test_topk_pool.py | 29 +---- torch_geometric/nn/pool/asap.py | 2 +- torch_geometric/nn/pool/pan_pool.py | 3 +- torch_geometric/nn/pool/sag_pool.py | 3 +- torch_geometric/nn/pool/select/__init__.py | 2 + torch_geometric/nn/pool/select/base.py | 4 + torch_geometric/nn/pool/select/topk.py | 135 +++++++++++++++++++++ torch_geometric/nn/pool/topk_pool.py | 90 ++------------ 10 files changed, 214 insertions(+), 112 deletions(-) create mode 100644 test/nn/pool/select/test_select_topk.py create mode 100644 torch_geometric/nn/pool/select/topk.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 73052b80ba94..b1b616f2161a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308)) - Added support for tuples as keys in `ModuleDict`/`ParameterDict` ([#7294](https://github.com/pyg-team/pytorch_geometric/pull/7294)) - Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894)) - Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275)) diff --git a/test/nn/pool/select/test_select_topk.py b/test/nn/pool/select/test_select_topk.py new file mode 100644 index 000000000000..a0418fff9306 --- /dev/null +++ b/test/nn/pool/select/test_select_topk.py @@ -0,0 +1,57 @@ +import pytest +import torch + +from torch_geometric.nn.pool.select import SelectOutput, SelectTopK +from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.testing import is_full_test + + +def test_topk_ratio(): + x = torch.Tensor([2, 4, 5, 6, 2, 9]) + batch = torch.tensor([0, 0, 1, 1, 1, 1]) + + perm1 = topk(x, 0.5, batch) + assert perm1.tolist() == [1, 5, 3] + assert x[perm1].tolist() == [4, 9, 6] + assert batch[perm1].tolist() == [0, 1, 1] + + perm2 = topk(x, 2, batch) + assert perm2.tolist() == [1, 0, 5, 3] + assert x[perm2].tolist() == [4, 2, 9, 6] + assert batch[perm2].tolist() == [0, 0, 1, 1] + + perm3 = topk(x, 3, batch) + assert perm3.tolist() == [1, 0, 5, 3, 2] + assert x[perm3].tolist() == [4, 2, 9, 6, 5] + assert batch[perm3].tolist() == [0, 0, 1, 1, 1] + + if is_full_test(): + jit = torch.jit.script(topk) + assert torch.equal(jit(x, 0.5, batch), perm1) + assert torch.equal(jit(x, 2, batch), perm2) + assert torch.equal(jit(x, 3, batch), perm3) + + +@pytest.mark.parametrize('min_score', [None, 2.0]) +def test_select_topk(min_score): + if min_score is not None: + return + x = torch.randn(6, 16) + batch = torch.tensor([0, 0, 1, 1, 1, 1]) + + pool = SelectTopK(16, min_score=min_score) + + if min_score is None: + assert str(pool) == 'SelectTopK(16, ratio=0.5)' + else: + assert str(pool) == 'SelectTopK(16, min_score=2.0)' + + out = pool(x, batch) + assert isinstance(out, SelectOutput) + + assert out.num_nodes == 6 + assert out.num_clusters <= out.num_nodes + assert out.node_index.min() >= 0 + assert out.node_index.max() < out.num_nodes + assert out.cluster_index.min() == 0 + assert out.cluster_index.max() == out.num_clusters - 1 diff --git a/test/nn/pool/test_topk_pool.py b/test/nn/pool/test_topk_pool.py index 8201db53db7f..a0616183fe60 100644 --- a/test/nn/pool/test_topk_pool.py +++ b/test/nn/pool/test_topk_pool.py @@ -1,35 +1,10 @@ import torch -from torch_geometric.nn.pool.topk_pool import TopKPooling, filter_adj, topk +from torch_geometric.nn.pool import TopKPooling +from torch_geometric.nn.pool.topk_pool import filter_adj from torch_geometric.testing import is_full_test -def test_topk(): - x = torch.Tensor([2, 4, 5, 6, 2, 9]) - batch = torch.tensor([0, 0, 1, 1, 1, 1]) - - perm1 = topk(x, 0.5, batch) - assert perm1.tolist() == [1, 5, 3] - assert x[perm1].tolist() == [4, 9, 6] - assert batch[perm1].tolist() == [0, 1, 1] - - perm2 = topk(x, 2, batch) - assert perm2.tolist() == [1, 0, 5, 3] - assert x[perm2].tolist() == [4, 2, 9, 6] - assert batch[perm2].tolist() == [0, 0, 1, 1] - - perm3 = topk(x, 3, batch) - assert perm3.tolist() == [1, 0, 5, 3, 2] - assert x[perm3].tolist() == [4, 2, 9, 6, 5] - assert batch[perm3].tolist() == [0, 0, 1, 1, 1] - - if is_full_test(): - jit = torch.jit.script(topk) - 
assert torch.equal(jit(x, 0.5, batch), perm1) - assert torch.equal(jit(x, 2, batch), perm2) - assert torch.equal(jit(x, 3, batch), perm3) - - def test_filter_adj(): edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 3], [1, 3, 0, 2, 1, 3, 0, 2]]) diff --git a/torch_geometric/nn/pool/asap.py b/torch_geometric/nn/pool/asap.py index 4fa07277a144..ec391c43601c 100644 --- a/torch_geometric/nn/pool/asap.py +++ b/torch_geometric/nn/pool/asap.py @@ -7,7 +7,7 @@ from torch.nn import Linear from torch_geometric.nn import LEConv -from torch_geometric.nn.pool.topk_pool import topk +from torch_geometric.nn.pool.select.topk import topk from torch_geometric.utils import ( add_remaining_self_loops, remove_self_loops, diff --git a/torch_geometric/nn/pool/pan_pool.py b/torch_geometric/nn/pool/pan_pool.py index f2f9f43a89dd..f0c0ab66d02c 100644 --- a/torch_geometric/nn/pool/pan_pool.py +++ b/torch_geometric/nn/pool/pan_pool.py @@ -4,7 +4,8 @@ from torch import Tensor from torch.nn import Parameter -from torch_geometric.nn.pool.topk_pool import filter_adj, topk +from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.nn.pool.topk_pool import filter_adj from torch_geometric.typing import OptTensor, SparseTensor from torch_geometric.utils import scatter, softmax diff --git a/torch_geometric/nn/pool/sag_pool.py b/torch_geometric/nn/pool/sag_pool.py index 4bf80b50a4a6..580467949e24 100644 --- a/torch_geometric/nn/pool/sag_pool.py +++ b/torch_geometric/nn/pool/sag_pool.py @@ -4,7 +4,8 @@ from torch import Tensor from torch_geometric.nn import GraphConv -from torch_geometric.nn.pool.topk_pool import filter_adj, topk +from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.nn.pool.topk_pool import filter_adj from torch_geometric.typing import OptTensor from torch_geometric.utils import softmax diff --git a/torch_geometric/nn/pool/select/__init__.py b/torch_geometric/nn/pool/select/__init__.py index 2a7278a33954..2a724b3133e3 100644 --- a/torch_geometric/nn/pool/select/__init__.py +++ b/torch_geometric/nn/pool/select/__init__.py @@ -1,6 +1,8 @@ from .base import Select, SelectOutput +from .topk import SelectTopK __all__ = [ 'Select', 'SelectOutput', + 'SelectTopK', ] diff --git a/torch_geometric/nn/pool/select/base.py b/torch_geometric/nn/pool/select/base.py index ee40f17f055f..ad729776d43c 100644 --- a/torch_geometric/nn/pool/select/base.py +++ b/torch_geometric/nn/pool/select/base.py @@ -14,6 +14,7 @@ class SelectOutput(CastMixin): Args: node_index (torch.Tensor): The indices of the selected nodes. + num_nodes (int): The number of nodes. cluster_index (torch.Tensor): The indices of the clusters each node in :obj:`node_index` is assigned to. num_clusters (int): The number of clusters. @@ -21,6 +22,7 @@ class SelectOutput(CastMixin): of the assignment of a node to its cluster. 
(default: :obj:`None`) """ node_index: Tensor + num_nodes: int cluster_index: Tensor num_clusters: int weight: Optional[Tensor] = None @@ -28,6 +30,7 @@ class SelectOutput(CastMixin): def __init__( self, node_index: Tensor, + num_nodes: int, cluster_index: Tensor, num_clusters: int, weight: Optional[Tensor] = None, @@ -55,6 +58,7 @@ def __init__( f"values (got {weight.numel()} values)") self.node_index = node_index + self.num_nodes = num_nodes self.cluster_index = cluster_index self.num_clusters = num_clusters self.weight = weight diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py new file mode 100644 index 000000000000..aabb4a691d5c --- /dev/null +++ b/torch_geometric/nn/pool/select/topk.py @@ -0,0 +1,135 @@ +from typing import Callable, Optional, Union + +import torch +from torch import Tensor + +from torch_geometric.nn.inits import uniform +from torch_geometric.nn.resolver import activation_resolver +from torch_geometric.utils import scatter, softmax + +from .base import Select, SelectOutput + + +# TODO (matthias) Benchmark and document this method. +def topk( + x: Tensor, + ratio: Optional[Union[float, int]], + batch: Tensor, + min_score: Optional[float] = None, + tol: float = 1e-7, +) -> Tensor: + if min_score is not None: + # Make sure that we do not drop all nodes in a graph. + scores_max = scatter(x, batch, reduce='max')[batch] - tol + scores_min = scores_max.clamp(max=min_score) + + perm = (x > scores_min).nonzero().view(-1) + return perm + + if ratio is not None: + num_nodes = scatter(batch.new_ones(x.size(0)), batch, reduce='sum') + batch_size, max_num_nodes = num_nodes.size(0), int(num_nodes.max()) + + cum_num_nodes = torch.cat( + [num_nodes.new_zeros(1), + num_nodes.cumsum(dim=0)[:-1]], dim=0) + + index = torch.arange(batch.size(0), dtype=torch.long, device=x.device) + index = (index - cum_num_nodes[batch]) + (batch * max_num_nodes) + + dense_x = x.new_full((batch_size * max_num_nodes, ), -60000.0) + dense_x[index] = x + dense_x = dense_x.view(batch_size, max_num_nodes) + + _, perm = dense_x.sort(dim=-1, descending=True) + + perm = perm + cum_num_nodes.view(-1, 1) + perm = perm.view(-1) + + if ratio >= 1: + k = num_nodes.new_full((num_nodes.size(0), ), int(ratio)) + k = torch.min(k, num_nodes) + else: + k = (float(ratio) * num_nodes.to(x.dtype)).ceil().to(torch.long) + + if isinstance(ratio, int) and (k == ratio).all(): + # If all graphs have exactly `ratio` or more than `ratio` entries, + # we can just pick the first entries in `perm` batch-wise: + index = torch.arange(batch_size, device=x.device) * max_num_nodes + index = index.view(-1, 1).repeat(1, ratio).view(-1) + index += torch.arange(ratio, device=x.device).repeat(batch_size) + else: + # Otherwise, compute indices per graph: + index = torch.cat([ + torch.arange(k[i], device=x.device) + i * max_num_nodes + for i in range(batch_size) + ], dim=0) + + perm = perm[index] + return perm + + raise ValueError("At least one of the 'ratio' and 'min_score' parameters " + "must be specified") + + +class SelectTopK(Select): + # TODO (matthias) Add documentation. 
+ def __init__( + self, + in_channels: int, + ratio: Union[int, float] = 0.5, + min_score: Optional[float] = None, + act: Union[str, Callable] = 'tanh', + ): + super().__init__() + + if ratio is None and min_score is None: + raise ValueError(f"At least one of the 'ratio' and 'min_score' " + f"parameters must be specified in " + f"'{self.__class__.__name__}'") + + self.in_channels = in_channels + self.ratio = ratio + self.min_score = min_score + self.act = activation_resolver(act) + + self.weight = torch.nn.Parameter(torch.Tensor(1, in_channels)) + + self.reset_parameters() + + def reset_parameters(self): + uniform(self.in_channels, self.weight) + + def forward( + self, + x: Tensor, + batch: Optional[Tensor] = None, + ) -> SelectOutput: + """""" + if batch is None: + batch = x.new_zeros(x.size(0), dtype=torch.long) + + x.view(-1, 1) if x.dim() == 1 else x + score = (x * self.weight).sum(dim=-1) + + if self.min_score is None: + score = self.act(score / self.weight.norm(p=2, dim=-1)) + else: + score = softmax(score, batch) + + node_index = topk(score, self.ratio, batch, self.min_score) + + return SelectOutput( + node_index=node_index, + num_nodes=x.size(0), + cluster_index=torch.arange(node_index.size(0), device=x.device), + num_clusters=node_index.size(0), + weight=score[node_index], + ) + + def __repr__(self) -> str: + if self.min_score is None: + arg = f'ratio={self.ratio}' + else: + arg = f'min_score={self.min_score}' + return f'{self.__class__.__name__}({self.in_channels}, {arg})' diff --git a/torch_geometric/nn/pool/topk_pool.py b/torch_geometric/nn/pool/topk_pool.py index 9984728704d6..661638e3a0f6 100644 --- a/torch_geometric/nn/pool/topk_pool.py +++ b/torch_geometric/nn/pool/topk_pool.py @@ -2,75 +2,11 @@ import torch from torch import Tensor -from torch.nn import Parameter -from torch_geometric.nn.inits import uniform -from torch_geometric.utils import scatter, softmax +from torch_geometric.nn.pool.select import SelectTopK from torch_geometric.utils.num_nodes import maybe_num_nodes -def topk( - x: Tensor, - ratio: Optional[Union[float, int]], - batch: Tensor, - min_score: Optional[float] = None, - tol: float = 1e-7, -) -> Tensor: - if min_score is not None: - # Make sure that we do not drop all nodes in a graph. 
- scores_max = scatter(x, batch, reduce='max')[batch] - tol - scores_min = scores_max.clamp(max=min_score) - - perm = (x > scores_min).nonzero().view(-1) - - elif ratio is not None: - num_nodes = scatter(batch.new_ones(x.size(0)), batch, reduce='sum') - batch_size, max_num_nodes = num_nodes.size(0), int(num_nodes.max()) - - cum_num_nodes = torch.cat( - [num_nodes.new_zeros(1), - num_nodes.cumsum(dim=0)[:-1]], dim=0) - - index = torch.arange(batch.size(0), dtype=torch.long, device=x.device) - index = (index - cum_num_nodes[batch]) + (batch * max_num_nodes) - - dense_x = x.new_full((batch_size * max_num_nodes, ), -60000.0) - dense_x[index] = x - dense_x = dense_x.view(batch_size, max_num_nodes) - - _, perm = dense_x.sort(dim=-1, descending=True) - - perm = perm + cum_num_nodes.view(-1, 1) - perm = perm.view(-1) - - if ratio >= 1: - k = num_nodes.new_full((num_nodes.size(0), ), int(ratio)) - k = torch.min(k, num_nodes) - else: - k = (float(ratio) * num_nodes.to(x.dtype)).ceil().to(torch.long) - - if isinstance(ratio, int) and (k == ratio).all(): - # If all graphs have exactly `ratio` or more than `ratio` entries, - # we can just pick the first entries in `perm` batch-wise: - index = torch.arange(batch_size, device=x.device) * max_num_nodes - index = index.view(-1, 1).repeat(1, ratio).view(-1) - index += torch.arange(ratio, device=x.device).repeat(batch_size) - else: - # Otherwise, compute indices per graph: - index = torch.cat([ - torch.arange(k[i], device=x.device) + i * max_num_nodes - for i in range(batch_size) - ], dim=0) - - perm = perm[index] - - else: - raise ValueError("At least one of 'min_score' and 'ratio' parameters " - "must be specified") - - return perm - - def filter_adj( edge_index: Tensor, edge_attr: Optional[Tensor], @@ -157,22 +93,18 @@ def __init__( ): super().__init__() - if isinstance(nonlinearity, str): - nonlinearity = getattr(torch, nonlinearity) - self.in_channels = in_channels self.ratio = ratio self.min_score = min_score self.multiplier = multiplier - self.nonlinearity = nonlinearity - self.weight = Parameter(torch.Tensor(1, in_channels)) + self.select = SelectTopK(in_channels, ratio, min_score, nonlinearity) self.reset_parameters() def reset_parameters(self): r"""Resets all learnable parameters of the module.""" - uniform(self.in_channels, self.weight) + self.select.reset_parameters() def forward( self, @@ -199,23 +131,17 @@ def forward( batch = edge_index.new_zeros(x.size(0)) attn = x if attn is None else attn - attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn - score = (attn * self.weight).sum(dim=-1) - - if self.min_score is None: - score = self.nonlinearity(score / self.weight.norm(p=2, dim=-1)) - else: - score = softmax(score, batch) + select_output = self.select(attn, batch) - perm = topk(score, self.ratio, batch, self.min_score) - x = x[perm] * score[perm].view(-1, 1) + perm = select_output.node_index + x = x[perm] * select_output.weight.view(-1, 1) x = self.multiplier * x if self.multiplier != 1 else x batch = batch[perm] edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, - num_nodes=score.size(0)) + num_nodes=select_output.num_nodes) - return x, edge_index, edge_attr, batch, perm, score[perm] + return x, edge_index, edge_attr, batch, perm, select_output.weight def __repr__(self) -> str: if self.min_score is None: From 6f710af791144a6bb7e9667c6c5901f91e901524 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 8 May 2023 15:56:53 +0200 Subject: [PATCH 1164/2432] Support `MultiAggregation` in `aggregation_resolver` (#7324) --- 
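A rough usage sketch of what this enables, mirroring the `test_multi_aggregation_resolver` test added below (the tensors and the particular choice of aggregations are made up, and the output shape assumes `MultiAggregation`'s default concatenation mode): a list or tuple passed to `aggregation_resolver` now resolves to a single `MultiAggregation` module.

```python
import torch

from torch_geometric.nn import MultiAggregation
from torch_geometric.nn.resolver import aggregation_resolver

# A single string still resolves to one aggregation module:
mean_aggr = aggregation_resolver('mean')

# A list (or tuple) of queries now resolves to a `MultiAggregation` wrapper:
aggr = aggregation_resolver(['sum', 'mean', 'max'])
assert isinstance(aggr, MultiAggregation)

# Aggregate six rows into three groups; the three results are concatenated
# along the feature dimension:
x = torch.randn(6, 16)
index = torch.tensor([0, 0, 1, 1, 1, 2])
out = aggr(x, index)
assert out.size() == (3, 3 * 16)
```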
test/nn/test_resolver.py | 50 ++++++++++++++-------- torch_geometric/nn/conv/message_passing.py | 12 ++---- torch_geometric/nn/resolver.py | 3 ++ 3 files changed, 37 insertions(+), 28 deletions(-) diff --git a/test/nn/test_resolver.py b/test/nn/test_resolver.py index cc3dac69e26e..321df85d479c 100644 --- a/test/nn/test_resolver.py +++ b/test/nn/test_resolver.py @@ -23,16 +23,16 @@ def test_activation_resolver(): @pytest.mark.parametrize('aggr_tuple', [ - (torch_geometric.nn.aggr.MeanAggregation, 'mean'), - (torch_geometric.nn.aggr.SumAggregation, 'sum'), - (torch_geometric.nn.aggr.SumAggregation, 'add'), - (torch_geometric.nn.aggr.MaxAggregation, 'max'), - (torch_geometric.nn.aggr.MinAggregation, 'min'), - (torch_geometric.nn.aggr.MulAggregation, 'mul'), - (torch_geometric.nn.aggr.VarAggregation, 'var'), - (torch_geometric.nn.aggr.StdAggregation, 'std'), - (torch_geometric.nn.aggr.SoftmaxAggregation, 'softmax'), - (torch_geometric.nn.aggr.PowerMeanAggregation, 'powermean'), + (torch_geometric.nn.MeanAggregation, 'mean'), + (torch_geometric.nn.SumAggregation, 'sum'), + (torch_geometric.nn.SumAggregation, 'add'), + (torch_geometric.nn.MaxAggregation, 'max'), + (torch_geometric.nn.MinAggregation, 'min'), + (torch_geometric.nn.MulAggregation, 'mul'), + (torch_geometric.nn.VarAggregation, 'var'), + (torch_geometric.nn.StdAggregation, 'std'), + (torch_geometric.nn.SoftmaxAggregation, 'softmax'), + (torch_geometric.nn.PowerMeanAggregation, 'powermean'), ]) def test_aggregation_resolver(aggr_tuple): aggr_module, aggr_repr = aggr_tuple @@ -40,16 +40,28 @@ def test_aggregation_resolver(aggr_tuple): assert isinstance(aggregation_resolver(aggr_repr), aggr_module) +def test_multi_aggregation_resolver(): + aggr = aggregation_resolver(None) + assert aggr is None + + aggr = aggregation_resolver(['sum', 'mean', None]) + assert isinstance(aggr, torch_geometric.nn.MultiAggregation) + assert len(aggr.aggrs) == 3 + assert isinstance(aggr.aggrs[0], torch_geometric.nn.SumAggregation) + assert isinstance(aggr.aggrs[1], torch_geometric.nn.MeanAggregation) + assert aggr.aggrs[2] is None + + @pytest.mark.parametrize('norm_tuple', [ - (torch_geometric.nn.norm.BatchNorm, 'batch', (16, )), - (torch_geometric.nn.norm.BatchNorm, 'batch_norm', (16, )), - (torch_geometric.nn.norm.InstanceNorm, 'instance_norm', (16, )), - (torch_geometric.nn.norm.LayerNorm, 'layer_norm', (16, )), - (torch_geometric.nn.norm.GraphNorm, 'graph_norm', (16, )), - (torch_geometric.nn.norm.GraphSizeNorm, 'graphsize_norm', ()), - (torch_geometric.nn.norm.PairNorm, 'pair_norm', ()), - (torch_geometric.nn.norm.MessageNorm, 'message_norm', ()), - (torch_geometric.nn.norm.DiffGroupNorm, 'diffgroup_norm', (16, 4)), + (torch_geometric.nn.BatchNorm, 'batch', (16, )), + (torch_geometric.nn.BatchNorm, 'batch_norm', (16, )), + (torch_geometric.nn.InstanceNorm, 'instance_norm', (16, )), + (torch_geometric.nn.LayerNorm, 'layer_norm', (16, )), + (torch_geometric.nn.GraphNorm, 'graph_norm', (16, )), + (torch_geometric.nn.GraphSizeNorm, 'graphsize_norm', ()), + (torch_geometric.nn.PairNorm, 'pair_norm', ()), + (torch_geometric.nn.MessageNorm, 'message_norm', ()), + (torch_geometric.nn.DiffGroupNorm, 'diffgroup_norm', (16, 4)), ]) def test_normalization_resolver(norm_tuple): norm_module, norm_repr, norm_args = norm_tuple diff --git a/torch_geometric/nn/conv/message_passing.py b/torch_geometric/nn/conv/message_passing.py index 95339bf183d7..747f041c5d45 100644 --- a/torch_geometric/nn/conv/message_passing.py +++ b/torch_geometric/nn/conv/message_passing.py @@ -21,7 
+21,7 @@ from torch import Tensor from torch.utils.hooks import RemovableHandle -from torch_geometric.nn.aggr import Aggregation, MultiAggregation +from torch_geometric.nn.aggr import Aggregation from torch_geometric.nn.conv.utils.inspector import ( Inspector, func_body_repr, @@ -130,18 +130,12 @@ def __init__( if aggr is None: self.aggr = None - self.aggr_module = None elif isinstance(aggr, (str, Aggregation)): self.aggr = str(aggr) - self.aggr_module = aggr_resolver(aggr, **(aggr_kwargs or {})) elif isinstance(aggr, (tuple, list)): self.aggr = [str(x) for x in aggr] - self.aggr_module = MultiAggregation(aggr, **(aggr_kwargs or {})) - else: - raise ValueError( - f"Only strings, list, tuples and instances of" - f"`torch_geometric.nn.aggr.Aggregation` are " - f"valid aggregation schemes (got '{type(aggr)}').") + + self.aggr_module = aggr_resolver(aggr, **(aggr_kwargs or {})) self.flow = flow diff --git a/torch_geometric/nn/resolver.py b/torch_geometric/nn/resolver.py index 099ec3ff736f..0bba264f25d0 100644 --- a/torch_geometric/nn/resolver.py +++ b/torch_geometric/nn/resolver.py @@ -63,6 +63,9 @@ def normalization_resolver(query: Union[Any, str], *args, **kwargs): def aggregation_resolver(query: Union[Any, str], *args, **kwargs): import torch_geometric.nn.aggr as aggr + if isinstance(query, (list, tuple)): + return aggr.MultiAggregation(query, *args, **kwargs) + base_cls = aggr.Aggregation aggrs = [ aggr for aggr in vars(aggr).values() From 3a7c4f40c6a117339f0b3ac4910ba086bb98ae2c Mon Sep 17 00:00:00 2001 From: Halve Luve <1063082756@qq.com> Date: Mon, 8 May 2023 22:41:46 +0800 Subject: [PATCH 1165/2432] Fixed a spelling error in HeteroBatchNorm.forward (#7325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a spelling error "self.running_Var" at line 178 in batch_norm.py (`forward` of `HeteroBatchNorm`) that would cause an AttributeError during testing (`model.eval()`). > epoch [1/200] train: 100%|██████████████████| 1081/1081 [01:39<00:00, 10.89it/s, train_loss=3.15] > 0%| | 0/270 [00:00 Traceback (most recent call last): > File "/home/amax/lhy/GraphCompton/train_hetero.py", line 98, in > out = model(batch) > File "/home/amax/anaconda3/envs/torch2/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl > return forward_call(*args, **kwargs) > File "/home/amax/lhy/GraphCompton/Models/GraphCompton.py", line 83, in forward > x = self.bn(x, type_vec) > File "/home/amax/anaconda3/envs/torch2/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl > return forward_call(*args, **kwargs) > File "/home/amax/anaconda3/envs/torch2/lib/python3.10/site-packages/torch_geometric/nn/norm/batch_norm.py", line 178, in forward > mean, var = self.running_mean, self.running_Var > File "/home/amax/anaconda3/envs/torch2/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1614, in __getattr__ > raise AttributeError("'{}' object has no attribute '{}'".format( > AttributeError: **'HeteroBatchNorm' object has no attribute 'running_Var'. Did you mean: 'running_var'?** It's just a simple spelling bug though. Actually this is my first time contributing to a large open-source project😂 BTW, could you guys provide some examples of implementing `HeteroBatchNorm`? It would be more easy to understand how to use it with some example, and I referred to [https://github.com/puririshi98/rgcn_pyg_lib_forward_bench/blob/70564721508f10cbe3717347dab09b729dcbdfa2/bench_heteronorm.py#L4](url) when I try. 
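For reference, here is a minimal usage sketch (the import path and constructor arguments are assumptions on my side; only the `norm(x, type_vec)` call and the `eval()` code path come from the traceback above):

```python
import torch
from torch_geometric.nn import HeteroBatchNorm  # import path assumed

norm = HeteroBatchNorm(in_channels=16, num_types=3)  # constructor args assumed
norm.eval()  # eval() + track_running_stats is the branch that raised the error

x = torch.randn(10, 16)                 # one feature row per node
type_vec = torch.randint(0, 3, (10, ))  # the type id of each node
out = norm(x, type_vec)                 # call signature as in the traceback
```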
Hope it may help. Co-authored-by: Jintang Li --- torch_geometric/nn/norm/batch_norm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/nn/norm/batch_norm.py b/torch_geometric/nn/norm/batch_norm.py index 933d9fd4c9dd..6932cfad62cb 100644 --- a/torch_geometric/nn/norm/batch_norm.py +++ b/torch_geometric/nn/norm/batch_norm.py @@ -175,7 +175,7 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: type_vec (torch.Tensor): A vector that maps each entry to a type. """ if not self.training and self.track_running_stats: - mean, var = self.running_mean, self.running_Var + mean, var = self.running_mean, self.running_var else: with torch.no_grad(): mean, var = self.mean_var(x, type_vec, dim_size=self.num_types) From 9074e2e5f13bc2a0ab094f92febde40b29adddf3 Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Tue, 9 May 2023 13:28:36 +0800 Subject: [PATCH 1166/2432] Add `inter_cluster_edges` argument to `ClusterData` to keep inter-subgraph edge connection when do graph partitioning (#7326) Signed-off-by: Liu, Kaixuan Co-authored-by: ZhengHongming888 Co-authored-by: Matthias Fey Co-authored-by: Bartlomiej Wroblewski Co-authored-by: Krzysztof Kozlowski Co-authored-by: Rishi Puri Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Viktor Stenby Co-authored-by: Jinu Sunil Co-authored-by: Krzysztof Kozlowski Co-authored-by: Berke Kisin Co-authored-by: berke.kisin Co-authored-by: toensoff Co-authored-by: Piotr Chmiel Co-authored-by: Ramona Bendias Co-authored-by: Saurav Maheshkar Co-authored-by: Amund Vedal <22004000+vedal@users.noreply.github.com> Co-authored-by: Jintang Li Co-authored-by: Remy Liu <36778645+RemyLau@users.noreply.github.com> Co-authored-by: Gleb Bazhenov <43088667+gvbazhenov@users.noreply.github.com> --- CHANGELOG.md | 1 + test/loader/test_cluster.py | 31 +++++++++++++++++++++++++++++++ torch_geometric/loader/cluster.py | 20 ++++++++++++++++---- 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b1b616f2161a..3f97b84466b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326)) - Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308)) - Added support for tuples as keys in `ModuleDict`/`ParameterDict` ([#7294](https://github.com/pyg-team/pytorch_geometric/pull/7294)) - Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894)) diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index d03b27cf6a3b..f59ffddc30aa 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -103,6 +103,37 @@ def test_cluster_gcn(): ] +@pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') +def test_keep_inter_cluster_edges(): + adj = torch.tensor([ + [1, 1, 1, 0, 1, 0], + [1, 1, 0, 1, 0, 1], + [1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1], + [1, 0, 1, 0, 1, 0], + [0, 1, 0, 1, 0, 1], + ]) + + x = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + edge_index = adj.nonzero(as_tuple=False).t() + edge_attr = torch.arange(edge_index.size(1)) + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + data.num_nodes = 6 + + cluster_data = ClusterData(data, num_parts=2, log=False, + keep_inter_cluster_edges=True) + + data = cluster_data[0] + assert data.edge_index.tolist() == [[0, 0, 0, 0, 1, 1, 1, 2, 2, 2], + [0, 1, 2, 3, 0, 1, 2, 0, 1, 2]] + assert data.edge_attr.tolist() == [0, 2, 3, 1, 8, 9, 10, 14, 15, 16] + + data = cluster_data[1] + assert data.edge_index.tolist() == [[0, 0, 0, 0, 1, 1, 1, 2, 2, 2], + [0, 3, 4, 5, 3, 4, 5, 3, 4, 5]] + assert data.edge_attr.tolist() == [4, 5, 6, 7, 11, 12, 13, 17, 18, 19] + + @onlyFullTest @pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') def test_cluster_gcn_correctness(get_dataset): diff --git a/torch_geometric/loader/cluster.py b/torch_geometric/loader/cluster.py index 75aa537a2eaa..23824f174ea6 100644 --- a/torch_geometric/loader/cluster.py +++ b/torch_geometric/loader/cluster.py @@ -29,13 +29,22 @@ class ClusterData(torch.utils.data.Dataset): :obj:`save_dir` directory for faster re-use. (default: :obj:`None`) log (bool, optional): If set to :obj:`False`, will not log any progress. (default: :obj:`True`) + keep_inter_cluster_edges (bool, optional): If set to :obj:`True`, + will keep inter-cluster edge connections. 
(default: :obj:`False`) """ - def __init__(self, data, num_parts: int, recursive: bool = False, - save_dir: Optional[str] = None, log: bool = True): - + def __init__( + self, + data, + num_parts: int, + recursive: bool = False, + save_dir: Optional[str] = None, + log: bool = True, + keep_inter_cluster_edges: bool = False, + ): assert data.edge_index is not None self.num_parts = num_parts + self.keep_inter_cluster_edges = keep_inter_cluster_edges recursive_str = '_recursive' if recursive else '' filename = f'partition_{num_parts}{recursive_str}.pt' @@ -85,7 +94,10 @@ def __getitem__(self, idx): data = copy.copy(self.data) adj, data.adj = data.adj, None - adj = adj.narrow(0, start, length).narrow(1, start, length) + adj = adj.narrow(0, start, length) + if not self.keep_inter_cluster_edges: + adj = adj.narrow(1, start, length) + edge_idx = adj.storage.value() for key, value in data: From 6ddb3e26c165185831f72001b3d10a1e8b9bed29 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 9 May 2023 07:46:00 +0200 Subject: [PATCH 1167/2432] Allow GraphGym modules to be used in isolation (#7327) --- torch_geometric/graphgym/model_builder.py | 5 +++++ torch_geometric/graphgym/models/head.py | 11 ++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/torch_geometric/graphgym/model_builder.py b/torch_geometric/graphgym/model_builder.py index 246ccfde5c1c..4990b6ec6cdb 100644 --- a/torch_geometric/graphgym/model_builder.py +++ b/torch_geometric/graphgym/model_builder.py @@ -61,6 +61,11 @@ def post_mp(self) -> torch.nn.Module: def pre_mp(self) -> torch.nn.Module: return self.model.pre_mp + def lr_scheduler_step(self, *args, **kwargs): + # Needed for PyTorch 2.0 since the base class of LR schedulers changed. + # TODO Remove once we only want to support PyTorch Lightning >= 2.0. + return super().lr_scheduler_step(*args, **kwargs) + def create_model(to_device=True, dim_in=None, dim_out=None) -> GraphGymModule: r"""Create model for graph machine learning. 
diff --git a/torch_geometric/graphgym/models/head.py b/torch_geometric/graphgym/models/head.py index e841972141b8..923ac32d6699 100644 --- a/torch_geometric/graphgym/models/head.py +++ b/torch_geometric/graphgym/models/head.py @@ -27,9 +27,14 @@ def __init__(self, dim_in, dim_out): has_act=False, has_bias=True, cfg=cfg)) def _apply_index(self, batch): - mask = '{}_mask'.format(batch.split) - return batch.x[batch[mask]], \ - batch.y[batch[mask]] + x = batch.x + y = batch.y if 'y' in batch else None + + if 'split' not in batch: + return x, y + + mask = batch[f'{batch.split}_mask'] + return x[mask], y[mask] if y is not None else None def forward(self, batch): batch = self.layer_post_mp(batch) From b2ee130059e7faff065dc9b8df632595e0724e18 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 10 May 2023 10:05:08 +0200 Subject: [PATCH 1168/2432] Drop sampling info for `SubgraphType.bidirectional` (#7338) --- test/loader/test_neighbor_loader.py | 2 -- torch_geometric/sampler/base.py | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 953b628822a6..ac965c2fb9e5 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -154,14 +154,12 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): # Test node type selection: assert set(batch.node_types) == {'paper', 'author'} - assert len(batch['paper']) == 5 if WITH_PYG_LIB else 4 assert batch['paper'].n_id.size() == (batch['paper'].num_nodes, ) assert batch['paper'].x.size(0) <= 100 assert batch['paper'].input_id.numel() == batch_size assert batch['paper'].batch_size == batch_size assert batch['paper'].x.min() >= 0 and batch['paper'].x.max() < 100 - assert len(batch['author']) == 3 if WITH_PYG_LIB else 2 assert batch['author'].n_id.size() == (batch['author'].num_nodes, ) assert batch['author'].x.size(0) <= 200 assert batch['author'].x.min() >= 100 and batch['author'].x.max() < 300 diff --git a/torch_geometric/sampler/base.py b/torch_geometric/sampler/base.py index 2d6d23e1fbe1..f1566932b753 100644 --- a/torch_geometric/sampler/base.py +++ b/torch_geometric/sampler/base.py @@ -167,7 +167,7 @@ def to_bidirectional(self) -> 'SamplerOutput': edge_id=self.edge, rev_edge_id=self.edge, ) - out.num_sampled_edges = None + out.num_sampled_nodes = out.num_sampled_edges = None return out @@ -280,7 +280,7 @@ def to_bidirectional(self) -> 'SamplerOutput': f"since the edge type {edge_type} does not " f"seem to have a reverse edge type") - out.num_sampled_edges = None + out.num_sampled_nodes = out.num_sampled_edges = None return out From cabcd4097442ba60aa1efa11e1619dd9bb8fb527 Mon Sep 17 00:00:00 2001 From: Kamil Andrzejewski Date: Wed, 10 May 2023 15:28:38 +0200 Subject: [PATCH 1169/2432] Add `add_pad_mask` argument to `Pad` transform (#7339) add_masks_to_data is an optional argument that will cause the addition of two masks to data object. They will indicate which elements in the data are real and which are added as a padding. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/transforms/test_pad.py | 161 +++++++++++++++++++++--------- torch_geometric/transforms/pad.py | 18 ++++ 3 files changed, 133 insertions(+), 47 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f97b84466b5..bea641c9a513 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339)) - Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326)) - Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308)) - Added support for tuples as keys in `ModuleDict`/`ParameterDict` ([#7294](https://github.com/pyg-team/pytorch_geometric/pull/7294)) diff --git a/test/transforms/test_pad.py b/test/transforms/test_pad.py index e1fea40cecab..69225a942c37 100644 --- a/test/transforms/test_pad.py +++ b/test/transforms/test_pad.py @@ -56,12 +56,23 @@ def _generate_heterodata_edges( yield edge_type, attr -def _check_homo_data_nodes(original: Data, padded: Data, - max_num_nodes: Union[int, Dict[NodeType, int]], - node_pad_value: Optional[Padding] = None, - exclude_keys: Optional[List[str]] = None): +def _check_homo_data_nodes( + original: Data, + padded: Data, + max_num_nodes: Union[int, Dict[NodeType, int]], + node_pad_value: Optional[Padding] = None, + is_mask_available: bool = False, + exclude_keys: Optional[List[str]] = None, +): assert padded.num_nodes == max_num_nodes + compare_pad_start_idx = original.num_nodes + + if is_mask_available: + assert padded.pad_node_mask.numel() == padded.num_nodes + assert torch.all(padded.pad_node_mask[:compare_pad_start_idx]) + assert not torch.any(padded.pad_node_mask[compare_pad_start_idx:]) + for attr in _generate_homodata_node_attrs(original): if attr in exclude_keys: assert attr not in padded.keys @@ -73,7 +84,7 @@ def _check_homo_data_nodes(original: Data, padded: Data, continue assert padded[attr].shape[0] == max_num_nodes - compare_pad_start_idx = original[attr].shape[0] + # Check values in padded area. pad_value = node_pad_value.get_value( None, attr) if node_pad_value is not None else 0.0 @@ -86,10 +97,14 @@ def _check_homo_data_nodes(original: Data, padded: Data, padded[attr][:compare_pad_start_idx]) -def _check_homo_data_edges(original: Data, padded: Data, - max_num_edges: Optional[int] = None, - edge_pad_value: Optional[Padding] = None, - exclude_keys: Optional[List[str]] = None): +def _check_homo_data_edges( + original: Data, + padded: Data, + max_num_edges: Optional[int] = None, + edge_pad_value: Optional[Padding] = None, + is_mask_available: bool = False, + exclude_keys: Optional[List[str]] = None, +): # Check edge index attribute. if max_num_edges is None: max_num_edges = padded.num_nodes**2 @@ -108,6 +123,11 @@ def _check_homo_data_edges(original: Data, padded: Data, assert torch.equal(original.edge_index, padded.edge_index[:, :compare_pad_start_idx]) + if is_mask_available: + assert padded.pad_edge_mask.numel() == padded.num_edges + assert torch.all(padded.pad_edge_mask[:compare_pad_start_idx]) + assert not torch.any(padded.pad_edge_mask[compare_pad_start_idx:]) + # Check other attributes. 
for attr in _generate_homodata_edge_attrs(original): if attr == 'edge_index': @@ -135,10 +155,17 @@ def _check_homo_data_edges(original: Data, padded: Data, padded[attr][:compare_pad_start_idx, :]) -def _check_hetero_data_nodes(original: HeteroData, padded: HeteroData, - max_num_nodes: Union[int, Dict[NodeType, int]], - node_pad_value: Optional[Padding] = None, - exclude_keys: Optional[List[str]] = None): +def _check_hetero_data_nodes( + original: HeteroData, + padded: HeteroData, + max_num_nodes: Union[int, Dict[NodeType, int]], + node_pad_value: Optional[Padding] = None, + is_mask_available: bool = False, + exclude_keys: Optional[List[str]] = None, +): + if is_mask_available: + for store in padded.node_stores: + assert 'pad_node_mask' in store expected_nodes = max_num_nodes @@ -152,9 +179,17 @@ def _check_hetero_data_nodes(original: HeteroData, padded: HeteroData, if not isinstance(padded[node_type][attr], torch.Tensor): continue - original_tensor = original[node_type][attr] + compare_pad_start_idx = original[node_type].num_nodes padded_tensor = padded[node_type][attr] + if attr == 'pad_node_mask': + assert padded_tensor.numel() == padded[node_type].num_nodes + assert torch.all(padded_tensor[:compare_pad_start_idx]) + assert not torch.any(padded_tensor[compare_pad_start_idx:]) + continue + + original_tensor = original[node_type][attr] + # Check the number of nodes. if isinstance(max_num_nodes, dict): expected_nodes = max_num_nodes[node_type] @@ -172,12 +207,17 @@ def _check_hetero_data_nodes(original: HeteroData, padded: HeteroData, padded_tensor[:compare_pad_start_idx]) -def _check_hetero_data_edges(original: HeteroData, padded: HeteroData, - max_num_edges: Optional[Union[int, - Dict[EdgeType, - int]]] = None, - edge_pad_value: Optional[Padding] = None, - exclude_keys: Optional[List[str]] = None): +def _check_hetero_data_edges( + original: HeteroData, + padded: HeteroData, + max_num_edges: Optional[Union[int, Dict[EdgeType, int]]] = None, + edge_pad_value: Optional[Padding] = None, + is_mask_available: bool = False, + exclude_keys: Optional[List[str]] = None, +): + if is_mask_available: + for store in padded.edge_stores: + assert 'pad_edge_mask' in store for edge_type, attr in _generate_heterodata_edges(padded): if attr in exclude_keys: @@ -190,9 +230,16 @@ def _check_hetero_data_edges(original: HeteroData, padded: HeteroData, continue compare_pad_start_idx = original[edge_type].num_edges - original_tensor = original[edge_type][attr] padded_tensor = padded[edge_type][attr] + if attr == 'pad_edge_mask': + assert padded_tensor.numel() == padded[edge_type].num_edges + assert torch.all(padded_tensor[:compare_pad_start_idx]) + assert not torch.any(padded_tensor[compare_pad_start_idx:]) + continue + + original_tensor = original[edge_type][attr] + if isinstance(max_num_edges, numbers.Number): expected_num_edges = max_num_edges elif max_num_edges is None or edge_type not in max_num_edges.keys(): @@ -235,33 +282,40 @@ def _check_hetero_data_edges(original: HeteroData, padded: HeteroData, padded_tensor[:compare_pad_start_idx, :]) -def _check_data(original: Union[Data, HeteroData], padded: Union[Data, - HeteroData], - max_num_nodes: Union[int, Dict[NodeType, int]], - max_num_edges: Optional[Union[int, Dict[EdgeType, - int]]] = None, - node_pad_value: Optional[Union[Padding, int, float]] = None, - edge_pad_value: Optional[Union[Padding, int, float]] = None, - exclude_keys: Optional[List[str]] = None): +def _check_data( + original: Union[Data, HeteroData], + padded: Union[Data, HeteroData], + 
max_num_nodes: Union[int, Dict[NodeType, int]], + max_num_edges: Optional[Union[int, Dict[EdgeType, int]]] = None, + node_pad_value: Optional[Union[Padding, int, float]] = None, + edge_pad_value: Optional[Union[Padding, int, float]] = None, + is_mask_available: bool = False, + exclude_keys: Optional[List[str]] = None, +): if not isinstance(node_pad_value, Padding) and node_pad_value is not None: node_pad_value = UniformPadding(node_pad_value) if not isinstance(edge_pad_value, Padding) and edge_pad_value is not None: edge_pad_value = UniformPadding(edge_pad_value) + if is_mask_available is None: + is_mask_available = False + if exclude_keys is None: exclude_keys = [] if isinstance(original, Data): _check_homo_data_nodes(original, padded, max_num_nodes, node_pad_value, - exclude_keys) + is_mask_available, exclude_keys) _check_homo_data_edges(original, padded, max_num_edges, edge_pad_value, - exclude_keys) + is_mask_available, exclude_keys) else: _check_hetero_data_nodes(original, padded, max_num_nodes, - node_pad_value, exclude_keys) + node_pad_value, is_mask_available, + exclude_keys) _check_hetero_data_edges(original, padded, max_num_edges, - edge_pad_value, exclude_keys) + edge_pad_value, is_mask_available, + exclude_keys) def test_pad_repr(): @@ -273,35 +327,42 @@ def test_pad_repr(): @pytest.mark.parametrize('data', [fake_data(), fake_hetero_data()]) @pytest.mark.parametrize('num_nodes', [32, 64]) -def test_pad_auto_edges(data, num_nodes): +@pytest.mark.parametrize('add_pad_mask', [True, False]) +def test_pad_auto_edges(data, num_nodes, add_pad_mask): original = data data = deepcopy(data) - transform = Pad(max_num_nodes=num_nodes) + transform = Pad(max_num_nodes=num_nodes, add_pad_mask=add_pad_mask) padded = transform(data) - _check_data(original, padded, num_nodes) + _check_data(original, padded, num_nodes, is_mask_available=add_pad_mask) @pytest.mark.parametrize('num_nodes', [32, 64]) @pytest.mark.parametrize('num_edges', [300, 411]) -def test_pad_data_explicit_edges(num_nodes, num_edges): +@pytest.mark.parametrize('add_pad_mask', [True, False]) +def test_pad_data_explicit_edges(num_nodes, num_edges, add_pad_mask): data = fake_data() original = deepcopy(data) - transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges) + transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges, + add_pad_mask=add_pad_mask) padded = transform(data) - _check_data(original, padded, num_nodes, num_edges) + _check_data(original, padded, num_nodes, num_edges, + is_mask_available=add_pad_mask) @pytest.mark.parametrize('num_nodes', [32, {'v0': 64, 'v1': 36}]) @pytest.mark.parametrize('num_edges', [300, {('v0', 'e0', 'v1'): 397}]) -def test_pad_heterodata_explicit_edges(num_nodes, num_edges): +@pytest.mark.parametrize('add_pad_mask', [True, False]) +def test_pad_heterodata_explicit_edges(num_nodes, num_edges, add_pad_mask): data = fake_hetero_data() original = deepcopy(data) - transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges) + transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges, + add_pad_mask=add_pad_mask) padded = transform(data) - _check_data(original, padded, num_nodes, num_edges) + _check_data(original, padded, num_nodes, num_edges, + is_mask_available=add_pad_mask) @pytest.mark.parametrize('node_pad_value', [10, AttrNamePadding({'x': 3.0})]) @@ -348,16 +409,22 @@ def test_pad_heterodata_pad_values(node_pad_value, edge_pad_value): @pytest.mark.parametrize('data', [fake_data(), fake_hetero_data()]) -@pytest.mark.parametrize('exclude_keys', - [['y'], 
['edge_attr'], ['y', 'edge_attr']]) -def test_pad_data_exclude_keys(data, exclude_keys): +@pytest.mark.parametrize('add_pad_mask', [True, False]) +@pytest.mark.parametrize('exclude_keys', [ + ['y'], + ['edge_attr'], + ['y', 'edge_attr'], +]) +def test_pad_data_exclude_keys(data, add_pad_mask, exclude_keys): original = data data = deepcopy(data) num_nodes = 32 - transform = Pad(max_num_nodes=num_nodes, exclude_keys=exclude_keys) + transform = Pad(max_num_nodes=num_nodes, add_pad_mask=add_pad_mask, + exclude_keys=exclude_keys) padded = transform(data) - _check_data(original, padded, num_nodes, exclude_keys=exclude_keys) + _check_data(original, padded, num_nodes, is_mask_available=add_pad_mask, + exclude_keys=exclude_keys) @pytest.mark.parametrize('data', [fake_data(), fake_hetero_data(node_types=1)]) diff --git a/torch_geometric/transforms/pad.py b/torch_geometric/transforms/pad.py index f9fff7a66a55..b92b8a983884 100644 --- a/torch_geometric/transforms/pad.py +++ b/torch_geometric/transforms/pad.py @@ -265,6 +265,12 @@ class Pad(BaseTransform): mask_pad_value (bool, optional): The fill value to use for :obj:`train_mask`, :obj:`val_mask` and :obj:`test_mask` attributes (default: :obj:`False`). + add_pad_mask (bool, optional): If set to :obj:`True`, will attach + node-level :obj:`pad_node_mask` and edge-level :obj:`pad_edge_mask` + attributes to the output which indicates which elements in the data + are real (represented by :obj:`True`) and which were added as a + result of padding (represented by :obj:`False`). + (default: :obj:`False`) exclude_keys ([str], optional): Keys to be removed from the input data object. (default: :obj:`None`) """ @@ -275,6 +281,7 @@ def __init__( node_pad_value: Union[int, float, Padding] = 0.0, edge_pad_value: Union[int, float, Padding] = 0.0, mask_pad_value: bool = False, + add_pad_mask: bool = False, exclude_keys: Optional[List[str]] = None, ): self.max_num_nodes = self._NumNodes(max_num_nodes) @@ -293,6 +300,7 @@ def __init__( for key in ['train_mask', 'val_mask', 'test_mask'] } + self.add_pad_mask = add_pad_mask self.exclude_keys = set(exclude_keys or []) class _IntOrDict(ABC): @@ -448,6 +456,11 @@ def __pad_node_store(self, store: NodeStorage, get_dim_fn: Callable, f'({store.num_nodes}).' num_pad_nodes = num_target_nodes - store.num_nodes + if self.add_pad_mask: + pad_node_mask = torch.ones(num_target_nodes, dtype=torch.bool) + pad_node_mask[store.num_nodes:] = False + store.pad_node_mask = pad_node_mask + for attr_name in attrs_to_pad: attr = store[attr_name] pad_value = self.__get_node_padding(attr_name, node_type) @@ -470,6 +483,11 @@ def __pad_edge_store(self, store: EdgeStorage, get_dim_fn: Callable, f'({store.num_edges}).' num_pad_edges = num_target_edges - store.num_edges + if self.add_pad_mask: + pad_edge_mask = torch.ones(num_target_edges, dtype=torch.bool) + pad_edge_mask[store.num_edges:] = False + store.pad_edge_mask = pad_edge_mask + if isinstance(num_nodes, tuple): src_pad_value, dst_pad_value = num_nodes else: From 23c2836fe7ab25444d8ce0a51369f5a185f54709 Mon Sep 17 00:00:00 2001 From: Zecheng Zhang Date: Wed, 10 May 2023 07:21:39 -0700 Subject: [PATCH 1170/2432] [Aggregation] Clamp `StdAggregation` if there are edges for aggregation (#7334) If there are no edges for aggregation and we specified `dim_size`, IMO std should be all zeros. Previous implementation will clamp value to 0.00001 and in this scenario the aggregated std value will be around 0.0032. 
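To make the intended behavior concrete, a small sketch that mirrors the `test_empty_std_aggregation` test added below:

```python
import torch
from torch_geometric.nn.aggr import StdAggregation

aggr = StdAggregation()
x = torch.empty(0, 6)                     # no elements to aggregate
index = torch.empty(0, dtype=torch.long)  # empty assignment vector
out = aggr(x, index, dim_size=5)          # expected: all zeros of shape [5, 6]
```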
--------- Co-authored-by: rusty1s --- test/nn/aggr/test_basic.py | 11 +++++++++++ test/nn/aggr/test_fused.py | 18 +++++++++++++++--- torch_geometric/nn/aggr/basic.py | 6 +++++- torch_geometric/nn/aggr/fused.py | 7 ++++++- 4 files changed, 37 insertions(+), 5 deletions(-) diff --git a/test/nn/aggr/test_basic.py b/test/nn/aggr/test_basic.py index 45d04dc3f861..07841ec20423 100644 --- a/test/nn/aggr/test_basic.py +++ b/test/nn/aggr/test_basic.py @@ -74,6 +74,17 @@ def test_var_aggregation(): assert torch.allclose(out, expected, atol=1e-6) +def test_empty_std_aggregation(): + aggr = StdAggregation() + + x = torch.empty(0, 6).reshape(0, 6) + index = torch.empty(0, dtype=torch.long) + + out = aggr(x, index, dim_size=5) + assert out.size() == (5, 6) + assert float(out.abs().sum()) == 0.0 + + @pytest.mark.parametrize('Aggregation', [ SoftmaxAggregation, PowerMeanAggregation, diff --git a/test/nn/aggr/test_fused.py b/test/nn/aggr/test_fused.py index b6fea282a60f..b5a376283588 100644 --- a/test/nn/aggr/test_fused.py +++ b/test/nn/aggr/test_fused.py @@ -30,16 +30,28 @@ def test_fused_aggregation(aggrs): out = torch.cat(aggr(x, index), dim=-1) expected = torch.cat([aggr(y, index) for aggr in aggrs], dim=-1) - assert torch.allclose(out, expected, atol=1e-6) + assert torch.allclose(out, expected, atol=1e-5) jit = torch.jit.script(aggr) - assert torch.allclose(torch.cat(jit(x, index), dim=-1), out, atol=1e-6) + assert torch.allclose(torch.cat(jit(x, index), dim=-1), out, atol=1e-5) out.mean().backward() assert x.grad is not None expected.mean().backward() assert y.grad is not None - assert torch.allclose(x.grad, y.grad) + assert torch.allclose(x.grad, y.grad, atol=1e-5) + + +def test_empty_fused_std_aggregation(): + aggrs = [aggregation_resolver(aggr) for aggr in ['mean', 'var', 'std']] + aggr = FusedAggregation(aggrs) + + x = torch.empty(0, 6).reshape(0, 6) + index = torch.empty(0, dtype=torch.long) + + out = torch.cat(aggr(x, index, dim_size=5), dim=-1) + assert out.size() == (5, 18) + assert float(out.abs().sum()) == 0.0 if __name__ == '__main__': diff --git a/torch_geometric/nn/aggr/basic.py b/torch_geometric/nn/aggr/basic.py index e3a93211d443..324d4a7b4135 100644 --- a/torch_geometric/nn/aggr/basic.py +++ b/torch_geometric/nn/aggr/basic.py @@ -1,3 +1,4 @@ +import math from typing import Optional import torch @@ -131,7 +132,10 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: var = self.var_aggr(x, index, ptr, dim_size, dim) - return var.clamp(min=1e-5).sqrt() + # Allow "undefined" gradient at `sqrt(0.0)`: + out = var.clamp(min=1e-5).sqrt() + out = out.masked_fill(out <= math.sqrt(1e-5), 0.0) + return out class SoftmaxAggregation(Aggregation): diff --git a/torch_geometric/nn/aggr/fused.py b/torch_geometric/nn/aggr/fused.py index 965ac6517aee..97a00aad9b6a 100644 --- a/torch_geometric/nn/aggr/fused.py +++ b/torch_geometric/nn/aggr/fused.py @@ -1,3 +1,4 @@ +import math from typing import Dict, List, Optional, Tuple, Union from torch import Tensor @@ -316,7 +317,11 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, assert mean is not None var = (pow_sum / count) - (mean * mean) - outs[i] = var.clamp(min=1e-5).sqrt() + # Allow "undefined" gradient at `sqrt(0.0)`: + out = var.clamp(min=1e-5).sqrt() + out = out.masked_fill(out <= math.sqrt(1e-5), 0.0) + + outs[i] = out ####################################################################### From 4d4c91a9558e00e8b3a5e925ea6d1a984b1e2b22 Mon Sep 
17 00:00:00 2001 From: Vuenc Date: Wed, 10 May 2023 16:48:59 +0200 Subject: [PATCH 1171/2432] Fixing `jit.trace` tracing a constant number of nodes with `add_self_loops` (#7330) Fix of issue #7226. The problem I described came from the `maybe_num_nodes` function which computed `int(edge_index.max()) + 1`. The `int()` call made the computed number of nodes a constant for the `torch.jit.trace` function. I fixed it with an if branch that is only executed while tracing. (I could not get the same code for all cases to work: the workaround used only for tracing now would also work for normal operation, but then would break when using `torch.jit.script`). --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/nn/conv/test_message_passing.py | 46 +++++++++++++++++++++++++++- torch_geometric/utils/num_nodes.py | 27 ++++++++++++---- 3 files changed, 67 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bea641c9a513..dd0833194744 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339)) - Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326)) - Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308)) diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index 0454fa093d30..81e35df151e8 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -15,7 +15,12 @@ Size, SparseTensor, ) -from torch_geometric.utils import scatter, spmm, to_torch_csc_tensor +from torch_geometric.utils import ( + add_self_loops, + scatter, + spmm, + to_torch_csc_tensor, +) class MyConv(MessagePassing): @@ -53,6 +58,17 @@ def message_and_aggregate(self, adj_t: SparseTensor, return spmm(adj_t, x[0], reduce=self.aggr) +class MyConvWithSelfLoops(MessagePassing): + def __init__(self, aggr: str = 'add'): + super().__init__(aggr=aggr) + + def forward(self, x: Tensor, edge_index: torch.Tensor) -> Tensor: + edge_index, _ = add_self_loops(edge_index) + + # propagate_type: (x: Tensor) + return self.propagate(edge_index, x=x, size=None) + + def test_my_conv_basic(): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) @@ -606,3 +622,31 @@ def cast_index_hook(module, inputs): conv.register_aggregate_forward_pre_hook(cast_index_hook) assert conv(x, edge_index, edge_weight).size() == (4, 32) + + +@pytest.mark.parametrize('num_nodes', [4, 8, 2, 0]) +def test_traceable_my_conv_with_self_loops(num_nodes): + # `torch.jit.trace` a `MessagePassing` layer that adds self loops and test + # it across different input sizes. 
+ x = torch.randn(4, 16) + edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) + + conv = MyConvWithSelfLoops() + traced_conv = torch.jit.trace(conv, ((x, edge_index))) + scripted_conv = torch.jit.script(conv.jittable()) + + x = torch.randn(num_nodes, 16) + if num_nodes > 0: + edge_index = torch.stack([ + torch.arange(0, num_nodes - 1), + torch.arange(1, num_nodes), + ], dim=0) + else: + edge_index = torch.empty((2, 0), dtype=torch.long) + + out = conv(x, edge_index) + traced_out = traced_conv(x, edge_index) + scripted_out = scripted_conv(x, edge_index) + + assert torch.allclose(out, traced_out) + assert torch.allclose(out, scripted_out) diff --git a/torch_geometric/utils/num_nodes.py b/torch_geometric/utils/num_nodes.py index 767e7ffe64d5..c8c4f63887f3 100644 --- a/torch_geometric/utils/num_nodes.py +++ b/torch_geometric/utils/num_nodes.py @@ -1,37 +1,52 @@ from copy import copy -from typing import Optional # noqa +from typing import Dict, Optional, Union import torch from torch import Tensor import torch_geometric -from torch_geometric.typing import SparseTensor # noqa +from torch_geometric.typing import EdgeType, NodeType, SparseTensor @torch.jit._overload -def maybe_num_nodes(edge_index, num_nodes=None): +def maybe_num_nodes(edge_index, num_nodes): # type: (Tensor, Optional[int]) -> int pass @torch.jit._overload -def maybe_num_nodes(edge_index, num_nodes=None): +def maybe_num_nodes(edge_index, num_nodes): # type: (SparseTensor, Optional[int]) -> int pass -def maybe_num_nodes(edge_index, num_nodes=None): +def maybe_num_nodes( + edge_index: Union[Tensor, SparseTensor], + num_nodes: Optional[int] = None, +) -> int: if num_nodes is not None: return num_nodes elif isinstance(edge_index, Tensor): if torch_geometric.utils.is_torch_sparse_tensor(edge_index): return max(edge_index.size(0), edge_index.size(1)) + + if torch.jit.is_tracing(): + # Avoid non-traceable if-check for empty `edge_index` tensor: + tmp = torch.concat([ + edge_index.view(-1), + edge_index.new_full((1, ), fill_value=-1) + ]) + return tmp.max() + 1 + return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0 else: return max(edge_index.size(0), edge_index.size(1)) -def maybe_num_nodes_dict(edge_index_dict, num_nodes_dict=None): +def maybe_num_nodes_dict( + edge_index_dict: Dict[EdgeType, Tensor], + num_nodes_dict: Optional[Dict[NodeType, int]] = None, +) -> Dict[NodeType, int]: num_nodes_dict = {} if num_nodes_dict is None else copy(num_nodes_dict) found_types = list(num_nodes_dict.keys()) From 1bc5466f5af3b314381a5e114583d4264fbc8687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=9B=A7=E5=9B=A7?= Date: Wed, 10 May 2023 11:33:52 -0400 Subject: [PATCH 1172/2432] =?UTF-8?q?Add=20K=C3=B9zu=20remote=20backend=20?= =?UTF-8?q?examples=20(#7298)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds examples of Kùzu's remote backend integration with PyG. 
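In short, the integration pattern the examples build on looks roughly as follows (condensed from the `train.py` example added in this PR; the database path and seed node IDs are placeholders):

```python
import multiprocessing as mp

import kuzu
import torch
from torch_geometric.loader import NeighborLoader

db = kuzu.Database('papers100M')  # path to an existing Kùzu database
feature_store, graph_store = db.get_torch_geometric_remote_backend(
    mp.cpu_count())

loader = NeighborLoader(
    data=(feature_store, graph_store),
    num_neighbors={('paper', 'cites', 'paper'): [12, 12, 12]},
    input_nodes=('paper', torch.arange(1024)),  # placeholder seed nodes
    batch_size=1024,
    filter_per_worker=False,  # the Kùzu database object is not fork-safe
)

for batch in loader:
    ...  # train on each sampled subgraph
```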
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + docs/source/advanced/remote.rst | 3 +- docs/source/external/resources.rst | 2 + examples/kuzu/README.md | 38 +++++++ examples/kuzu/papers_100M/README.md | 16 +++ examples/kuzu/papers_100M/prepare_data.py | 54 ++++++++++ examples/kuzu/papers_100M/train.py | 119 ++++++++++++++++++++++ 7 files changed, 232 insertions(+), 1 deletion(-) create mode 100644 examples/kuzu/README.md create mode 100644 examples/kuzu/papers_100M/README.md create mode 100644 examples/kuzu/papers_100M/prepare_data.py create mode 100644 examples/kuzu/papers_100M/train.py diff --git a/CHANGELOG.md b/CHANGELOG.md index dd0833194744..aa2dbefb272c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339)) - Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326)) diff --git a/docs/source/advanced/remote.rst b/docs/source/advanced/remote.rst index bb04394d7107..095ef463e2bb 100644 --- a/docs/source/advanced/remote.rst +++ b/docs/source/advanced/remote.rst @@ -107,7 +107,8 @@ An example usage of the interface is shown below: assert torch.equal(row, edge_index[0]) assert torch.equal(col, edge_index[1]) -Common implementations of the :class:`~torch_geometric.data.GraphStore` are graph databases, *e.g.*, :obj:`Neo4j`, :obj:`TigerGraph`, :obj:`ArangoDB` are all viable performant options. +Common implementations of the :class:`~torch_geometric.data.GraphStore` are graph databases, *e.g.*, :obj:`Neo4j`, :obj:`TigerGraph`, :obj:`ArangoDB`, :obj:`Kùzu` are all viable performant options. +We provide an example of using :pyg:`PyG` in combination with the :obj:`Kùzu` database `here __`. A graph sampler is tightly coupled to the given :class:`~torch_geometric.data.GraphStore`, and operates on the :class:`~torch_geometric.data.GraphStore` to produce sampled subgraphs from input nodes. Different sampling algorithms are implemented behind the :class:`torch_geometric.sampler.BaseSampler` interface. 
diff --git a/docs/source/external/resources.rst b/docs/source/external/resources.rst index bd437e376549..cd62497f9436 100644 --- a/docs/source/external/resources.rst +++ b/docs/source/external/resources.rst @@ -38,3 +38,5 @@ External Resources * Amitoz Azad: **Primal-Dual Algorithm for Total Variation Processing on Graphs** [`Jupyter `__] * Manan Goel: **Recommending Amazon Products using Graph Neural Networks in** :pyg:`null` **PyTorch Geometric** [:wandb:`null` `W&B Report `__] + +* Kùzu: **Remote Backend for** :pyg:`null` **PyTorch Geometric** [:colab:`null` `Colab `__] diff --git a/examples/kuzu/README.md b/examples/kuzu/README.md new file mode 100644 index 000000000000..298baf8f9493 --- /dev/null +++ b/examples/kuzu/README.md @@ -0,0 +1,38 @@ +# Using Kùzu as a Remote Backend for PyG + +[Kùzu](https://kuzudb.com/) is an in-process property graph database management system built for query speed and scalability. +It provides an integration with PyG via the [remote backend interface](https://pytorch-geometric.readthedocs.io/en/latest/advanced/remote.html) of PyG. +The Python API of Kùzu outputs a [`torch_geometric.data.FeatureStore`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.data.FeatureStore.html) and a [`torch_geometric.data.GraphStore`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.data.GraphStore.html) that can be plugged directly into existing familiar PyG interfaces such as [`NeighborLoader`](https://pytorch-geometric.readthedocs.io/en/latest/_modules/torch_geometric/loader/neighbor_loader.html) and enables training GNNs directly on graphs stored in Kùzu. +This is particularly useful if you would like to train graphs that don't fit on your CPU's memory. + +## Installation + +You can install Kùzu as follows: + +```bash +pip install kuzu +``` + +## Usage + +The API and design documentation of Kùzu can be found at [https://kuzudb.com/docs/](https://kuzudb.com/docs/). + +## Examples + +We provide the following examples to showcase the usage of Kùzu remote backend within PyG: + +### PubMed + + + Open In Colab + + +The PubMed example is hosted on [Google Colab](https://colab.research.google.com/drive/12fOSqPm1HQTz_m9caRW7E_92vaeD9xq6). +In this example, we work on a small dataset for demonstrative purposes. +The [PubMed](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.datasets.Planetoid.html) dataset consists of 19,717 papers as nodes and 88,648 citation relationships between them. + +### `papers_100M` + +This example shows how to use the remote backend feature of Kùzu to work with a large graph of papers and citations on a single machine. +The data used in this example is `ogbn-papers100M` from the [Open Graph Benchmark](https://ogb.stanford.edu/). +The dataset contains approximately 111 million nodes and 1.6 billion edges. diff --git a/examples/kuzu/papers_100M/README.md b/examples/kuzu/papers_100M/README.md new file mode 100644 index 000000000000..c23bc2a972f8 --- /dev/null +++ b/examples/kuzu/papers_100M/README.md @@ -0,0 +1,16 @@ +# `papers_100M` Example + +This example shows how to use the remote backend feature of [Kùzu](https://kuzudb.com) to work with a large graph of papers and citations on a single machine. +The data used in this example is `ogbn-papers100M` from the [Open Graph Benchmark](https://ogb.stanford.edu/). +The dataset contains approximately 100 million nodes and 1.6 billion edges. + +## Prepare the data + +1. 
Download the dataset from [`http://snap.stanford.edu/ogb/data/nodeproppred/papers100M-bin.zip`](http://snap.stanford.edu/ogb/data/nodeproppred/papers100M-bin.zip) and put the `*.zip` file into this directory. +2. Run `python prepare_data.py`. + The script will automatically extract the data and convert it to the format that Kùzu can read. + A Kùzu database instance is then created under `papers_100M` and the data is loaded into the it. + +## Train a Model + +Afterwards, run `python train.py` to train a three-layer [`GraphSAGE`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GraphSAGE.html) model on this dataset. diff --git a/examples/kuzu/papers_100M/prepare_data.py b/examples/kuzu/papers_100M/prepare_data.py new file mode 100644 index 000000000000..a4892a6df895 --- /dev/null +++ b/examples/kuzu/papers_100M/prepare_data.py @@ -0,0 +1,54 @@ +from multiprocessing import cpu_count +from os import path +from zipfile import ZipFile + +import kuzu +import numpy as np +from tqdm import tqdm + +with ZipFile("papers100M-bin.zip", 'r') as papers100M_zip: + print('Extracting papers100M-bin.zip...') + papers100M_zip.extractall() + +with ZipFile("papers100M-bin/raw/data.npz", 'r') as data_zip: + print('Extracting data.npz...') + data_zip.extractall() + +with ZipFile("papers100M-bin/raw/node-label.npz", 'r') as node_label_zip: + print('Extracting node-label.npz...') + node_label_zip.extractall() + +print("Converting edge_index to CSV...") +edge_index = np.load('edge_index.npy', mmap_mode='r') +csvfile = open('edge_index.csv', 'w') +csvfile.write('src,dst\n') +for i in tqdm(range(edge_index.shape[1])): + csvfile.write(str(edge_index[0, i]) + ',' + str(edge_index[1, i]) + '\n') +csvfile.close() + +print("Generating IDs for nodes...") +node_year = np.load('node_year.npy', mmap_mode='r') +length = node_year.shape[0] +ids = np.arange(length) +np.save('ids.npy', ids) + +ids_path = path.abspath(path.join('.', 'ids.npy')) +edge_index_path = path.abspath(path.join('.', 'edge_index.csv')) +node_label_path = path.abspath(path.join('.', 'node_label.npy')) +node_feature_path = path.abspath(path.join('.', 'node_feat.npy')) +node_year_path = path.abspath(path.join('.', 'node_year.npy')) + +print("Creating Kùzu database...") +db = kuzu.Database('papers100M') +conn = kuzu.Connection(db, num_threads=cpu_count()) +print("Creating Kùzu tables...") +conn.execute( + "CREATE NODE TABLE paper(id INT64, x FLOAT[128], year INT64, y FLOAT, " + "PRIMARY KEY (id));") +conn.execute("CREATE REL TABLE cites(FROM paper TO paper, MANY_MANY);") +print("Copying nodes to Kùzu tables...") +conn.execute('COPY paper FROM ("%s", "%s", "%s", "%s") BY COLUMN;' % + (ids_path, node_feature_path, node_year_path, node_label_path)) +print("Copying edges to Kùzu tables...") +conn.execute('COPY cites FROM "%s";' % (edge_index_path)) +print("All done!") diff --git a/examples/kuzu/papers_100M/train.py b/examples/kuzu/papers_100M/train.py new file mode 100644 index 000000000000..5b3da061eb79 --- /dev/null +++ b/examples/kuzu/papers_100M/train.py @@ -0,0 +1,119 @@ +import multiprocessing as mp +import os.path as osp + +import kuzu +import pandas as pd +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm + +from torch_geometric.loader import NeighborLoader +from torch_geometric.nn import MLP, BatchNorm, SAGEConv + +NUM_EPOCHS = 1 +LOADER_BATCH_SIZE = 1024 + +print('Batch size:', LOADER_BATCH_SIZE) +print('Number of epochs:', NUM_EPOCHS) + +device = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu') +print('Using device:', device) + +# Load the train set: +train_path = osp.join('.', 'papers100M-bin', 'split', 'time', 'train.csv.gz') +train_df = pd.read_csv( + osp.abspath(train_path), + compression='gzip', + header=None, +) +input_nodes = torch.tensor(train_df[0].values, dtype=torch.long) + +######################################################################## +# The below code sets up the remote backend of Kùzu for PyG. +# Please refer to: https://kuzudb.com/docs/client-apis/python-api/overview.html +# for how to use the Python API of Kùzu. +######################################################################## + +# The buffer pool size of Kùzu is set to 40GB. You can change it to a smaller +# value if you have less memory. +KUZU_BM_SIZE = 40 * 1024**3 + +# Create Kùzu database: +db = kuzu.Database(osp.abspath(osp.join('.', 'papers100M')), KUZU_BM_SIZE) + +# Get remote backend for PyG: +feature_store, graph_store = db.get_torch_geometric_remote_backend( + mp.cpu_count()) + +# Plug the graph store and feature store into the `NeighborLoader`. +# Note that `filter_per_worker` is set to `False`. This is because the Kùzu +# database is already using multi-threading to scan the features in parallel +# and the database object is not fork-safe. +loader = NeighborLoader( + data=(feature_store, graph_store), + num_neighbors={('paper', 'cites', 'paper'): [12, 12, 12]}, + batch_size=LOADER_BATCH_SIZE, + input_nodes=('paper', input_nodes), + num_workers=4, + filter_per_worker=False, +) + + +class GraphSAGE(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, num_layers, + dropout=0.2): + super().__init__() + + self.convs = nn.ModuleList() + self.norms = nn.ModuleList() + + self.convs.append(SAGEConv(in_channels, hidden_channels)) + self.bns.append(BatchNorm(hidden_channels)) + for i in range(1, num_layers): + self.layers.append(SAGEConv(hidden_channels, hidden_channels)) + self.bns.append(BatchNorm(hidden_channels)) + + self.mlp = MLP( + in_channels=in_channels + num_layers * hidden_channels, + hidden_channels=2 * out_channels, + out_channels=out_channels, + num_layers=2, + norm='batch_norm', + act='leaky_relu', + ) + + def forward(self, x, edge_index): + x = F.dropout(x, p=self.dropout, training=self.training) + xs = [x] + for conv, norm in zip(self.convs, self.norms): + x = conv(x, edge_index) + x = norm(x) + x = x.relu() + x = F.dropout(x, p=self.dropout, training=self.training) + xs.append(x) + return self.mlp(torch.cat(xs, dim=-1)) + + +model = GraphSAGE(in_channels=128, hidden_channels=1024, out_channels=172, + num_layers=3).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) + +for epoch in range(1, NUM_EPOCHS + 1): + total_loss = total_examples = 0 + for batch in tqdm(loader): + batch = batch.to(device) + batch_size = batch['paper'].batch_size + + optimizer.zero_grad() + out = model(batch.x, batch.edge_index)[:batch_size] + y = batch.y[:batch_size].long().view(-1) + loss = F.cross_entropy_loss(out, y) + + loss.backward() + optimizer.step() + + total_loss += float(loss) * y.numel() + total_examples += y.numel() + + print(f'Epoch: {epoch:02d}, Loss: {total_loss / total_examples:.4f}') From 2395d708ad12ca2cc163fda2c078fc16d74154e1 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 10 May 2023 15:58:17 +0000 Subject: [PATCH 1173/2432] fix typo in documentation --- docs/source/advanced/remote.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/advanced/remote.rst 
b/docs/source/advanced/remote.rst index 095ef463e2bb..6b4bd9e1943e 100644
--- a/docs/source/advanced/remote.rst
+++ b/docs/source/advanced/remote.rst
@@ -108,7 +108,7 @@ An example usage of the interface is shown below:
 assert torch.equal(col, edge_index[1])
 Common implementations of the :class:`~torch_geometric.data.GraphStore` are graph databases, *e.g.*, :obj:`Neo4j`, :obj:`TigerGraph`, :obj:`ArangoDB`, :obj:`Kùzu` are all viable performant options.
-We provide an example of using :pyg:`PyG` in combination with the :obj:`Kùzu` database `here __`.
+We provide an example of using :pyg:`PyG` in combination with the :obj:`Kùzu` database `here `__.
 A graph sampler is tightly coupled to the given :class:`~torch_geometric.data.GraphStore`, and operates on the :class:`~torch_geometric.data.GraphStore` to produce sampled subgraphs from input nodes.
 Different sampling algorithms are implemented behind the :class:`torch_geometric.sampler.BaseSampler` interface.

From 82f31cf98a284fbc1de463c7bc39c0f2b4975914 Mon Sep 17 00:00:00 2001
From: andreazanetti
Date: Wed, 10 May 2023 18:12:15 +0200
Subject: [PATCH 1174/2432] Adds an example for Hierarchical Sampling (#7244)

It compares one epoch of training with and without Hierarchical Sampling.
With pyg-lib>0.1.0, we return the sampled number of nodes/edges in [neighbor_sampler.py](https://github.com/pyg-team/pytorch_geometric/blob/e3e63d66e52aa9ca4553274f0572f1f066d99c41/torch_geometric/sampler/neighbor_sampler.py#L241).
Leveraging this, [training_benchmark.py](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/training/training_benchmark.py) refers to the `BasicGNN` base class, in which [the forward pass does the trimming if required](https://github.com/pyg-team/pytorch_geometric/blob/e3e63d66e52aa9ca4553274f0572f1f066d99c41/torch_geometric/nn/models/basic_gnn.py#L201) (using the `--trim` flag with `training_benchmark.py`).
Therefore, this example mimics what is done in `training_benchmark.py`, making it evident to the user what this trimming/Hierarchical Sampling is about, how to test it, and what advantage it brings.

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: rusty1s
---
 CHANGELOG.md | 1 +
 examples/hierarchical_sampling.py | 59 +++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)
 create mode 100644 examples/hierarchical_sampling.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa2dbefb272c..6bb5987a28c2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Added +- Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339)) diff --git a/examples/hierarchical_sampling.py b/examples/hierarchical_sampling.py new file mode 100644 index 000000000000..4edc8a42f3c8 --- /dev/null +++ b/examples/hierarchical_sampling.py @@ -0,0 +1,59 @@ +import os.path as osp + +import torch +import torch.nn.functional as F +from tqdm import tqdm + +from torch_geometric.datasets import Reddit +from torch_geometric.loader import NeighborLoader +from torch_geometric.nn.models.basic_gnn import GraphSAGE + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Reddit') +dataset = Reddit(path) + +# Already send node features/labels to GPU for faster access during sampling: +data = dataset[0].to(device, 'x', 'y') + +kwargs = {'batch_size': 1024, 'num_workers': 6, 'persistent_workers': True} +loader = NeighborLoader(data, input_nodes=data.train_mask, + num_neighbors=[20, 10, 5], shuffle=True, **kwargs) + +model = GraphSAGE( + dataset.num_features, + hidden_channels=64, + out_channels=dataset.num_classes, + num_layers=3, +).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + + +def train(trim=False): + for batch in tqdm(loader): + optimizer.zero_grad() + batch = batch.to(device) + + if not trim: + out = model(batch.x, batch.edge_index) + else: + out = model( + batch.x, + batch.edge_index, + num_sampled_nodes_per_hop=batch.num_sampled_nodes, + num_sampled_edges_per_hop=batch.num_sampled_edges, + ) + + out = out[:batch.batch_size] + y = batch.y[:batch.batch_size] + + loss = F.cross_entropy(out, y) + loss.backward() + optimizer.step() + + +print('One epoch training without Hierarchical Graph Sampling:') +train(trim=False) + +print('One epoch training with Hierarchical Graph Sampling:') +train(trim=True) From 76177777cb7e2b686f2e6f10a5722ddfac7d9543 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 10 May 2023 20:05:25 +0200 Subject: [PATCH 1175/2432] Fix TorchScript support in `TopKPooling` (#7344) --- docs/requirements.txt | 2 +- torch_geometric/nn/pool/connect/base.py | 4 ++-- torch_geometric/nn/pool/select/base.py | 5 ++--- torch_geometric/nn/pool/topk_pool.py | 7 +++++-- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index f29171c36251..3d437463d55c 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,3 @@ -https://download.pytorch.org/whl/cpu/torch-1.9.0%2Bcpu-cp38-cp38-linux_x86_64.whl +https://download.pytorch.org/whl/cpu/torch-1.13.0%2Bcpu-cp38-cp38-linux_x86_64.whl numpy>=1.19.5 git+https://github.com/pyg-team/pyg_sphinx_theme.git diff --git a/torch_geometric/nn/pool/connect/base.py b/torch_geometric/nn/pool/connect/base.py index 1b528e29a9be..fd78a5c61dac 100644 --- a/torch_geometric/nn/pool/connect/base.py +++ b/torch_geometric/nn/pool/connect/base.py @@ -5,11 +5,11 @@ from torch import Tensor from torch_geometric.nn.pool.select import SelectOutput -from torch_geometric.utils.mixin import CastMixin +@torch.jit.script @dataclass(init=False) -class 
ConnectOutput(CastMixin): +class ConnectOutput: r"""The output of the :class:`Connect` method, which holds the coarsened graph structure, and optional pooled edge features and batch vectors. diff --git a/torch_geometric/nn/pool/select/base.py b/torch_geometric/nn/pool/select/base.py index ad729776d43c..fcde4823477c 100644 --- a/torch_geometric/nn/pool/select/base.py +++ b/torch_geometric/nn/pool/select/base.py @@ -4,11 +4,10 @@ import torch from torch import Tensor -from torch_geometric.utils.mixin import CastMixin - +@torch.jit.script @dataclass(init=False) -class SelectOutput(CastMixin): +class SelectOutput: r"""The output of the :class:`Select` method, which holds an assignment from selected nodes to their respective cluster(s). diff --git a/torch_geometric/nn/pool/topk_pool.py b/torch_geometric/nn/pool/topk_pool.py index 661638e3a0f6..d686e4629990 100644 --- a/torch_geometric/nn/pool/topk_pool.py +++ b/torch_geometric/nn/pool/topk_pool.py @@ -134,14 +134,17 @@ def forward( select_output = self.select(attn, batch) perm = select_output.node_index - x = x[perm] * select_output.weight.view(-1, 1) + score = select_output.weight + assert score is not None + + x = x[perm] * score.view(-1, 1) x = self.multiplier * x if self.multiplier != 1 else x batch = batch[perm] edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, num_nodes=select_output.num_nodes) - return x, edge_index, edge_attr, batch, perm, select_output.weight + return x, edge_index, edge_attr, batch, perm, score def __repr__(self) -> str: if self.min_score is None: From 097393494d1b2c0867bc9253ca8d8ddcd7b548cc Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 11 May 2023 08:03:58 +0200 Subject: [PATCH 1176/2432] Do not load `node_default` and `edge_default` attributes in `from_networkx` (#7348) --- CHANGELOG.md | 1 + torch_geometric/utils/convert.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bb5987a28c2..36c331f2b09c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Do not load `node_default` and `edge_default` attributes in `from_networkx` ([#7348](https://github.com/pyg-team/pytorch_geometric/pull/7348)) - Updated examples to use `NeighborLoader` instead of `NeighborSampler` ([#7152](https://github.com/pyg-team/pytorch_geometric/pull/7152)) - Fixed `HGTConv` utility function `_construct_src_node_feat` ([#7194](https://github.com/pyg-team/pytorch_geometric/pull/7194)) - Extend dataset summary to create stats for each node/edge type ([#7203](https://github.com/pyg-team/pytorch_geometric/pull/7203)) diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index 694048a08289..946d344127a4 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -236,6 +236,8 @@ def from_networkx( data[str(key)].append(value) for key, value in G.graph.items(): + if key == 'node_default' or key == 'edge_default': + continue # Do not load default attributes. 
key = f'graph_{key}' if key in node_attrs else key data[str(key)] = value From 06a86f6d57aa88c0958f5d759646a6dca429a62c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 11 May 2023 09:46:27 +0200 Subject: [PATCH 1177/2432] Fix `torch.jit.trace` in `utils.scatter` (#7350) --- torch_geometric/nn/pool/glob.py | 11 ++++------- torch_geometric/utils/scatter.py | 13 +++++-------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/torch_geometric/nn/pool/glob.py b/torch_geometric/nn/pool/glob.py index 2f2771084daf..85ee6883847a 100644 --- a/torch_geometric/nn/pool/glob.py +++ b/torch_geometric/nn/pool/glob.py @@ -26,11 +26,10 @@ def global_add_pool(x: Tensor, batch: Optional[Tensor], size (int, optional): The number of examples :math:`B`. Automatically calculated if not given. (default: :obj:`None`) """ - dim = -1 if x.dim() == 1 else -2 + dim = -1 if isinstance(x, Tensor) and x.dim() == 1 else -2 if batch is None: return x.sum(dim=dim, keepdim=x.dim() <= 2) - size = int(batch.max().item() + 1) if size is None else size return scatter(x, batch, dim=dim, dim_size=size, reduce='sum') @@ -55,12 +54,11 @@ def global_mean_pool(x: Tensor, batch: Optional[Tensor], size (int, optional): The number of examples :math:`B`. Automatically calculated if not given. (default: :obj:`None`) """ - dim = -1 if x.dim() == 1 else -2 + dim = -1 if isinstance(x, Tensor) and x.dim() == 1 else -2 if batch is None: return x.mean(dim=dim, keepdim=x.dim() <= 2) - size = int(batch.max().item() + 1) if size is None else size - return scatter(x, batch, dim=dim, dim_size=size, reduce='mean') + return scatter(x, batch, dim=-2, dim_size=size, reduce='mean') def global_max_pool(x: Tensor, batch: Optional[Tensor], @@ -84,9 +82,8 @@ def global_max_pool(x: Tensor, batch: Optional[Tensor], size (int, optional): The number of examples :math:`B`. Automatically calculated if not given. (default: :obj:`None`) """ - dim = -1 if x.dim() == 1 else -2 + dim = -1 if isinstance(x, Tensor) and x.dim() == 1 else -2 if batch is None: return x.max(dim=dim, keepdim=x.dim() <= 2)[0] - size = int(batch.max().item() + 1) if size is None else size return scatter(x, batch, dim=dim, dim_size=size, reduce='max') diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index de0457662e74..472d19599489 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -37,13 +37,13 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, :obj:`"mean"`, :obj:`"mul"`, :obj:`"min"` or :obj:`"max"`, :obj:`"any"`). (default: :obj:`"sum"`) """ - if index.dim() != 1: + if isinstance(index, Tensor) and index.dim() != 1: raise ValueError(f"The `index` argument must be one-dimensional " f"(got {index.dim()} dimensions)") dim = src.dim() + dim if dim < 0 else dim - if dim < 0 or dim >= src.dim(): + if isinstance(src, Tensor) and (dim < 0 or dim >= src.dim()): raise ValueError(f"The `dim` argument must lay between 0 and " f"{src.dim() - 1} (got {dim})") @@ -60,8 +60,7 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, # indices, but is therefore way slower in its backward implementation. # More insights can be found in `test/utils/test_scatter.py`. 
- size = list(src.size()) - size[dim] = dim_size + size = src.size()[:dim] + (dim_size, ) + src.size()[dim + 1:] # For "any" reduction, we use regular `scatter_`: if reduce == 'any': @@ -151,8 +150,7 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, if dim_size is None: dim_size = int(index.max()) + 1 if index.numel() > 0 else 0 - size = list(src.size()) - size[dim] = dim_size + size = src.size()[:dim] + (dim_size, ) + src.size()[dim + 1:] index = broadcast(index, src, dim) return src.new_zeros(size).scatter_(dim, index, src) @@ -164,6 +162,5 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, def broadcast(src: Tensor, ref: Tensor, dim: int) -> Tensor: - size = [1] * ref.dim() - size[dim] = -1 + size = ((1, ) * dim) + (-1, ) + ((1, ) * (ref.dim() - dim - 1)) return src.view(size).expand_as(ref) From 0d35d8d9f0ad73b05a0bfd02e86caa7131f64177 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 11 May 2023 15:21:49 +0200 Subject: [PATCH 1178/2432] Test PyTorch `coalesce` bug on `torch.load` (#7351) --- examples/multi_gpu/distributed_batching.py | 4 ++++ test/utils/test_sparse.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/examples/multi_gpu/distributed_batching.py b/examples/multi_gpu/distributed_batching.py index 7b502a794a1f..f5c05a176823 100644 --- a/examples/multi_gpu/distributed_batching.py +++ b/examples/multi_gpu/distributed_batching.py @@ -15,6 +15,10 @@ import torch_geometric.transforms as T from torch_geometric.loader import DataLoader from torch_geometric.nn import GINEConv, global_mean_pool +from torch_geometric.typing import WITH_TORCH_SPARSE + +if not WITH_TORCH_SPARSE: + quit("This example requires 'torch-sparse'") class GIN(torch.nn.Module): diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 2e1fcf571b21..aa4efcae4fba 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -1,3 +1,5 @@ +import os.path as osp + import torch import torch_geometric.typing @@ -179,6 +181,22 @@ def test_to_torch_csc_tensor(): edge_attr) +def test_to_torch_coo_tensor_save_load(tmp_path): + edge_index = torch.tensor([ + [0, 1, 1, 2, 2, 3], + [1, 0, 2, 1, 3, 2], + ]) + adj = to_torch_coo_tensor(edge_index, is_coalesced=False) + assert adj.is_coalesced() + + path = osp.join(tmp_path, 'adj.t') + torch.save(adj, path) + adj = torch.load(path) + + # This is obviously a bug in PyTorch. Wait for a fix... + assert not adj.is_coalesced() + + def test_to_edge_index(): adj = torch.tensor([ [0., 1., 0., 0.], From 6092039ebcaa2f769d8f11eb919a1cc2bcfb3db1 Mon Sep 17 00:00:00 2001 From: volltin Date: Fri, 12 May 2023 21:10:56 +0800 Subject: [PATCH 1179/2432] Fix typo in molecule_net.py (#7355) --- torch_geometric/datasets/molecule_net.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/torch_geometric/datasets/molecule_net.py b/torch_geometric/datasets/molecule_net.py index b1aac62431a9..90939f99a236 100644 --- a/torch_geometric/datasets/molecule_net.py +++ b/torch_geometric/datasets/molecule_net.py @@ -22,7 +22,7 @@ class MoleculeNet(InMemoryDataset): root (str): Root directory where the dataset should be saved. name (str): The name of the dataset (:obj:`"ESOL"`, :obj:`"FreeSolv"`, :obj:`"Lipo"`, :obj:`"PCBA"`, :obj:`"MUV"`, :obj:`"HIV"`, - :obj:`"BACE"`, :obj:`"BBPB"`, :obj:`"Tox21"`, :obj:`"ToxCast"`, + :obj:`"BACE"`, :obj:`"BBBP"`, :obj:`"Tox21"`, :obj:`"ToxCast"`, :obj:`"SIDER"`, :obj:`"ClinTox"`). 
transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed @@ -91,7 +91,7 @@ class MoleculeNet(InMemoryDataset): - ~73.7 - 9 - 1 - * - BBPB + * - BBBP - 2,050 - ~23.9 - ~51.6 @@ -136,7 +136,7 @@ class MoleculeNet(InMemoryDataset): slice(0, 17)], 'hiv': ['HIV', 'HIV.csv', 'HIV', 0, -1], 'bace': ['BACE', 'bace.csv', 'bace', 0, 2], - 'bbbp': ['BBPB', 'BBBP.csv', 'BBBP', -1, -2], + 'bbbp': ['BBBP', 'BBBP.csv', 'BBBP', -1, -2], 'tox21': ['Tox21', 'tox21.csv.gz', 'tox21', -1, slice(0, 12)], 'toxcast': From 79fe974366d785e98a515eab173ae349f900cd4a Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Sat, 13 May 2023 23:00:34 +0530 Subject: [PATCH 1180/2432] Add documentation for `SelectTopk` (#7359) Co-authored-by: rusty1s --- torch_geometric/nn/pool/select/topk.py | 41 +++++++++++++++++++++++++- torch_geometric/nn/pool/topk_pool.py | 9 +++--- 2 files changed, 45 insertions(+), 5 deletions(-) diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py index aabb4a691d5c..0d6ae4ad6304 100644 --- a/torch_geometric/nn/pool/select/topk.py +++ b/torch_geometric/nn/pool/select/topk.py @@ -73,7 +73,46 @@ def topk( class SelectTopK(Select): - # TODO (matthias) Add documentation. + r"""Selects the top-:math:`k` nodes with highest projection scores from the + `"Graph U-Nets" `_, `"Towards Sparse + Hierarchical Graph Classifiers" `_ + and `"Understanding Attention and Generalization in Graph Neural + Networks" `_ papers. + + If :obj:`min_score` :math:`\tilde{\alpha}` is :obj:`None`, computes: + + .. math:: + \mathbf{y} &= \sigma \left( \frac{\mathbf{X}\mathbf{p}}{\| + \mathbf{p} \|} \right) + + \mathbf{i} &= \mathrm{top}_k(\mathbf{y}) + + If :obj:`min_score` :math:`\tilde{\alpha}` is a value in :obj:`[0, 1]`, + computes: + + .. math:: + \mathbf{y} &= \mathrm{softmax}(\mathbf{X}\mathbf{p}) + + \mathbf{i} &= \mathbf{y}_i > \tilde{\alpha} + + where :math:`\mathbf{p}` is the learnable projection vector. + + Args: + in_channels (int): Size of each input sample. + ratio (float or int): The graph pooling ratio, which is used to compute + :math:`k = \lceil \mathrm{ratio} \cdot N \rceil`, or the value + of :math:`k` itself, depending on whether the type of :obj:`ratio` + is :obj:`float` or :obj:`int`. + This value is ignored if :obj:`min_score` is not :obj:`None`. + (default: :obj:`0.5`) + min_score (float, optional): Minimal node score :math:`\tilde{\alpha}` + which is used to compute indices of pooled nodes + :math:`\mathbf{i} = \mathbf{y}_i > \tilde{\alpha}`. + When this value is not :obj:`None`, the :obj:`ratio` argument is + ignored. (default: :obj:`None`) + act (str or callable, optional): The non-linearity :math:`\sigma`. + (default: :obj:`"tanh"`) + """ def __init__( self, in_channels: int, diff --git a/torch_geometric/nn/pool/topk_pool.py b/torch_geometric/nn/pool/topk_pool.py index d686e4629990..5560c9d3a413 100644 --- a/torch_geometric/nn/pool/topk_pool.py +++ b/torch_geometric/nn/pool/topk_pool.py @@ -40,7 +40,8 @@ class TopKPooling(torch.nn.Module): If :obj:`min_score` :math:`\tilde{\alpha}` is :obj:`None`, computes: .. math:: - \mathbf{y} &= \frac{\mathbf{X}\mathbf{p}}{\| \mathbf{p} \|} + \mathbf{y} &= \sigma \left( \frac{\mathbf{X}\mathbf{p}}{\| + \mathbf{p} \|} \right) \mathbf{i} &= \mathrm{top}_k(\mathbf{y}) @@ -66,7 +67,7 @@ class TopKPooling(torch.nn.Module): Args: in_channels (int): Size of each input sample. 
- ratio (float or int): Graph pooling ratio, which is used to compute + ratio (float or int): The graph pooling ratio, which is used to compute :math:`k = \lceil \mathrm{ratio} \cdot N \rceil`, or the value of :math:`k` itself, depending on whether the type of :obj:`ratio` is :obj:`float` or :obj:`int`. @@ -80,8 +81,8 @@ class TopKPooling(torch.nn.Module): multiplier (float, optional): Coefficient by which features gets multiplied after pooling. This can be useful for large graphs and when :obj:`min_score` is used. (default: :obj:`1`) - nonlinearity (str or callable, optional): The non-linearity to use. - (default: :obj:`"tanh"`) + nonlinearity (str or callable, optional): The non-linearity + :math:`\sigma`. (default: :obj:`"tanh"`) """ def __init__( self, From 5959fedb25d3c4cbc913ab8e22dd3453d9f30c4f Mon Sep 17 00:00:00 2001 From: Jintang Li Date: Sun, 14 May 2023 02:09:07 +0800 Subject: [PATCH 1181/2432] Group datasets in documentation according to different categories (#6854) One of many PRs aimed at grouping datasets in PyG according to different categories., addressing https://github.com/pyg-team/pytorch_geometric/issues/6801. Part of the rendered page in dataset cheatsheet: image image image --------- Co-authored-by: Matthias Fey --- docs/source/cheatsheet/data_cheatsheet.rst | 59 ++++++++++++++- docs/source/modules/datasets.rst | 35 ++++++++- docs/source/notes/data_cheatsheet.rst | 59 ++++++++++++++- torch_geometric/datasets/__init__.py | 79 +++++++++++--------- torch_geometric/datasets/dblp.py | 39 ++++++++++ torch_geometric/datasets/utils/__init__.py | 3 +- torch_geometric/datasets/utils/cheatsheet.py | 4 + 7 files changed, 238 insertions(+), 40 deletions(-) diff --git a/docs/source/cheatsheet/data_cheatsheet.rst b/docs/source/cheatsheet/data_cheatsheet.rst index 910ec7cc6543..a0aad56a9e55 100644 --- a/docs/source/cheatsheet/data_cheatsheet.rst +++ b/docs/source/cheatsheet/data_cheatsheet.rst @@ -7,6 +7,63 @@ Dataset Cheatsheet Please consider helping us filling its content by providing statistics for individual datasets. See `here `__ and `here `__ for examples on how to do so. +Homogeneous Datasets +-------------------- + +.. 
list-table:: + :widths: 50 10 10 10 10 10 + :header-rows: 1 + + * - Name + - #graphs + - #nodes + - #edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.homo_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', default='') }} + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', child, default=1) }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} + +Heterogeneous Datasets +---------------------- + +.. list-table:: + :widths: 50 30 10 10 + :header-rows: 1 + + * - Name + - #nodes/#edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.hetero_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - + - + - + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ **{{torch_geometric.datasets.utils.get_type(child)}} Type**: {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes/#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} + +Synthetic Datasets +------------------ + .. 
list-table:: :widths: 50 10 10 10 10 10 :header-rows: 1 @@ -17,7 +74,7 @@ Dataset Cheatsheet - #edges - #features - #classes/#tasks -{% for cls in torch_geometric.datasets.classes %} +{% for cls in torch_geometric.datasets.synthetic_datasets %} * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %} - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }} diff --git a/docs/source/modules/datasets.rst b/docs/source/modules/datasets.rst index c2bf28e3a95c..f8b3d8976329 100644 --- a/docs/source/modules/datasets.rst +++ b/docs/source/modules/datasets.rst @@ -1,7 +1,38 @@ torch_geometric.datasets ======================== -Benchmark Datasets +.. contents:: Contents + :local: + +Homogeneous Datasets +-------------------- + +.. currentmodule:: torch_geometric.datasets + +.. autosummary:: + :nosignatures: + :toctree: ../generated + :template: autosummary/only_class.rst + + {% for name in torch_geometric.datasets.homo_datasets %} + {{ name }} + {% endfor %} + +Heterogeneous Datasets +---------------------- + +.. currentmodule:: torch_geometric.datasets + +.. autosummary:: + :nosignatures: + :toctree: ../generated + :template: autosummary/only_class.rst + + {% for name in torch_geometric.datasets.hetero_datasets %} + {{ name }} + {% endfor %} + +Synthetic Datasets ------------------ .. currentmodule:: torch_geometric.datasets @@ -11,7 +42,7 @@ Benchmark Datasets :toctree: ../generated :template: autosummary/only_class.rst - {% for name in torch_geometric.datasets.classes %} + {% for name in torch_geometric.datasets.synthetic_datasets %} {{ name }} {% endfor %} diff --git a/docs/source/notes/data_cheatsheet.rst b/docs/source/notes/data_cheatsheet.rst index 723d2f61e427..11128a15c6e3 100644 --- a/docs/source/notes/data_cheatsheet.rst +++ b/docs/source/notes/data_cheatsheet.rst @@ -9,6 +9,63 @@ Dataset Cheatsheet Please consider helping us filling its content by providing statistics for individual datasets. See `here `__ and `here `__ for examples on how to do so. +Homogeneous Datasets +-------------------- + +.. 
list-table:: + :widths: 50 10 10 10 10 10 + :header-rows: 1 + + * - Name + - #graphs + - #nodes + - #edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.homo_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', default='') }} + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', child, default=1) }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} + +Heterogeneous Datasets +---------------------- + +.. list-table:: + :widths: 50 30 10 10 + :header-rows: 1 + + * - Name + - #nodes/#edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.hetero_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - + - + - + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ **{{torch_geometric.datasets.utils.get_type(child)}} Type**: {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes/#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} + +Synthetic Datasets +------------------ + .. 
list-table:: :widths: 50 10 10 10 10 10 :header-rows: 1 @@ -19,7 +76,7 @@ Dataset Cheatsheet - #edges - #features - #classes/#tasks -{% for cls in torch_geometric.datasets.classes %} +{% for cls in torch_geometric.datasets.synthetic_datasets %} * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %} - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }} diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index e9ee7c28b472..5b8f2c27a30a 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -1,8 +1,9 @@ +# flake8: noqa + from .karate import KarateClub from .tu_dataset import TUDataset from .gnn_benchmark_dataset import GNNBenchmarkDataset from .planetoid import Planetoid -from .fake import FakeDataset, FakeHeteroDataset from .nell import NELL from .citation_full import CitationFull, CoraFull from .coauthor import Coauthor @@ -38,12 +39,10 @@ from .icews import ICEWS18 from .gdelt import GDELT from .willow_object_class import WILLOWObjectClass -from .dbp15k import DBP15K from .pascal import PascalVOCKeypoints from .pascal_pf import PascalPF from .snap_dataset import SNAPDataset from .suite_sparse import SuiteSparseMatrixCollection -from .aminer import AMiner from .word_net import WordNet18, WordNet18RR from .freebase import FB15k_237 from .wikics import WikiCS @@ -51,14 +50,6 @@ from .wikipedia_network import WikipediaNetwork from .heterophilous_graph_dataset import HeterophilousGraphDataset from .actor import Actor -from .ogb_mag import OGB_MAG -from .dblp import DBLP -from .movie_lens import MovieLens -from .imdb import IMDB -from .last_fm import LastFM -from .hgb_dataset import HGBDataset -from .jodie import JODIEDataset -from .mixhop_synthetic_dataset import MixHopSyntheticDataset from .upfd import UPFD from .github import GitHub from .facebook import FacebookPagePage @@ -67,35 +58,46 @@ from .gemsec import GemsecDeezer from .twitch import Twitch from .airports import Airports -from .ba_shapes import BAShapes from .lrgb import LRGBDataset from .malnet_tiny import MalNetTiny from .omdb import OMDB from .polblogs import PolBlogs from .email_eu_core import EmailEUCore -from .sbm_dataset import StochasticBlockModelDataset -from .sbm_dataset import RandomPartitionGraphDataset from .linkx_dataset import LINKXDataset from .elliptic import EllipticBitcoinDataset from .elliptic_temporal import EllipticBitcoinTemporalDataset from .dgraph import DGraphFin from .hydro_net import HydroNet +from .airfrans import AirfRANS +from .jodie import JODIEDataset + +from .dbp15k import DBP15K +from .aminer import AMiner +from .ogb_mag import OGB_MAG +from .dblp import DBLP +from .movie_lens import MovieLens +from .imdb import IMDB +from .last_fm import LastFM +from .hgb_dataset import HGBDataset +from .taobao import Taobao + +from .fake import FakeDataset, FakeHeteroDataset +from .sbm_dataset import StochasticBlockModelDataset +from .sbm_dataset import RandomPartitionGraphDataset +from .mixhop_synthetic_dataset import MixHopSyntheticDataset from .explainer_dataset import ExplainerDataset from .infection_dataset import InfectionDataset from .ba2motif_dataset import 
BA2MotifDataset from .ba_multi_shapes import BAMultiShapesDataset -from .airfrans import AirfRANS -from .taobao import Taobao +from .ba_shapes import BAShapes import torch_geometric.datasets.utils # noqa -__all__ = [ +homo_datasets = [ 'KarateClub', 'TUDataset', 'GNNBenchmarkDataset', 'Planetoid', - 'FakeDataset', - 'FakeHeteroDataset', 'NELL', 'CitationFull', 'CoraFull', @@ -131,13 +133,11 @@ 'BitcoinOTC', 'ICEWS18', 'GDELT', - 'DBP15K', 'WILLOWObjectClass', 'PascalVOCKeypoints', 'PascalPF', 'SNAPDataset', 'SuiteSparseMatrixCollection', - 'AMiner', 'WordNet18', 'WordNet18RR', 'FB15k_237', @@ -146,14 +146,6 @@ 'WikipediaNetwork', 'HeterophilousGraphDataset', 'Actor', - 'OGB_MAG', - 'DBLP', - 'MovieLens', - 'IMDB', - 'LastFM', - 'HGBDataset', - 'JODIEDataset', - 'MixHopSyntheticDataset', 'UPFD', 'GitHub', 'FacebookPagePage', @@ -162,25 +154,42 @@ 'GemsecDeezer', 'Twitch', 'Airports', - 'BAShapes', 'LRGBDataset', 'MalNetTiny', 'OMDB', 'PolBlogs', 'EmailEUCore', - 'StochasticBlockModelDataset', - 'RandomPartitionGraphDataset', 'LINKXDataset', 'EllipticBitcoinDataset', 'EllipticBitcoinTemporalDataset', 'DGraphFin', 'HydroNet', + 'AirfRANS', + 'JODIEDataset', +] + +hetero_datasets = [ + 'DBP15K', + 'AMiner', + 'OGB_MAG', + 'DBLP', + 'MovieLens', + 'IMDB', + 'LastFM', + 'HGBDataset', + 'Taobao', +] +synthetic_datasets = [ + 'FakeDataset', + 'FakeHeteroDataset', + 'StochasticBlockModelDataset', + 'RandomPartitionGraphDataset', + 'MixHopSyntheticDataset', 'ExplainerDataset', 'InfectionDataset', 'BA2MotifDataset', 'BAMultiShapesDataset', - 'AirfRANS', - 'Taobao', + 'BAShapes', ] -classes = __all__ +__all__ = homo_datasets + hetero_datasets + synthetic_datasets diff --git a/torch_geometric/datasets/dblp.py b/torch_geometric/datasets/dblp.py index dcc9f1c94335..456a306cfdf3 100644 --- a/torch_geometric/datasets/dblp.py +++ b/torch_geometric/datasets/dblp.py @@ -37,6 +37,45 @@ class DBLP(InMemoryDataset): an :obj:`torch_geometric.data.HeteroData` object and returns a transformed version. The data object will be transformed before being saved to disk. (default: :obj:`None`) + + **STATS:** + + .. 
list-table:: + :widths: 20 10 10 10 + :header-rows: 1 + + * - Node/Edge Type + - #nodes/#edges + - #features + - #classes + * - Author + - 4,057 + - 334 + - 4 + * - Paper + - 14,328 + - 4,231 + - + * - Term + - 7,723 + - 50 + - + * - Conference + - 20 + - 0 + - + * - Author-Paper + - 196,425 + - + - + * - Paper-Term + - 85,810 + - + - + * - Conference-Paper + - 14,328 + - + - """ url = 'https://www.dropbox.com/s/yh4grpeks87ugr2/DBLP_processed.zip?dl=1' diff --git a/torch_geometric/datasets/utils/__init__.py b/torch_geometric/datasets/utils/__init__.py index 0236f004342d..4cbaa4f06538 100644 --- a/torch_geometric/datasets/utils/__init__.py +++ b/torch_geometric/datasets/utils/__init__.py @@ -1,8 +1,9 @@ -from .cheatsheet import paper_link, has_stats, get_stat, get_children +from .cheatsheet import paper_link, has_stats, get_stat, get_children, get_type __all__ = [ 'paper_link', 'has_stats', 'get_stat', 'get_children', + 'get_type', ] diff --git a/torch_geometric/datasets/utils/cheatsheet.py b/torch_geometric/datasets/utils/cheatsheet.py index 148983e5b804..ad5c084e17e4 100644 --- a/torch_geometric/datasets/utils/cheatsheet.py +++ b/torch_geometric/datasets/utils/cheatsheet.py @@ -21,6 +21,10 @@ def has_stats(cls: str) -> bool: return len(get_stats_table(cls)) > 0 +def get_type(cls: str) -> str: + return 'Edge' if '-' in cls else 'Node' + + def get_stat(cls: str, name: str, child: Optional[str] = None, default: Any = None) -> str: if child is None and len(get_children(cls)) > 0: From 50cbd435d06803e64f3d131ef692499c0cfac09a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 13 May 2023 22:21:29 +0200 Subject: [PATCH 1182/2432] Do not modify input in-place in `to_hetero_with_bases` (#7363) --- CHANGELOG.md | 1 + test/nn/test_to_hetero_with_bases_transformer.py | 4 ++-- torch_geometric/nn/to_hetero_with_bases_transformer.py | 5 +---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36c331f2b09c..6fb004059f0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fixed a bug in which inputs where modified in-place in `to_hetero_with_bases` ([#7363](https://github.com/pyg-team/pytorch_geometric/pull/7363)) - Do not load `node_default` and `edge_default` attributes in `from_networkx` ([#7348](https://github.com/pyg-team/pytorch_geometric/pull/7348)) - Updated examples to use `NeighborLoader` instead of `NeighborSampler` ([#7152](https://github.com/pyg-team/pytorch_geometric/pull/7152)) - Fixed `HGTConv` utility function `_construct_src_node_feat` ([#7194](https://github.com/pyg-team/pytorch_geometric/pull/7194)) diff --git a/test/nn/test_to_hetero_with_bases_transformer.py b/test/nn/test_to_hetero_with_bases_transformer.py index b26871a306a3..6f40f0cb20a9 100644 --- a/test/nn/test_to_hetero_with_bases_transformer.py +++ b/test/nn/test_to_hetero_with_bases_transformer.py @@ -152,7 +152,7 @@ def test_to_hetero_with_bases(): assert isinstance(out, dict) and len(out) == 2 assert out['paper'].size() == (100, 32) assert out['author'].size() == (100, 32) - assert sum(p.numel() for p in model.parameters()) == 6076 + assert sum(p.numel() for p in model.parameters()) == 5948 model = Net3() in_channels = {'x': 16, 'edge_attr': 8} @@ -164,7 +164,7 @@ def test_to_hetero_with_bases(): assert out['author'].size() == (100, 32) model = Net4() - in_channels = {'x': 16} + in_channels = {'x0': 16} model = to_hetero_with_bases(model, metadata, num_bases=4, in_channels=in_channels, debug=False) out = model(x_dict, edge_index_dict) diff --git a/torch_geometric/nn/to_hetero_with_bases_transformer.py b/torch_geometric/nn/to_hetero_with_bases_transformer.py index 93c71c571dfe..726f1634b18e 100644 --- a/torch_geometric/nn/to_hetero_with_bases_transformer.py +++ b/torch_geometric/nn/to_hetero_with_bases_transformer.py @@ -393,10 +393,7 @@ def __init__(self, keys: List[Union[NodeType, EdgeType]], def forward( self, x_dict: Dict[Union[NodeType, EdgeType], Tensor] ) -> Dict[Union[NodeType, EdgeType], Tensor]: - - for key, x in x_dict.items(): - x_dict[key] = self.lins[key2str(key)](x) - return x_dict + return {key: self.lins[key2str(key)](x) for key, x in x_dict.items()} def __repr__(self) -> str: return (f'{self.__class__.__name__}(num_relations={len(self.lins)}, ' From 239c4b5a8d7b9862ba942c6482cb2d5077533475 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Mon, 15 May 2023 11:46:58 -0700 Subject: [PATCH 1183/2432] Heuristic for `segment_matmul` (#7258) good to go --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Serge Panev Co-authored-by: Jinu Sunil --- torch_geometric/nn/conv/rgcn_conv.py | 12 ++- torch_geometric/nn/dense/linear.py | 59 +++++++------- torch_geometric/nn/to_hetero_module.py | 4 +- torch_geometric/utils/hetero.py | 103 +++++++++++++++++++++++++ 4 files changed, 141 insertions(+), 37 deletions(-) diff --git a/torch_geometric/nn/conv/rgcn_conv.py b/torch_geometric/nn/conv/rgcn_conv.py index 938f687590bc..d52daa9e9f47 100644 --- a/torch_geometric/nn/conv/rgcn_conv.py +++ b/torch_geometric/nn/conv/rgcn_conv.py @@ -16,6 +16,7 @@ torch_sparse, ) from torch_geometric.utils import index_sort, one_hot, scatter, spmm +from torch_geometric.utils.hetero import segmatmul_heuristic from torch_geometric.utils.sparse import index2ptr @@ -126,7 +127,7 @@ def __init__( self.num_bases = num_bases self.num_blocks = num_blocks self.is_sorted = is_sorted - + self.use_segmm: int = -1 if isinstance(in_channels, int): in_channels = (in_channels, in_channels) self.in_channels_l = in_channels[0] @@ -201,7 
+202,6 @@ def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]], x_r = x[1] size = (x_l.size(0), x_r.size(0)) - if isinstance(edge_index, SparseTensor): edge_type = edge_index.storage.value() assert edge_type is not None @@ -230,14 +230,18 @@ def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]], else: # No regularization/Basis-decomposition ======================== if (torch_geometric.typing.WITH_PYG_LIB and self.num_bases is None - and x_l.is_floating_point() - and isinstance(edge_index, Tensor)): + and x_l.is_floating_point() and isinstance( + edge_index, Tensor)) and (self.use_segmm == -1 + or bool(self.use_segmm)): if not self.is_sorted: if (edge_type[1:] < edge_type[:-1]).any(): edge_type, perm = index_sort( edge_type, max_value=self.num_relations) edge_index = edge_index[:, perm] edge_type_ptr = index2ptr(edge_type, self.num_relations) + if self.use_segmm == -1: + self.use_segmm = segmatmul_heuristic( + x_l, edge_type_ptr, self.weight) out = self.propagate(edge_index, x=x_l, edge_type_ptr=edge_type_ptr, size=size) else: diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index b13c474bd67c..ce005242045c 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -11,6 +11,7 @@ from torch_geometric.nn import inits from torch_geometric.typing import pyg_lib from torch_geometric.utils import index_sort +from torch_geometric.utils.hetero import segmatmul_heuristic from torch_geometric.utils.sparse import index2ptr @@ -215,40 +216,26 @@ def __init__(self, in_channels: int, out_channels: int, num_types: int, self.num_types = num_types self.is_sorted = is_sorted self.kwargs = kwargs - - if torch_geometric.typing.WITH_PYG_LIB: - self.lins = None - if self.in_channels == -1: - self.weight = nn.parameter.UninitializedParameter() - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - else: - self.weight = torch.nn.Parameter( - torch.Tensor(num_types, in_channels, out_channels)) - if kwargs.get('bias', True): - self.bias = Parameter(torch.Tensor(num_types, out_channels)) - else: - self.register_parameter('bias', None) + self.use_segmm: int = -1 + if self.in_channels == -1: + self.weight = nn.parameter.UninitializedParameter() + self._hook = self.register_forward_pre_hook( + self.initialize_parameters) + else: + self.weight = torch.nn.Parameter( + torch.Tensor(num_types, in_channels, out_channels)) + if kwargs.get('bias', True): + self.bias = Parameter(torch.Tensor(num_types, out_channels)) else: - self.lins = torch.nn.ModuleList([ - Linear(in_channels, out_channels, **kwargs) - for _ in range(num_types) - ]) - self.register_parameter('weight', None) self.register_parameter('bias', None) - self.reset_parameters() def reset_parameters(self): r"""Resets all learnable parameters of the module.""" - if torch_geometric.typing.WITH_PYG_LIB: - reset_weight_(self.weight, self.in_channels, - self.kwargs.get('weight_initializer', None)) - reset_weight_(self.bias, self.in_channels, - self.kwargs.get('bias_initializer', None)) - else: - for lin in self.lins: - lin.reset_parameters() + reset_weight_(self.weight, self.in_channels, + self.kwargs.get('weight_initializer', None)) + reset_weight_(self.bias, self.in_channels, + self.kwargs.get('bias_initializer', None)) def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: r""" @@ -256,7 +243,9 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: x (torch.Tensor): The input features. 
type_vec (torch.Tensor): A vector that maps each entry to a type. """ - if torch_geometric.typing.WITH_PYG_LIB: + + if torch_geometric.typing.WITH_PYG_LIB and (self.use_segmm == -1 + or bool(self.use_segmm)): assert self.weight is not None perm: Optional[Tensor] = None @@ -266,6 +255,9 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: x = x[perm] type_vec_ptr = index2ptr(type_vec, self.num_types) + if self.use_segmm == -1: + self.use_segmm = segmatmul_heuristic(x, type_vec_ptr, + self.weight) out = pyg_lib.ops.segment_matmul(x, type_vec_ptr, self.weight) if self.bias is not None: out += self.bias[type_vec] @@ -275,11 +267,14 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: out_unsorted[perm] = out out = out_unsorted else: - assert self.lins is not None out = x.new_empty(x.size(0), self.out_channels) - for i, lin in enumerate(self.lins): + for i in range(self.num_types): mask = type_vec == i - out[mask] = lin(x[mask]) + if mask.numel() == 0: + continue + out[mask] = F.linear(x[mask], self.weight[i].T) + if self.bias is not None: + out += self.bias[type_vec] return out @torch.no_grad() diff --git a/torch_geometric/nn/to_hetero_module.py b/torch_geometric/nn/to_hetero_module.py index 82e3298a9c8e..61592185979f 100644 --- a/torch_geometric/nn/to_hetero_module.py +++ b/torch_geometric/nn/to_hetero_module.py @@ -3,6 +3,7 @@ from typing import Dict, List, Optional, Union import torch +import torch.nn.functional as F from torch import Tensor import torch_geometric @@ -54,7 +55,8 @@ def dict_forward( if not torch_geometric.typing.WITH_PYG_LIB: return { - key: self.hetero_module.lins[i](x_dict[key]) + key: F.linear(x_dict[key], self.hetero_module.weight[i].T) + + self.hetero_module.bias[i] for i, key in enumerate(self.types) } diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index 1e9b8c21704e..b215e09a1413 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -9,6 +9,109 @@ from torch_geometric.utils.num_nodes import maybe_num_nodes_dict +def learn_sklearn_heuristic(): + import os + import time + + from torch_geometric.nn.dense import HeteroLinear, Linear + os.environ['NVIDIA_TF32_OVERRIDE'] = '0' + fused_times = {} + loop_times = {} + try: + for num_nodes_per_type in [10**2, 10**3, 10**4, 10**5]: + for out_feats in [2, 4, 8, 16, 32, 64, 128, 256]: + for n_feats in [4, 8, 16, 32, 64, 128, 256, 512]: + for num_types in [4, 8, 16, 32, 64, 128, 256, 512]: + try: + if n_feats < out_feats: + continue + print("benchmarking", num_types, "types w/", + num_nodes_per_type, "nodes per type and", + n_feats, "input features and", out_feats, + "outuput feats") + x_dict = { + 'v' + str(i): torch.randn( + (num_nodes_per_type, n_feats)).cuda() + for i in range(num_types) + } + x = torch.cat(list(x_dict.values()), dim=0) + node_type = torch.cat([ + (j * torch.ones(x_j.shape[0])).long() + for j, x_j in enumerate(x_dict.values()) + ]).cuda() + lin = Linear(n_feats, out_feats).cuda() + heterolin = HeteroLinear(n_feats, out_feats, + len(list(x_dict.keys())), + True).cuda() + for i in range(60): + if i == 10: + since = time.time() + heterolin(x=x, type_vec=node_type) + key = (num_types, num_nodes_per_type, n_feats, + out_feats) + fused_times[key] = ((time.time() - since) / 50.0) + print("Avg time for fuse based=", fused_times[key]) + for i in range(60): + if i == 10: + since = time.time() + o = x.new_empty(x.size(0), out_feats) + for j in range(num_types): + mask = j == node_type + o[mask] = lin(x[mask]) + loop_times[key] = ((time.time() 
- since) / 50.0) + print("Avg time for for-loop=", loop_times[key]) + except: # noqa + continue + except: # noqa + pass + import numpy as np + X = np.zeros((len(loop_times), 4)) + y = np.zeros(len(loop_times)) + for i, key in enumerate(loop_times.keys()): + X[i, :] = key + loop_time, fused_time = loop_times[key], fused_times[key] + y[i] = int(fused_time <= loop_time) + from sklearn.pipeline import make_pipeline + from sklearn.preprocessing import StandardScaler + from sklearn.svm import LinearSVC + scaler = StandardScaler() + svm = LinearSVC() + clf = make_pipeline(scaler, svm) + clf.fit(X, y) + + print("scaler mean=", scaler.mean_) + print("scaler scale=", scaler.scale_) + print("svm weights=", svm.coef_) + print("svm bias=", svm.intercept_) + # results on A100: + # scaler mean= + # [ 125.11603189 12133.21523472 163.81222321 32.43755536] + # scaler scale= + # [ 163.34480422 27572.94543809 177.6426489 56.82103934] + # svm weights= + # [[ 2.43877659e+00 1.67583047e+00 -5.20527282e-04 3.43925501e-01]] + # svm bias= + # [1.20236999] + + +def segmatmul_heuristic(inputs: Tensor, type_ptr, weight: Tensor): + num_types = len(type_ptr) - 1 + max_num_nodes_per_types = (type_ptr[1:] - type_ptr[:-1]).max() + in_feat = inputs.size(1) + out_feat = weight.size(-1) + # this heuristic was learned with learn_sklearn_heuristic on an A100 + x = torch.tensor([num_types, max_num_nodes_per_types, in_feat, out_feat]) + scale_mean = torch.tensor( + [125.11603189, 12133.21523472, 163.81222321, 32.43755536]) + scale_scale = torch.tensor( + [163.34480422, 27572.94543809, 177.6426489, 56.82103934]) + svm_weights = torch.tensor( + [2.43877659e+00, 1.67583047e+00, -5.20527282e-04, 3.43925501e-01]) + bias = 1.20236999 + x = (x - scale_mean) / scale_scale + return int(x.dot(svm_weights) >= bias) + + def group_hetero_graph(edge_index_dict, num_nodes_dict=None): num_nodes_dict = maybe_num_nodes_dict(edge_index_dict, num_nodes_dict) From f0e91ade196f88ea343baac2525bfe3817eda370 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 16 May 2023 20:48:21 +0200 Subject: [PATCH 1184/2432] Use recommended `script_if_tracing` in library code (#7375) --- test/nn/pool/select/test_select_topk.py | 2 +- torch_geometric/nn/pool/connect/base.py | 2 +- torch_geometric/nn/pool/select/base.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/nn/pool/select/test_select_topk.py b/test/nn/pool/select/test_select_topk.py index a0418fff9306..571eaa57af6d 100644 --- a/test/nn/pool/select/test_select_topk.py +++ b/test/nn/pool/select/test_select_topk.py @@ -47,7 +47,7 @@ def test_select_topk(min_score): assert str(pool) == 'SelectTopK(16, min_score=2.0)' out = pool(x, batch) - assert isinstance(out, SelectOutput) + assert isinstance(out, SelectOutput.__original_fn) assert out.num_nodes == 6 assert out.num_clusters <= out.num_nodes diff --git a/torch_geometric/nn/pool/connect/base.py b/torch_geometric/nn/pool/connect/base.py index fd78a5c61dac..262937603f11 100644 --- a/torch_geometric/nn/pool/connect/base.py +++ b/torch_geometric/nn/pool/connect/base.py @@ -7,7 +7,7 @@ from torch_geometric.nn.pool.select import SelectOutput -@torch.jit.script +@torch.jit.script_if_tracing @dataclass(init=False) class ConnectOutput: r"""The output of the :class:`Connect` method, which holds the coarsened diff --git a/torch_geometric/nn/pool/select/base.py b/torch_geometric/nn/pool/select/base.py index fcde4823477c..ce9281653dc6 100644 --- a/torch_geometric/nn/pool/select/base.py +++ b/torch_geometric/nn/pool/select/base.py @@ -5,7 +5,7 @@ 
from torch import Tensor -@torch.jit.script +@torch.jit.script_if_tracing @dataclass(init=False) class SelectOutput: r"""The output of the :class:`Select` method, which holds an assignment From c51436c92af44573aa03324f2f2cf29594c6ec31 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 17 May 2023 11:11:23 +0200 Subject: [PATCH 1185/2432] Add first version of `GPUPrefetcher` (#7376) --- CHANGELOG.md | 1 + test/loader/test_prefetch.py | 19 +++++++ torch_geometric/loader/prefetch.py | 81 ++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) create mode 100644 test/loader/test_prefetch.py create mode 100644 torch_geometric/loader/prefetch.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fb004059f0f..bfbbfc41a2e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `GPUPrefetcher` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376)) - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) diff --git a/test/loader/test_prefetch.py b/test/loader/test_prefetch.py new file mode 100644 index 000000000000..44df6f318d99 --- /dev/null +++ b/test/loader/test_prefetch.py @@ -0,0 +1,19 @@ +import torch + +from torch_geometric.loader.prefetch import GPUPrefetcher +from torch_geometric.testing import onlyCUDA + + +@onlyCUDA +def test_gpu_prefetcher(): + data = [torch.randn(5, 5) for _ in range(10)] + + loader = GPUPrefetcher(data, device='cuda') + assert str(loader).startswith('GPUPrefetcher') + assert len(loader) == 10 + + for i, batch in enumerate(loader): + assert batch.is_cuda + assert torch.equal(batch.cpu(), data[i]) + assert loader.idx > 0 + assert loader.idx == 0 diff --git a/torch_geometric/loader/prefetch.py b/torch_geometric/loader/prefetch.py new file mode 100644 index 000000000000..482c4ac43630 --- /dev/null +++ b/torch_geometric/loader/prefetch.py @@ -0,0 +1,81 @@ +from queue import Queue +from threading import Thread +from typing import Any, Optional + +import torch +from torch.utils.data import DataLoader + + +class GPUPrefetcher: + r"""A GPU prefetcher class for asynchronously loading data from a + :class:`torch.utils.data.DataLoader` from host memory to device memory. + + Args: + loader (torch.utils.DataLoader): A data loader object. + device (torch.device): The CUDA device to load the data to. + prefetch_size (int, optional): The number of batches to prefetch at + once. 
(default: :obj:`1`) + """ + def __init__( + self, + loader: DataLoader, + device: torch.device, + prefetch_size: int = 1, + ): + if prefetch_size < 1: + raise ValueError(f"'prefetch_size' must be greater than 0 " + f"(got {prefetch_size})") + + self.loader = loader + self.device = torch.device(device) + self.prefetch_size = prefetch_size + + self.load_stream = torch.cuda.Stream(device=device) + self.queue = Queue(maxsize=prefetch_size) + self.worker: Optional[Thread] = None + + self.idx = 0 + + def non_blocking_transfer(self, batch: Any) -> Any: + # (Recursive) non-blocking device transfer: + if isinstance(batch, (list, tuple)): + return [self.non_blocking_transfer(v) for v in batch] + if isinstance(batch, dict): + return {k: self.non_blocking_transfer(v) for k, v in batch.items()} + + with torch.cuda.stream(self.load_stream): + if not batch.is_pinned(): + batch = batch.pin_memory() + return batch.to(self.device, non_blocking=True) + + def load_loop(self): + for batch in self.loader: + self.queue.put(self.non_blocking_transfer(batch)) + + def __iter__(self) -> 'GPUPrefetcher': + is_dead = self.worker is None or not self.worker.is_alive() + if is_dead and self.queue.empty() and self.idx == 0: + self.worker = Thread(target=self.load_loop) + self.worker.daemon = True + self.worker.start() + + return self + + def __next__(self) -> Any: + is_dead = not self.worker.is_alive() + if (is_dead and self.queue.empty()) or self.idx >= len(self): + self.idx = 0 + self.queue.join() + self.worker.join() + raise StopIteration + + out = self.queue.get() + self.queue.task_done() + self.idx += 1 + return out + + def __len__(self) -> int: + return len(self.loader) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.loader})' From 249681497fb6a3c9ebed25759cff6b7cdf28d3c5 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 17 May 2023 17:29:21 +0200 Subject: [PATCH 1186/2432] `GPUPrefetcher` benchmark (#7378) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` Forward pass without prefetching... 100%|██████| 200/200 [00:08<00:00, 22.94it/s] Forward pass with prefetching... 100%|██████| 200/200 [00:07<00:00, 25.25it/s] ``` --- CHANGELOG.md | 2 +- test/loader/test_prefetch.py | 41 ++++++++++++++++++++++++++++++ torch_geometric/loader/prefetch.py | 3 +-- 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bfbbfc41a2e2..2ade79105676 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added `GPUPrefetcher` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376)) +- Added `GPUPrefetcher` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378)) - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) diff --git a/test/loader/test_prefetch.py b/test/loader/test_prefetch.py index 44df6f318d99..32da97e15399 100644 --- a/test/loader/test_prefetch.py +++ b/test/loader/test_prefetch.py @@ -1,6 +1,8 @@ import torch +from torch_geometric.loader import NeighborLoader from torch_geometric.loader.prefetch import GPUPrefetcher +from torch_geometric.nn import GraphSAGE from torch_geometric.testing import onlyCUDA @@ -17,3 +19,42 @@ def test_gpu_prefetcher(): assert torch.equal(batch.cpu(), data[i]) assert loader.idx > 0 assert loader.idx == 0 + + +if __name__ == '__main__': + import argparse + + from ogb.nodeproppred import PygNodePropPredDataset + from tqdm import tqdm + + parser = argparse.ArgumentParser() + parser.add_argument('--num_workers', type=int, default=8) + args = parser.parse_args() + + data = PygNodePropPredDataset('ogbn-products', root='/tmp/ogb')[0] + + model = GraphSAGE( + in_channels=data.x.size(-1), + hidden_channels=64, + num_layers=2, + ).cuda() + + loader = NeighborLoader( + data, + input_nodes=torch.arange(1024 * 200), + batch_size=1024, + num_neighbors=[10, 10], + num_workers=args.num_workers, + filter_per_worker=True, + ) + + print('Forward pass without prefetching...') + for batch in tqdm(loader): + with torch.no_grad(): + batch = batch.cuda() + model(batch.x, batch.edge_index) + + print('Forward pass with prefetching...') + for batch in tqdm(GPUPrefetcher(loader, device='cuda')): + with torch.no_grad(): + model(batch.x, batch.edge_index) diff --git a/torch_geometric/loader/prefetch.py b/torch_geometric/loader/prefetch.py index 482c4ac43630..d6dae8f63704 100644 --- a/torch_geometric/loader/prefetch.py +++ b/torch_geometric/loader/prefetch.py @@ -44,8 +44,7 @@ def non_blocking_transfer(self, batch: Any) -> Any: return {k: self.non_blocking_transfer(v) for k, v in batch.items()} with torch.cuda.stream(self.load_stream): - if not batch.is_pinned(): - batch = batch.pin_memory() + batch = batch.pin_memory() return batch.to(self.device, non_blocking=True) def load_loop(self): From fd5c1122a2a02960dc7e55bd60d4929ba713bd97 Mon Sep 17 00:00:00 2001 From: Kasper Piskorski Date: Wed, 17 May 2023 17:16:04 +0100 Subject: [PATCH 1187/2432] Update GraphGPS example (#7377) Addressing training problems of the GraphGPS example. It is now more aligned with the reference implementation by: - having a LR scheduler, - concatenating initial node features and PEs - having an extra post-GPS MLP layer. I plotted loss and test-MAE for the example before and after changes together with results from PNA example (same dataset) for reference. 
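In short, the update boils down to roughly the following condensed sketch (not the exact example code — the standalone `encode` helper is only illustrative, in the example this logic lives inside `GPS.forward`):

```python
import torch
from torch.nn import BatchNorm1d, Embedding, Linear
from torch.optim.lr_scheduler import ReduceLROnPlateau

channels, pe_dim = 64, 8  # values used by the example

# Atom types and the 20-dim positional encoding are now embedded separately
# and concatenated, rather than summed into the same embedding space:
node_emb = Embedding(28, channels - pe_dim)
pe_norm = BatchNorm1d(20)
pe_lin = Linear(20, pe_dim)

def encode(x, pe):
    # x: [num_nodes, 1] atom types, pe: [num_nodes, 20] positional encodings
    return torch.cat((node_emb(x.squeeze(-1)), pe_lin(pe_norm(pe))), dim=1)

# Validation MAE now drives a plateau-based learning rate schedule:
params = list(node_emb.parameters()) + list(pe_lin.parameters())
optimizer = torch.optim.Adam(params, lr=0.001, weight_decay=1e-5)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20,
                              min_lr=0.00001)
# ... and after every epoch: scheduler.step(val_mae)
```

Concatenation keeps the positional encoding from competing with the node embedding for the same channels, which matches how the reference implementation handles PEs.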
![loss](https://github.com/pyg-team/pytorch_geometric/assets/10519067/d3542a1e-4976-4e6c-97c5-b1b411f3fd7e) ![mae_gps](https://github.com/pyg-team/pytorch_geometric/assets/10519067/71efac79-1ae5-430e-9387-3056dc8a5c62) ![mae_updated](https://github.com/pyg-team/pytorch_geometric/assets/10519067/f9bf2b3c-df23-49fd-afe9-ce20c5470829) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + examples/graph_gps.py | 37 ++++++++++++++++++++++++++++--------- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ade79105676..20fcf89f1fe6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043)) - Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) - Added an optional `batch_size` and `max_num_nodes` arguments to `MemPooling` layer ([#7239](https://github.com/pyg-team/pytorch_geometric/pull/7239)) +- Fixed training issues of the GraphGPS example ([#7377](https://github.com/pyg-team/pytorch_geometric/pull/7377)) ### Removed diff --git a/examples/graph_gps.py b/examples/graph_gps.py index 161148e5026b..1ad7ad7deedb 100644 --- a/examples/graph_gps.py +++ b/examples/graph_gps.py @@ -1,7 +1,15 @@ import os.path as osp import torch -from torch.nn import Embedding, Linear, ModuleList, ReLU, Sequential +from torch.nn import ( + BatchNorm1d, + Embedding, + Linear, + ModuleList, + ReLU, + Sequential, +) +from torch.optim.lr_scheduler import ReduceLROnPlateau import torch_geometric.transforms as T from torch_geometric.datasets import ZINC @@ -20,11 +28,12 @@ class GPS(torch.nn.Module): - def __init__(self, channels: int, num_layers: int): + def __init__(self, channels: int, pe_dim: int, num_layers: int): super().__init__() - self.node_emb = Embedding(21, channels) - self.pe_lin = Linear(20, channels) + self.node_emb = Embedding(28, channels - pe_dim) + self.pe_lin = Linear(20, pe_dim) + self.pe_norm = BatchNorm1d(20) self.edge_emb = Embedding(4, channels) self.convs = ModuleList() @@ -37,24 +46,33 @@ def __init__(self, channels: int, num_layers: int): conv = GPSConv(channels, GINEConv(nn), heads=4, attn_dropout=0.5) self.convs.append(conv) - self.lin = Linear(channels, 1) + self.mlp = Sequential( + Linear(channels, channels // 2), + ReLU(), + Linear(channels // 2, channels // 4), + ReLU(), + Linear(channels // 4, 1), + ) def forward(self, x, pe, edge_index, edge_attr, batch): - x = self.node_emb(x.squeeze(-1)) + self.pe_lin(pe) + x_pe = self.pe_norm(pe) + x = torch.cat((self.node_emb(x.squeeze(-1)), self.pe_lin(x_pe)), 1) edge_attr = self.edge_emb(edge_attr) for conv in self.convs: x = conv(x, edge_index, batch, edge_attr=edge_attr) x = global_add_pool(x, batch) - return self.lin(x) + return self.mlp(x) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = GPS(channels=64, num_layers=10).to(device) +model = GPS(channels=64, pe_dim=8, num_layers=10).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5) +scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, + min_lr=0.00001) -def train(epoch): +def train(): model.train() total_loss = 0 @@ 
-87,5 +105,6 @@ def test(loader): loss = train(epoch) val_mae = test(val_loader) test_mae = test(test_loader) + scheduler.step(val_mae) print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_mae:.4f}, ' f'Test: {test_mae:.4f}') From 474fa01ed329b7ee02057e78ab0b6a75cef7fc29 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 17 May 2023 19:24:55 +0200 Subject: [PATCH 1188/2432] Fix broken link in documentation (#7381) --- docs/source/tutorial/explain.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/tutorial/explain.rst b/docs/source/tutorial/explain.rst index 3edc23787b47..8cfc293d471e 100644 --- a/docs/source/tutorial/explain.rst +++ b/docs/source/tutorial/explain.rst @@ -34,7 +34,7 @@ The :class:`~torch_geometric.explain.Explainer` generates an :class:`~torch_geom .. note:: - You can read more about the :class:`torch_geometric.explain` package in this [blog post](https://medium.com/@pytorch_geometric/graph-machine-learning-explainability-with-pyg-ff13cffc23c2). + You can read more about the :class:`torch_geometric.explain` package in this `blog post `__. Examples -------- From 3c671c4614abb8a830985e6c25d3243a6ee60e27 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 17 May 2023 20:44:20 +0200 Subject: [PATCH 1189/2432] `SortAggregation`: Added an optional `max_num_nodes` argument (#7367) - Added an optional `max_num_nodes` argument to the `SortAggregation` layer - Added the possibility to pass the `fill_value` argument as `torch.tensor` to the `utils.to_dense_batch` function. - Remove converting `fill_value` to Python float in SortAggregation forward. Pass `fill_value` as `torch. tensor` to the `utils.to_dense_batch` function. Reducing the number of Python operations. Converting to a Python value may cause graph breaks and splits during torch.compile and as a result the need for the host instead device calculations (in that case x.min().item() - 1) on some architectures which which leads to performance degradation. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 + test/utils/test_to_dense_batch.py | 48 +++++++++++++++--------- torch_geometric/nn/aggr/base.py | 4 +- torch_geometric/nn/aggr/sort.py | 19 +++++++--- torch_geometric/utils/sort_edge_index.py | 8 ++-- torch_geometric/utils/to_dense_batch.py | 30 ++++++++++++--- 6 files changed, 76 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20fcf89f1fe6..0b101f23b8ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Added an optional `max_num_elements` argument to `SortAggregation` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) +- Added the option to pass `fill_value` as a `torch.tensor` to `utils.to_dense_batch` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) - Fixed a bug in which inputs where modified in-place in `to_hetero_with_bases` ([#7363](https://github.com/pyg-team/pytorch_geometric/pull/7363)) - Do not load `node_default` and `edge_default` attributes in `from_networkx` ([#7348](https://github.com/pyg-team/pytorch_geometric/pull/7348)) - Updated examples to use `NeighborLoader` instead of `NeighborSampler` ([#7152](https://github.com/pyg-team/pytorch_geometric/pull/7152)) diff --git a/test/utils/test_to_dense_batch.py b/test/utils/test_to_dense_batch.py index 958542880591..00723569e900 100644 --- a/test/utils/test_to_dense_batch.py +++ b/test/utils/test_to_dense_batch.py @@ -1,54 +1,68 @@ +from typing import Tuple + +import pytest import torch +from torch import Tensor -from torch_geometric.testing import is_full_test +from torch_geometric.testing import onlyFullTest from torch_geometric.utils import to_dense_batch -def test_to_dense_batch(): +@pytest.mark.parametrize('fill', [70.0, torch.tensor(49.0)]) +def test_to_dense_batch(fill): x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) batch = torch.tensor([0, 0, 1, 2, 2, 2]) + item = fill.item() if isinstance(fill, Tensor) else fill expected = torch.Tensor([ - [[1, 2], [3, 4], [0, 0]], - [[5, 6], [0, 0], [0, 0]], + [[1, 2], [3, 4], [item, item]], + [[5, 6], [item, item], [item, item]], [[7, 8], [9, 10], [11, 12]], ]) - out, mask = to_dense_batch(x, batch) + out, mask = to_dense_batch(x, batch, fill_value=fill) assert out.size() == (3, 3, 2) assert torch.equal(out, expected) assert mask.tolist() == [[1, 1, 0], [1, 0, 0], [1, 1, 1]] - if is_full_test(): - jit = torch.jit.script(to_dense_batch) - out, mask = jit(x, batch) - assert torch.equal(out, expected) - assert mask.tolist() == [[1, 1, 0], [1, 0, 0], [1, 1, 1]] - - out, mask = to_dense_batch(x, batch, max_num_nodes=2) + out, mask = to_dense_batch(x, batch, max_num_nodes=2, fill_value=fill) assert out.size() == (3, 2, 2) assert torch.equal(out, expected[:, :2]) assert mask.tolist() == [[1, 1], [1, 0], [1, 1]] - out, mask = to_dense_batch(x, batch, max_num_nodes=5) + out, mask = to_dense_batch(x, batch, max_num_nodes=5, fill_value=fill) assert out.size() == (3, 5, 2) assert torch.equal(out[:, :3], expected) assert mask.tolist() == [[1, 1, 0, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 0, 0]] - out, mask = to_dense_batch(x) + out, mask = to_dense_batch(x, fill_value=fill) assert out.size() == (1, 6, 2) assert torch.equal(out[0], x) assert mask.tolist() == [[1, 1, 1, 1, 1, 1]] - out, mask = to_dense_batch(x, max_num_nodes=2) + out, mask = to_dense_batch(x, max_num_nodes=2, fill_value=fill) assert out.size() == (1, 2, 2) assert torch.equal(out[0], x[:2]) assert mask.tolist() == [[1, 1]] - out, mask = to_dense_batch(x, max_num_nodes=10) + out, mask = to_dense_batch(x, max_num_nodes=10, fill_value=fill) assert out.size() == (1, 10, 2) assert torch.equal(out[0, :6], x) assert mask.tolist() == [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]] - out, mask = to_dense_batch(x, batch, batch_size=4) + out, mask = to_dense_batch(x, batch, batch_size=4, fill_value=fill) assert out.size() == (4, 3, 2) + + +@onlyFullTest +def test_to_dense_batch_jit(): + @torch.jit.script + def to_dense_batch_jit(x: Tensor, batch: Tensor) -> Tuple[Tensor, Tensor]: + return 
to_dense_batch(x, batch) + + x = torch.randn(6, 2) + batch = torch.tensor([0, 0, 1, 2, 2, 2]) + + out, mask = to_dense_batch_jit(x, batch) + assert out.size() == (3, 3, 2) + assert mask.size() == (3, 3) diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 160caed02c9c..0dc2166cdcec 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple +from typing import Optional, Tuple, Union import torch from torch import Tensor @@ -161,7 +161,7 @@ def to_dense_batch( ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2, - fill_value: float = 0., + fill_value: Union[Optional[float], Tensor] = None, max_num_elements: Optional[int] = None, ) -> Tuple[Tensor, Tensor]: diff --git a/torch_geometric/nn/aggr/sort.py b/torch_geometric/nn/aggr/sort.py index 05299d47f077..f492d9522666 100644 --- a/torch_geometric/nn/aggr/sort.py +++ b/torch_geometric/nn/aggr/sort.py @@ -20,13 +20,20 @@ def __init__(self, k: int): super().__init__() self.k = k - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - - fill_value = x.min().item() - 1 + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: + + fill_value = x.min() - 1 batch_x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, - fill_value=fill_value) + fill_value=fill_value, + max_num_elements=max_num_elements) B, N, D = batch_x.size() _, perm = batch_x[:, :, -1].sort(dim=-1, descending=True) diff --git a/torch_geometric/utils/sort_edge_index.py b/torch_geometric/utils/sort_edge_index.py index 8ca3d3a73c2f..3b7a6fe27361 100644 --- a/torch_geometric/utils/sort_edge_index.py +++ b/torch_geometric/utils/sort_edge_index.py @@ -11,24 +11,24 @@ @torch.jit._overload -def sort_edge_index(edge_index, edge_attr, num_nodes, sort_by_row): +def sort_edge_index(edge_index, edge_attr, num_nodes, sort_by_row): # noqa # type: (Tensor, str, Optional[int], bool) -> Tensor # noqa pass @torch.jit._overload -def sort_edge_index(edge_index, edge_attr, num_nodes, sort_by_row): +def sort_edge_index(edge_index, edge_attr, num_nodes, sort_by_row): # noqa # type: (Tensor, Optional[Tensor], Optional[int], bool) -> Tuple[Tensor, Optional[Tensor]] # noqa pass @torch.jit._overload -def sort_edge_index(edge_index, edge_attr, num_nodes, sort_by_row): +def sort_edge_index(edge_index, edge_attr, num_nodes, sort_by_row): # noqa # type: (Tensor, List[Tensor], Optional[int], bool) -> Tuple[Tensor, List[Tensor]] # noqa pass -def sort_edge_index( +def sort_edge_index( # noqa edge_index: Tensor, edge_attr: Union[OptTensor, List[Tensor], str] = MISSING, num_nodes: Optional[int] = None, diff --git a/torch_geometric/utils/to_dense_batch.py b/torch_geometric/utils/to_dense_batch.py index a13bc06db1d6..c90088181358 100644 --- a/torch_geometric/utils/to_dense_batch.py +++ b/torch_geometric/utils/to_dense_batch.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple +from typing import Optional, Tuple, Union import torch from torch import Tensor @@ -6,9 +6,25 @@ from torch_geometric.utils import scatter -def to_dense_batch(x: Tensor, batch: Optional[Tensor] = None, - fill_value: float = 0., max_num_nodes: Optional[int] = None, - batch_size: Optional[int] = None) -> Tuple[Tensor, Tensor]: +@torch.jit._overload +def 
to_dense_batch(x, batch, fill_value, max_num_nodes, batch_size): # noqa + # type: (Tensor, Optional[Tensor], Optional[float], Optional[int], Optional[int]) -> Tuple[Tensor, Tensor] # noqa + pass + + +@torch.jit._overload +def to_dense_batch(x, batch, fill_value, max_num_nodes, batch_size): # noqa + # type: (Tensor, Optional[Tensor], Tensor, Optional[int], Optional[int]) -> Tuple[Tensor, Tensor] # noqa + pass + + +def to_dense_batch( # noqa + x: Tensor, + batch: Optional[Tensor] = None, + fill_value: Union[Optional[float], Tensor] = None, + max_num_nodes: Optional[int] = None, + batch_size: Optional[int] = None, +) -> Tuple[Tensor, Tensor]: r"""Given a sparse batch of node features :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}` (with :math:`N_i` indicating the number of nodes in graph :math:`i`), creates a @@ -25,8 +41,8 @@ def to_dense_batch(x: Tensor, batch: Optional[Tensor] = None, batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. Must be ordered. (default: :obj:`None`) - fill_value (float, optional): The value for invalid entries in the - resulting dense output tensor. (default: :obj:`0`) + fill_value (float or torch.Tensor, optional): The value for invalid + entries in the resulting dense output tensor. (default: :obj:`0`) max_num_nodes (int, optional): The size of the output node dimension. (default: :obj:`None`) batch_size (int, optional) The batch size. (default: :obj:`None`) @@ -85,6 +101,8 @@ def to_dense_batch(x: Tensor, batch: Optional[Tensor] = None, [ True, False, False, False], [ True, True, True, False]]) """ + fill_value = 0.0 if fill_value is None else fill_value + if batch is None and max_num_nodes is None: mask = torch.ones(1, x.size(0), dtype=torch.bool, device=x.device) return x.unsqueeze(0), mask From b5e47fbbd975f850bd5aa377221e7175beca1373 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 17 May 2023 20:55:10 +0200 Subject: [PATCH 1190/2432] Add `batch_size` argument for `fps`, `knn` and `radius` functions (#7368) Propagate change from https://github.com/rusty1s/pytorch_cluster/pull/175/commits/d149d431a6f24771e8ae886454ac23ba7833941c --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/pool/__init__.py | 126 +++++++++++++++++++++------- torch_geometric/typing.py | 1 + 3 files changed, 97 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b101f23b8ae..27c4f2b1ccbc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `GPUPrefetcher` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378)) - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py index 4c986ace3650..3ba92393e092 100644 --- a/torch_geometric/nn/pool/__init__.py +++ b/torch_geometric/nn/pool/__init__.py @@ -1,5 +1,7 @@ +from typing import Optional from torch import Tensor +import torch_geometric.typing from torch_geometric.typing import OptTensor from .asap import ASAPooling @@ -20,8 +22,13 @@ torch_cluster = None -def fps(x: Tensor, batch: OptTensor = None, ratio: float = 0.5, - random_start: bool = True) -> Tensor: +def fps( + x: Tensor, + batch: OptTensor = None, + ratio: float = 0.5, + random_start: bool = True, + batch_size: Optional[int] = None, +) -> Tensor: r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space" `_ paper, which iteratively samples the @@ -45,15 +52,26 @@ def fps(x: Tensor, batch: OptTensor = None, ratio: float = 0.5, ratio (float, optional): Sampling ratio. (default: :obj:`0.5`) random_start (bool, optional): If set to :obj:`False`, use the first node in :math:`\mathbf{X}` as starting node. (default: obj:`True`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) :rtype: :class:`torch.Tensor` """ - return torch_cluster.fps(x, batch, ratio, random_start) - - -def knn(x: Tensor, y: Tensor, k: int, batch_x: OptTensor = None, - batch_y: OptTensor = None, cosine: bool = False, - num_workers: int = 1) -> Tensor: + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: + return torch_cluster.fps(x, batch, ratio, random_start) + return torch_cluster.fps(x, batch, ratio, random_start, batch_size) + + +def knn( + x: Tensor, + y: Tensor, + k: int, + batch_x: OptTensor = None, + batch_y: OptTensor = None, + cosine: bool = False, + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> Tensor: r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in :obj:`x`. @@ -86,15 +104,28 @@ def knn(x: Tensor, y: Tensor, k: int, batch_x: OptTensor = None, num_workers (int, optional): Number of workers to use for computation. Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. 
(default: :obj:`None`) :rtype: :class:`torch.Tensor` """ - return torch_cluster.knn(x, y, k, batch_x, batch_y, cosine, num_workers) - - -def knn_graph(x: Tensor, k: int, batch: OptTensor = None, loop: bool = False, - flow: str = 'source_to_target', cosine: bool = False, - num_workers: int = 1) -> Tensor: + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: + return torch_cluster.knn(x, y, k, batch_x, batch_y, cosine, + num_workers) + return torch_cluster.knn(x, y, k, batch_x, batch_y, cosine, num_workers, + batch_size) + + +def knn_graph( + x: Tensor, + k: int, + batch: OptTensor = None, + loop: bool = False, + flow: str = 'source_to_target', + cosine: bool = False, + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> Tensor: r"""Computes graph edges to the nearest :obj:`k` points. .. code-block:: python @@ -124,16 +155,28 @@ def knn_graph(x: Tensor, k: int, batch: OptTensor = None, loop: bool = False, num_workers (int, optional): Number of workers to use for computation. Has no effect in case :obj:`batch` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) :rtype: :class:`torch.Tensor` """ + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: + return torch_cluster.knn_graph(x, k, batch, loop, flow, cosine, + num_workers) return torch_cluster.knn_graph(x, k, batch, loop, flow, cosine, - num_workers) - - -def radius(x: Tensor, y: Tensor, r: float, batch_x: OptTensor = None, - batch_y: OptTensor = None, max_num_neighbors: int = 32, - num_workers: int = 1) -> Tensor: + num_workers, batch_size) + + +def radius( + x: Tensor, + y: Tensor, + r: float, + batch_x: OptTensor = None, + batch_y: OptTensor = None, + max_num_neighbors: int = 32, + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> Tensor: r"""Finds for each element in :obj:`y` all points in :obj:`x` within distance :obj:`r`. @@ -147,7 +190,8 @@ def radius(x: Tensor, y: Tensor, r: float, batch_x: OptTensor = None, y = torch.Tensor([[-1, 0], [1, 0]]) batch_y = torch.tensor([0, 0]) assign_index = radius(x, y, 1.5, batch_x, batch_y) - + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) Args: x (torch.Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. @@ -165,17 +209,28 @@ def radius(x: Tensor, y: Tensor, r: float, batch_x: OptTensor = None, num_workers (int, optional): Number of workers to use for computation. Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. 
(default: :obj:`None`) :rtype: :class:`torch.Tensor` """ + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: + return torch_cluster.radius(x, y, r, batch_x, batch_y, + max_num_neighbors, num_workers) return torch_cluster.radius(x, y, r, batch_x, batch_y, max_num_neighbors, - num_workers) - - -def radius_graph(x: Tensor, r: float, batch: OptTensor = None, - loop: bool = False, max_num_neighbors: int = 32, - flow: str = 'source_to_target', - num_workers: int = 1) -> Tensor: + num_workers, batch_size) + + +def radius_graph( + x: Tensor, + r: float, + batch: OptTensor = None, + loop: bool = False, + max_num_neighbors: int = 32, + flow: str = 'source_to_target', + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> Tensor: r"""Computes graph edges to all points within a given distance. .. code-block:: python @@ -204,15 +259,24 @@ def radius_graph(x: Tensor, r: float, batch: OptTensor = None, num_workers (int, optional): Number of workers to use for computation. Has no effect in case :obj:`batch` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) :rtype: :class:`torch.Tensor` """ + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: + return torch_cluster.radius_graph(x, r, batch, loop, max_num_neighbors, + flow, num_workers) return torch_cluster.radius_graph(x, r, batch, loop, max_num_neighbors, - flow, num_workers) + flow, num_workers, batch_size) -def nearest(x: Tensor, y: Tensor, batch_x: OptTensor = None, - batch_y: OptTensor = None) -> Tensor: +def nearest( + x: Tensor, + y: Tensor, + batch_x: OptTensor = None, + batch_y: OptTensor = None, +) -> Tensor: r"""Finds for each element in :obj:`y` the :obj:`k` nearest point in :obj:`x`. diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 372739b93743..7db085dfc15f 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -36,6 +36,7 @@ try: import torch_cluster # noqa WITH_TORCH_CLUSTER = True + WITH_TORCH_CLUSTER_BATCH_SIZE = 'batch_size' in torch_cluster.knn.__doc__ except (ImportError, OSError) as e: if isinstance(e, OSError): warnings.warn(f"An issue occurred while importing 'torch-cluster'. 
" From 7c4aef850f6606f971287b661217c6482720647b Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 17 May 2023 12:03:50 -0700 Subject: [PATCH 1191/2432] Address CUDA numerical mismatch in test (#7380) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- test/nn/dense/test_linear.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index cfa20fc404a4..271600e8e431 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -125,7 +125,7 @@ def test_hetero_linear(device): assert out.size() == (3, 32) jit = torch.jit.script(lin) - assert torch.allclose(jit(x, type_vec), out) + assert torch.allclose(jit(x, type_vec), out, atol=1e-3) @withCUDA From def5301f503bf27c27e8d19bcf0e252275990634 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 17 May 2023 12:17:34 -0700 Subject: [PATCH 1192/2432] Remove warning in `segmatmul_heuristic` (#7379) otherwise I get some warning --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- torch_geometric/nn/pool/__init__.py | 3 +-- torch_geometric/utils/hetero.py | 7 ++++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py index 3ba92393e092..2162ca2b1fcd 100644 --- a/torch_geometric/nn/pool/__init__.py +++ b/torch_geometric/nn/pool/__init__.py @@ -190,8 +190,7 @@ def radius( y = torch.Tensor([[-1, 0], [1, 0]]) batch_y = torch.tensor([0, 0]) assign_index = radius(x, y, 1.5, batch_x, batch_y) - batch_size (int, optional): The number of examples :math:`B`. - Automatically calculated if not given. (default: :obj:`None`) + Args: x (torch.Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index b215e09a1413..bb74a56c46b3 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -100,7 +100,12 @@ def segmatmul_heuristic(inputs: Tensor, type_ptr, weight: Tensor): in_feat = inputs.size(1) out_feat = weight.size(-1) # this heuristic was learned with learn_sklearn_heuristic on an A100 - x = torch.tensor([num_types, max_num_nodes_per_types, in_feat, out_feat]) + x = torch.tensor([ + int(num_types), + int(max_num_nodes_per_types), + int(in_feat), + int(out_feat) + ]) scale_mean = torch.tensor( [125.11603189, 12133.21523472, 163.81222321, 32.43755536]) scale_scale = torch.tensor( From 2e64b0f7162f3d2e9853da25ec637857c8018e58 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 17 May 2023 23:15:16 +0200 Subject: [PATCH 1193/2432] Drop multi-threading in `PrefetchLoader` (#7383) --- CHANGELOG.md | 2 +- test/loader/test_prefetch.py | 22 ++++---- torch_geometric/loader/__init__.py | 2 + torch_geometric/loader/prefetch.py | 90 ++++++++++++++---------------- 4 files changed, 54 insertions(+), 62 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27c4f2b1ccbc..0ccdb13eb0a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) -- Added `GPUPrefetcher` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378)) +- Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) diff --git a/test/loader/test_prefetch.py b/test/loader/test_prefetch.py index 32da97e15399..caa7ba3ae314 100644 --- a/test/loader/test_prefetch.py +++ b/test/loader/test_prefetch.py @@ -1,24 +1,21 @@ import torch -from torch_geometric.loader import NeighborLoader -from torch_geometric.loader.prefetch import GPUPrefetcher +from torch_geometric.loader import NeighborLoader, PrefetchLoader from torch_geometric.nn import GraphSAGE -from torch_geometric.testing import onlyCUDA +from torch_geometric.testing import withCUDA -@onlyCUDA -def test_gpu_prefetcher(): +@withCUDA +def test_prefetch_loader(device): data = [torch.randn(5, 5) for _ in range(10)] - loader = GPUPrefetcher(data, device='cuda') - assert str(loader).startswith('GPUPrefetcher') + loader = PrefetchLoader(data, device=device) + assert str(loader).startswith('PrefetchLoader') assert len(loader) == 10 for i, batch in enumerate(loader): - assert batch.is_cuda + assert batch.device == device assert torch.equal(batch.cpu(), data[i]) - assert loader.idx > 0 - assert loader.idx == 0 if __name__ == '__main__': @@ -28,7 +25,7 @@ def test_gpu_prefetcher(): from tqdm import tqdm parser = argparse.ArgumentParser() - parser.add_argument('--num_workers', type=int, default=8) + parser.add_argument('--num_workers', type=int, default=0) args = parser.parse_args() data = PygNodePropPredDataset('ogbn-products', root='/tmp/ogb')[0] @@ -46,6 +43,7 @@ def test_gpu_prefetcher(): num_neighbors=[10, 10], num_workers=args.num_workers, filter_per_worker=True, + persistent_workers=args.num_workers > 0, ) print('Forward pass without prefetching...') @@ -55,6 +53,6 @@ def test_gpu_prefetcher(): model(batch.x, batch.edge_index) print('Forward pass with prefetching...') - for batch in tqdm(GPUPrefetcher(loader, device='cuda')): + for batch in tqdm(PrefetchLoader(loader)): with torch.no_grad(): model(batch.x, batch.edge_index) diff --git a/torch_geometric/loader/__init__.py b/torch_geometric/loader/__init__.py index 600d3483e3fa..494a380023e2 100644 --- a/torch_geometric/loader/__init__.py +++ b/torch_geometric/loader/__init__.py @@ -18,6 +18,7 @@ from .neighbor_sampler import NeighborSampler from .imbalanced_sampler import ImbalancedSampler from .dynamic_batch_sampler import DynamicBatchSampler +from .prefetch import PrefetchLoader from .mixin import AffinityMixin __all__ = classes = [ @@ -42,6 +43,7 @@ 'NeighborSampler', 'ImbalancedSampler', 'DynamicBatchSampler', + 'PrefetchLoader', 'AffinityMixin', ] diff --git a/torch_geometric/loader/prefetch.py b/torch_geometric/loader/prefetch.py index d6dae8f63704..3bbfd69c6978 100644 --- 
a/torch_geometric/loader/prefetch.py +++ b/torch_geometric/loader/prefetch.py @@ -1,77 +1,69 @@ -from queue import Queue -from threading import Thread +from contextlib import nullcontext +from functools import partial from typing import Any, Optional import torch from torch.utils.data import DataLoader -class GPUPrefetcher: - r"""A GPU prefetcher class for asynchronously loading data from a +class PrefetchLoader: + r"""A GPU prefetcher class for asynchronously transferring data of a :class:`torch.utils.data.DataLoader` from host memory to device memory. Args: - loader (torch.utils.DataLoader): A data loader object. - device (torch.device): The CUDA device to load the data to. - prefetch_size (int, optional): The number of batches to prefetch at - once. (default: :obj:`1`) + loader (torch.utils.data.DataLoader): The data loader. + device (torch.device, optional): The device to load the data to. + (default: :obj:`None`) """ def __init__( self, loader: DataLoader, - device: torch.device, - prefetch_size: int = 1, + device: Optional[torch.device] = None, ): - if prefetch_size < 1: - raise ValueError(f"'prefetch_size' must be greater than 0 " - f"(got {prefetch_size})") + if device is None: + device = 'cuda' if torch.cuda.is_available() else 'cpu' self.loader = loader self.device = torch.device(device) - self.prefetch_size = prefetch_size - self.load_stream = torch.cuda.Stream(device=device) - self.queue = Queue(maxsize=prefetch_size) - self.worker: Optional[Thread] = None - - self.idx = 0 + self.is_cuda = torch.cuda.is_available() and self.device.type == 'cuda' def non_blocking_transfer(self, batch: Any) -> Any: - # (Recursive) non-blocking device transfer: + if not self.is_cuda: + return batch if isinstance(batch, (list, tuple)): return [self.non_blocking_transfer(v) for v in batch] if isinstance(batch, dict): return {k: self.non_blocking_transfer(v) for k, v in batch.items()} - with torch.cuda.stream(self.load_stream): - batch = batch.pin_memory() - return batch.to(self.device, non_blocking=True) - - def load_loop(self): - for batch in self.loader: - self.queue.put(self.non_blocking_transfer(batch)) - - def __iter__(self) -> 'GPUPrefetcher': - is_dead = self.worker is None or not self.worker.is_alive() - if is_dead and self.queue.empty() and self.idx == 0: - self.worker = Thread(target=self.load_loop) - self.worker.daemon = True - self.worker.start() - - return self - - def __next__(self) -> Any: - is_dead = not self.worker.is_alive() - if (is_dead and self.queue.empty()) or self.idx >= len(self): - self.idx = 0 - self.queue.join() - self.worker.join() - raise StopIteration - - out = self.queue.get() - self.queue.task_done() - self.idx += 1 - return out + batch = batch.pin_memory() + return batch.to(self.device, non_blocking=True) + + def __iter__(self) -> Any: + first = True + if self.is_cuda: + stream = torch.cuda.Stream() + stream_context = partial(torch.cuda.stream, stream=stream) + else: + stream = None + stream_context = nullcontext + + for next_batch in self.loader: + + with stream_context(): + next_batch = self.non_blocking_transfer(next_batch) + + if not first: + yield batch # noqa + else: + first = False + + if stream is not None: + torch.cuda.current_stream().wait_stream(stream) + + batch = next_batch + + yield batch def __len__(self) -> int: return len(self.loader) From ce84dd9216738e772ecb0214daea2ec5d5a23105 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 17 May 2023 15:08:40 -0700 Subject: [PATCH 1194/2432] Add padding capabilities to `HeteroData.to_homogeneous()` (#7374) 
useful for putting randomly generated FakeHeteroDataset data into a fused GNN like RGCNConv, otherwise the fakeheterodataset usually has x's w/ diff num of features and the resulting Data would not have any node features w/o this PR --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_hetero_data.py | 16 ++++++++++++++++ torch_geometric/data/hetero_data.py | 29 +++++++++++++++++++++++++---- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ccdb13eb0a4..888cb7c7d5c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index e0be1dd69f94..ddb3cd32b19e 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -489,6 +489,22 @@ def test_to_homogeneous_and_vice_versa(): assert out['author'].num_nodes == 200 +def test_to_homogeneous_padding(): + data = HeteroData() + data['paper'].x = torch.randn(100, 128) + data['author'].x = torch.randn(50, 64) + + out = data.to_homogeneous() + assert len(out) == 2 + assert out.node_type.size() == (150, ) + assert out.node_type[:100].abs().sum() == 0 + assert out.node_type[100:].sub(1).abs().sum() == 0 + assert out.x.size() == (150, 128) + assert torch.equal(out.x[:100], data['paper'].x) + assert torch.equal(out.x[100:, :64], data['author'].x) + assert out.x[100:, 64:].abs().sum() == 0 + + def test_hetero_data_to_canonical(): data = HeteroData() assert isinstance(data['user', 'product'], EdgeStorage) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index a65d3c8b82bd..99c78b0f5765 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -828,10 +828,20 @@ def fill_dummy_(stores: List[BaseStorage], def _consistent_size(stores: List[BaseStorage]) -> List[str]: sizes_dict = get_sizes(stores) - return [ - key for key, sizes in sizes_dict.items() - if len(sizes) == len(stores) and len(set(sizes)) == 1 - ] + keys = [] + for key, sizes in sizes_dict.items(): + # The attribute needs to exist in all types: + if len(sizes) != len(stores): + continue + # The attributes needs to have the same number of dimensions: + lengths = set([len(size) for size in sizes]) + if len(lengths) != 1: + continue + # The attributes needs to have the same size in all dimensions: + if len(sizes[0]) != 1 and len(set(sizes)) != 1: + continue + keys.append(key) + return keys if dummy_values: self = copy.copy(self) @@ -855,6 +865,17 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: continue values = [store[key] for store in self.node_stores] dim = self.__cat_dim__(key, values[0], 
self.node_stores[0]) + dim = values[0].dim() + dim if dim < 0 else dim + # For two-dimensional features, we allow arbitrary shapes and pad + # them with zeros if necessary in case their size doesn't match: + if values[0].dim() == 2 and dim == 0: + _max = max([value.size(-1) for value in values]) + for i, v in enumerate(values): + if v.size(-1) < _max: + values[i] = torch.cat( + [v, v.new_zeros(v.size(0), _max - v.size(-1))], + dim=-1, + ) value = torch.cat(values, dim) if len(values) > 1 else values[0] data[key] = value From 21d27a8dfa53ec6555a14a144c949d318887f9f5 Mon Sep 17 00:00:00 2001 From: Jintang Li Date: Thu, 18 May 2023 15:27:17 +0800 Subject: [PATCH 1195/2432] Added `from_dense` method to `SparseTensor` (#7387) Added `from_dense` method to `SparseTensor` to raise `ImportError` if `torch_sparse` is not installed. Fix #7386. --- CHANGELOG.md | 2 +- torch_geometric/typing.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 888cb7c7d5c0..83984b37f3e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -217,7 +217,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Infer correct CUDA device ID in `profileit` decorator ([#6164](https://github.com/pyg-team/pytorch_geometric/pull/6164)) - Correctly use edge weights in `GDC` example ([#6159](https://github.com/pyg-team/pytorch_geometric/pull/6159)) - Breaking Change: Moved PyTorch Lightning data modules to `torch_geometric.data.lightning` ([#6140](https://github.com/pyg-team/pytorch_geometric/pull/6140)) -- Make `torch_sparse` an optional dependency ([#6132](https://github.com/pyg-team/pytorch_geometric/pull/6132), [#6134](https://github.com/pyg-team/pytorch_geometric/pull/6134), [#6138](https://github.com/pyg-team/pytorch_geometric/pull/6138), [#6139](https://github.com/pyg-team/pytorch_geometric/pull/6139)) +- Make `torch_sparse` an optional dependency ([#6132](https://github.com/pyg-team/pytorch_geometric/pull/6132), [#6134](https://github.com/pyg-team/pytorch_geometric/pull/6134), [#6138](https://github.com/pyg-team/pytorch_geometric/pull/6138), [#6139](https://github.com/pyg-team/pytorch_geometric/pull/6139), [#7387](https://github.com/pyg-team/pytorch_geometric/pull/7387)) - Optimized `utils.softmax` implementation ([#6113](https://github.com/pyg-team/pytorch_geometric/pull/6113), [#6155](https://github.com/pyg-team/pytorch_geometric/pull/6155), [#6805](https://github.com/pyg-team/pytorch_geometric/pull/6805)) - Optimized `topk` implementation for large enough graphs ([#6123](https://github.com/pyg-team/pytorch_geometric/pull/6123)) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 7db085dfc15f..d25605ea7e3d 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -105,6 +105,11 @@ def from_edge_index( ) -> 'SparseTensor': raise ImportError("'SparseTensor' requires 'torch-sparse'") + @classmethod + def from_dense(self, mat: Tensor, + has_value: bool = True) -> 'SparseTensor': + raise ImportError("'SparseTensor' requires 'torch-sparse'") + def size(self, dim: int) -> int: raise ImportError("'SparseTensor' requires 'torch-sparse'") From 6b72fe5d3075501ac7e7c22d5f11c98e18af4ab9 Mon Sep 17 00:00:00 2001 From: Wendy Mak <6398157+wwymak@users.noreply.github.com> Date: Thu, 18 May 2023 11:06:22 +0200 Subject: [PATCH 1196/2432] `PMLP` implementation (#7370) I am not quite sure if the tests are sufficient. 
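For context, a minimal usage sketch of the model added here (it mirrors the new `test/nn/models/test_pmlp.py` further down; the toy graph and feature sizes are illustrative only):

```python
import torch

from torch_geometric.nn.models import PMLP

x = torch.randn(4, 16)                                   # node features
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])  # toy graph

model = PMLP(in_channels=16, hidden_channels=32, out_channels=2,
             num_layers=4)

# During training, PMLP behaves like a plain MLP and needs no graph:
model.train()
out = model(x)  # shape: [4, 2]

# At inference time, it additionally performs message passing:
model.eval()
out = model(x, edge_index)  # shape: [4, 2]
```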
Also, at the moment, I have limited the MLP layers to have the same bias/dropouts etc as per the reference repo, but wondering if they should follow more the format of the MLP model where you can input a list? --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + README.md | 1 + test/nn/conv/test_simple_conv.py | 25 ++++--- test/nn/models/test_pmlp.py | 23 +++++++ test/nn/models/test_rect.py | 4 +- torch_geometric/nn/conv/simple_conv.py | 20 +++++- torch_geometric/nn/models/__init__.py | 2 + torch_geometric/nn/models/pmlp.py | 92 ++++++++++++++++++++++++++ 8 files changed, 152 insertions(+), 16 deletions(-) create mode 100644 test/nn/models/test_pmlp.py create mode 100644 torch_geometric/nn/models/pmlp.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 83984b37f3e4..3f3c7d399f1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `PMLP` model ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370)) - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) diff --git a/README.md b/README.md index e67c1572ccfe..be467cc17d5d 100644 --- a/README.md +++ b/README.md @@ -279,6 +279,7 @@ Unlike simple stacking of GNN layers, these models could involve pre-processing, * **[Deep Graph Infomax](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DeepGraphInfomax.html)** from Veličković *et al.*: [Deep Graph Infomax](https://arxiv.org/abs/1809.10341) (ICLR 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_transductive.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_inductive.py)] * **Deep Multiplex Graph Infomax** from Park *et al.*: [Unsupervised Attributed Multiplex Network Embedding](https://arxiv.org/abs/1911.06750) (AAAI 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/dmgi_unsup.py)] * **[Masked Label Prediction](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MaskLabel.html)** from Shi *et al.*: [Masked Label Prediction: Unified Message Passing Model for Semi-Supervised Classification](https://arxiv.org/abs/2009.03509) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/unimp_arxiv.py)] +* **[PMLP](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.PMLP.html)** from Yang *et al.*: [Graph Neural Networks are Inherently Good Generalizers: Insights by Bridging GNNs and MLPs](https://arxiv.org/abs/2212.09034) (ICLR 2023)
Expand to see all implemented GNN models... diff --git a/test/nn/conv/test_simple_conv.py b/test/nn/conv/test_simple_conv.py index ed251cd15ced..28d7cffdb7c3 100644 --- a/test/nn/conv/test_simple_conv.py +++ b/test/nn/conv/test_simple_conv.py @@ -12,11 +12,12 @@ ('mean', None), ('sum', 'sum'), (['mean', 'max'], 'cat'), + ('mean', 'self_loop'), ]) def test_simple_conv(aggr, combine_root): x1 = torch.randn(4, 8) x2 = torch.randn(2, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 1]]) adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) conv = SimpleConv(aggr, combine_root) @@ -46,13 +47,15 @@ def test_simple_conv(aggr, combine_root): assert torch.allclose(jit(x1, adj2.t()), out) # Test bipartite message passing: - adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - - out = conv((x1, x2), edge_index) - assert out.size() == (2, output_size) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out) - assert torch.allclose(conv((x1, x2), adj1.t()), out) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) - assert torch.allclose(conv((x1, x2), adj2.t()), out) + if combine_root != 'self_loop': + adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) + + out = conv((x1, x2), edge_index) + assert out.size() == (2, output_size) + assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out) + assert torch.allclose(conv((x1, x2), adj1.t()), out) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, + sparse_sizes=(4, 2)) + assert torch.allclose(conv((x1, x2), adj2.t()), out) diff --git a/test/nn/models/test_pmlp.py b/test/nn/models/test_pmlp.py new file mode 100644 index 000000000000..c7933e959b5e --- /dev/null +++ b/test/nn/models/test_pmlp.py @@ -0,0 +1,23 @@ +import pytest +import torch + +from torch_geometric.nn.models import PMLP + + +def test_pmlp(): + x = torch.randn(4, 16) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + + pmlp = PMLP(in_channels=16, hidden_channels=32, out_channels=2, + num_layers=4) + assert str(pmlp) == 'PMLP(16, 2, num_layers=4)' + + pmlp.training = True + assert pmlp(x).size() == (4, 2) + + pmlp.training = False + assert pmlp(x, edge_index).size() == (4, 2) + + with pytest.raises(ValueError, match="'edge_index' needs to be present"): + pmlp.training = False + pmlp(x) diff --git a/test/nn/models/test_rect.py b/test/nn/models/test_rect.py index 74f2b0320d07..6364dbe211b9 100644 --- a/test/nn/models/test_rect.py +++ b/test/nn/models/test_rect.py @@ -19,13 +19,13 @@ def test_rect(): assert out.size() == (6, 8) if torch_geometric.typing.WITH_TORCH_SPARSE: adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)) - assert torch.allclose(out, model(x, adj.t())) + assert torch.allclose(out, model(x, adj.t()), atol=1e-6) # Test `embed`: embed_out = model.embed(x, edge_index) assert embed_out.size() == (6, 16) if torch_geometric.typing.WITH_TORCH_SPARSE: - assert torch.allclose(embed_out, model.embed(x, adj.t())) + assert torch.allclose(embed_out, model.embed(x, adj.t()), atol=1e-6) # Test `get_semantic_labels`: labeds_out = model.get_semantic_labels(x, y, mask) diff --git a/torch_geometric/nn/conv/simple_conv.py b/torch_geometric/nn/conv/simple_conv.py index 5a11a95e8099..532f5ebc7e6f 100644 --- a/torch_geometric/nn/conv/simple_conv.py +++ b/torch_geometric/nn/conv/simple_conv.py @@ -11,8 +11,9 @@ OptTensor, Size, SparseTensor, + torch_sparse, ) -from 
torch_geometric.utils import spmm +from torch_geometric.utils import add_self_loops, spmm class SimpleConv(MessagePassing): @@ -34,7 +35,7 @@ class SimpleConv(MessagePassing): that automatically resolves to it). (default: :obj:`"sum"`) combine_root (str, optional): Specifies whether or how to combine the central node representation (one of :obj:`"sum"`, :obj:`"cat"`, - :obj:`None`). (default: :obj:`None`) + :obj:`"self_loop"`, :obj:`None`). (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.MessagePassing`. @@ -53,7 +54,7 @@ def __init__( combine_root: Optional[str] = None, **kwargs, ): - if combine_root not in ['sum', 'cat', None]: + if combine_root not in ['sum', 'cat', 'self_loop', None]: raise ValueError(f"Received invalid value for 'combine_root' " f"(got '{combine_root}')") @@ -62,6 +63,19 @@ def __init__( def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_weight: OptTensor = None, size: Size = None) -> Tensor: + + if self.combine_root is not None: + if self.combine_root == 'self_loop': + if not isinstance(x, Tensor) or (size is not None + and size[0] != size[1]): + raise ValueError("Cannot use `combine_root='self_loop'` " + "for bipartite message passing") + if isinstance(edge_index, Tensor): + edge_index, edge_weight = add_self_loops( + edge_index, edge_weight, num_nodes=x.size(0)) + elif isinstance(edge_index, SparseTensor): + edge_index = torch_sparse.set_diag(edge_index) + if isinstance(x, Tensor): x: OptPairTensor = (x, x) diff --git a/torch_geometric/nn/models/__init__.py b/torch_geometric/nn/models/__init__.py index 46b9fd26fb82..9b716e21e858 100644 --- a/torch_geometric/nn/models/__init__.py +++ b/torch_geometric/nn/models/__init__.py @@ -23,6 +23,7 @@ from .mask_label import MaskLabel from .rev_gnn import GroupAddRev from .gnnff import GNNFF +from .pmlp import PMLP __all__ = classes = [ 'MLP', @@ -62,4 +63,5 @@ 'MaskLabel', 'GroupAddRev', 'GNNFF', + 'PMLP', ] diff --git a/torch_geometric/nn/models/pmlp.py b/torch_geometric/nn/models/pmlp.py new file mode 100644 index 000000000000..7677e0b3462f --- /dev/null +++ b/torch_geometric/nn/models/pmlp.py @@ -0,0 +1,92 @@ +from typing import Optional + +import torch +import torch.nn.functional as F +from torch import Tensor + +from torch_geometric.nn import SimpleConv +from torch_geometric.nn.dense.linear import Linear + + +class PMLP(torch.nn.Module): + r"""The P(ropagational)MLP model from the `"Graph Neural Networks are + Inherently Good Generalizers: Insights by Bridging GNNs and MLPs" + `_ paper. + :class:`PMLP` is identical to a standard MLP during training, but then + adopts a GNN architecture during testing. + + Args: + in_channels (int): Size of each input sample. + hidden_channels (int): Size of each hidden sample. + out_channels (int): Size of each output sample. + num_layers (int): The number of layers. + dropout (float, optional): Dropout probability of each hidden + embedding. (default: :obj:`0.`) + bias (bool, optional): If set to :obj:`False`, the module + will not learn additive biases. 
(default: :obj:`True`) + """ + def __init__( + self, + in_channels: int, + hidden_channels: int, + out_channels: int, + num_layers: int, + dropout: float = 0., + bias: bool = True, + ): + super().__init__() + + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.num_layers = num_layers + self.dropout = dropout + self.bias = bias + + self.lins = torch.nn.ModuleList() + self.lins.append(Linear(in_channels, hidden_channels, self.bias)) + for _ in range(self.num_layers - 2): + lin = Linear(hidden_channels, hidden_channels, self.bias) + self.lins.append(lin) + self.lins.append(Linear(hidden_channels, out_channels, self.bias)) + + self.norm = torch.nn.BatchNorm1d(hidden_channels, affine=False, + track_running_stats=False) + + self.conv = SimpleConv(aggr='mean', combine_root='self_loop') + + self.reset_parameters() + + def reset_parameters(self): + r"""Resets all learnable parameters of the module.""" + for lin in self.lins: + torch.nn.init.xavier_uniform_(lin.weight, gain=1.414) + if self.bias: + torch.nn.init.zeros_(lin.bias) + + def forward( + self, + x: torch.Tensor, + edge_index: Optional[Tensor] = None, + ) -> torch.Tensor: + """""" + if not self.training and edge_index is None: + raise ValueError(f"'edge_index' needs to be present during " + f"inference in '{self.__class__.__name__}'") + + for i in range(self.num_layers): + x = x @ self.lins[i].weight.t() + if not self.training: + x = self.conv(x, edge_index) + if self.bias: + x = x + self.lins[i].bias + if i != self.num_layers - 1: + x = self.norm(x) + x = x.relu() + x = F.dropout(x, p=self.dropout, training=self.training) + + return x + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.out_channels}, num_layers={self.num_layers})') From dfd32668aea953c8bb56f97364d8e028f267bde6 Mon Sep 17 00:00:00 2001 From: Zecheng Zhang Date: Thu, 18 May 2023 03:34:24 -0700 Subject: [PATCH 1197/2432] Fix `graph_gps` example minor error (#7388) Fix graph gps example minor error --- examples/graph_gps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/graph_gps.py b/examples/graph_gps.py index 1ad7ad7deedb..d2a606ef8a9e 100644 --- a/examples/graph_gps.py +++ b/examples/graph_gps.py @@ -102,7 +102,7 @@ def test(loader): for epoch in range(1, 101): - loss = train(epoch) + loss = train() val_mae = test(val_loader) test_mae = test(test_loader) scheduler.step(val_mae) From da72fa77335f4ab9d6cd0d87d2fece0ed5424e7f Mon Sep 17 00:00:00 2001 From: Charles Dufour <34485907+dufourc1@users.noreply.github.com> Date: Thu, 18 May 2023 15:44:38 +0200 Subject: [PATCH 1198/2432] Update initialization of `attribution_method` in `CaptumExplainer` (#7391) This allows the algorithm to be called multiple time without raising an error, close #7390 --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/explain/algorithm/captum_explainer.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f3c7d399f1c..f940ab896f9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041)) - Added an optional `batch_size` and `max_num_nodes` arguments to `MemPooling` layer ([#7239](https://github.com/pyg-team/pytorch_geometric/pull/7239)) - Fixed training issues of the GraphGPS example ([#7377](https://github.com/pyg-team/pytorch_geometric/pull/7377)) +- Allowed `CaptumExplainer` to be called multiple times in a row ([#7391](https://github.com/pyg-team/pytorch_geometric/pull/7391)) ### Removed diff --git a/torch_geometric/explain/algorithm/captum_explainer.py b/torch_geometric/explain/algorithm/captum_explainer.py index 64b4e6962015..1af5c202dd32 100644 --- a/torch_geometric/explain/algorithm/captum_explainer.py +++ b/torch_geometric/explain/algorithm/captum_explainer.py @@ -150,7 +150,7 @@ def forward( metadata = None captum_model = CaptumModel(model, mask_type, index) - self.attribution_method = self.attribution_method(captum_model) + attribution_method = self.attribution_method(captum_model) # In captum, the target is the index for which # the attribution is computed. @@ -159,7 +159,7 @@ def forward( else: target = target[index] - attributions = self.attribution_method.attribute( + attributions = attribution_method.attribute( inputs=inputs, target=target, additional_forward_args=add_forward_args, From ec8fba0ab954593af5ebecfc0884472e0d2553bb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 18 May 2023 16:09:03 +0200 Subject: [PATCH 1199/2432] Fix broken master (#7392) --- test/nn/pool/select/test_select_topk.py | 2 +- test/utils/test_to_dense_batch.py | 10 +++++++--- torch_geometric/nn/aggr/base.py | 4 ++-- torch_geometric/nn/pool/connect/base.py | 2 +- torch_geometric/nn/pool/select/base.py | 2 +- torch_geometric/utils/to_dense_batch.py | 22 +++++----------------- 6 files changed, 17 insertions(+), 25 deletions(-) diff --git a/test/nn/pool/select/test_select_topk.py b/test/nn/pool/select/test_select_topk.py index 571eaa57af6d..a0418fff9306 100644 --- a/test/nn/pool/select/test_select_topk.py +++ b/test/nn/pool/select/test_select_topk.py @@ -47,7 +47,7 @@ def test_select_topk(min_score): assert str(pool) == 'SelectTopK(16, min_score=2.0)' out = pool(x, batch) - assert isinstance(out, SelectOutput.__original_fn) + assert isinstance(out, SelectOutput) assert out.num_nodes == 6 assert out.num_clusters <= out.num_nodes diff --git a/test/utils/test_to_dense_batch.py b/test/utils/test_to_dense_batch.py index 00723569e900..0c84aef88641 100644 --- a/test/utils/test_to_dense_batch.py +++ b/test/utils/test_to_dense_batch.py @@ -57,12 +57,16 @@ def test_to_dense_batch(fill): @onlyFullTest def test_to_dense_batch_jit(): @torch.jit.script - def to_dense_batch_jit(x: Tensor, batch: Tensor) -> Tuple[Tensor, Tensor]: - return to_dense_batch(x, batch) + def to_dense_batch_jit( + x: Tensor, + batch: Tensor, + fill_value: Tensor, + ) -> Tuple[Tensor, Tensor]: + return to_dense_batch(x, batch, fill_value=fill_value) x = torch.randn(6, 2) batch = torch.tensor([0, 0, 1, 2, 2, 2]) - out, mask = to_dense_batch_jit(x, batch) + out, mask = to_dense_batch_jit(x, batch, fill_value=torch.tensor(0.0)) assert out.size() == (3, 3, 2) assert mask.size() == (3, 3) diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 0dc2166cdcec..3de22b8e7c80 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, Union +from typing import Optional, Tuple 
import torch from torch import Tensor @@ -161,7 +161,7 @@ def to_dense_batch( ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2, - fill_value: Union[Optional[float], Tensor] = None, + fill_value: float = 0.0, max_num_elements: Optional[int] = None, ) -> Tuple[Tensor, Tensor]: diff --git a/torch_geometric/nn/pool/connect/base.py b/torch_geometric/nn/pool/connect/base.py index 262937603f11..fd78a5c61dac 100644 --- a/torch_geometric/nn/pool/connect/base.py +++ b/torch_geometric/nn/pool/connect/base.py @@ -7,7 +7,7 @@ from torch_geometric.nn.pool.select import SelectOutput -@torch.jit.script_if_tracing +@torch.jit.script @dataclass(init=False) class ConnectOutput: r"""The output of the :class:`Connect` method, which holds the coarsened diff --git a/torch_geometric/nn/pool/select/base.py b/torch_geometric/nn/pool/select/base.py index ce9281653dc6..fcde4823477c 100644 --- a/torch_geometric/nn/pool/select/base.py +++ b/torch_geometric/nn/pool/select/base.py @@ -5,7 +5,7 @@ from torch import Tensor -@torch.jit.script_if_tracing +@torch.jit.script @dataclass(init=False) class SelectOutput: r"""The output of the :class:`Select` method, which holds an assignment diff --git a/torch_geometric/utils/to_dense_batch.py b/torch_geometric/utils/to_dense_batch.py index c90088181358..1e5538fe3c0e 100644 --- a/torch_geometric/utils/to_dense_batch.py +++ b/torch_geometric/utils/to_dense_batch.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, Union +from typing import Optional, Tuple import torch from torch import Tensor @@ -6,22 +6,10 @@ from torch_geometric.utils import scatter -@torch.jit._overload -def to_dense_batch(x, batch, fill_value, max_num_nodes, batch_size): # noqa - # type: (Tensor, Optional[Tensor], Optional[float], Optional[int], Optional[int]) -> Tuple[Tensor, Tensor] # noqa - pass - - -@torch.jit._overload -def to_dense_batch(x, batch, fill_value, max_num_nodes, batch_size): # noqa - # type: (Tensor, Optional[Tensor], Tensor, Optional[int], Optional[int]) -> Tuple[Tensor, Tensor] # noqa - pass - - -def to_dense_batch( # noqa +def to_dense_batch( x: Tensor, batch: Optional[Tensor] = None, - fill_value: Union[Optional[float], Tensor] = None, + fill_value: float = 0.0, max_num_nodes: Optional[int] = None, batch_size: Optional[int] = None, ) -> Tuple[Tensor, Tensor]: @@ -41,8 +29,8 @@ def to_dense_batch( # noqa batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. Must be ordered. (default: :obj:`None`) - fill_value (float or torch.Tensor, optional): The value for invalid - entries in the resulting dense output tensor. (default: :obj:`0`) + fill_value (float, optional): The value for invalid entries in the + resulting dense output tensor. (default: :obj:`0`) max_num_nodes (int, optional): The size of the output node dimension. (default: :obj:`None`) batch_size (int, optional) The batch size. (default: :obj:`None`) From 15573f4674b2a37b1b9adc967df69ef6eee573ea Mon Sep 17 00:00:00 2001 From: happykygo <62350285+happykygo@users.noreply.github.com> Date: Fri, 19 May 2023 03:19:47 -0400 Subject: [PATCH 1200/2432] `LightGCN` bug fixes (#7384) 1. In BPRLoss(_Loss)::forward, log_prob is divided by n_pairs twice. Changed the return clause to: return -log_prob + regularization / n_pairs 2. In LightGCN()::recommendation_loss, it calls BPRLoss::forward and passes in the embeddings of all nodes. It should pass in only the embeddings of the nodes in the mini-batch. 
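
For reference, a minimal sketch of the corrected call pattern (it mirrors the updated `test_lightgcn_ranking` below; the toy interaction graph and all sizes are made up for illustration):

```python
import torch

from torch_geometric.nn.models import LightGCN

# Toy interaction graph over 6 nodes; sizes are illustrative only.
edge_index = torch.tensor([[0, 1, 2, 3], [4, 5, 4, 5]])
model = LightGCN(num_nodes=6, embedding_dim=8, num_layers=2)

# Rank two positive and two negative candidate edges:
edge_label_index = torch.tensor([[0, 1, 0, 1], [4, 5, 5, 4]])
pred = model(edge_index, edge_label_index)

loss = model.recommendation_loss(
    pos_edge_rank=pred[:2],
    neg_edge_rank=pred[2:],
    node_id=edge_index.unique(),  # regularize only mini-batch embeddings
    lambda_reg=1e-4,
)
```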
--------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/models/test_lightgcn.py | 7 ++++++- torch_geometric/nn/models/lightgcn.py | 24 ++++++++++++++++++------ 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f940ab896f9a..01117b82724f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed a bug in `LightGCN.recommendation_loss()` to only use the embeddings of the nodes involved in the current mini-batch ([#7384](https://github.com/pyg-team/pytorch_geometric/pull/7384)) - Added an optional `max_num_elements` argument to `SortAggregation` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) - Added the option to pass `fill_value` as a `torch.tensor` to `utils.to_dense_batch` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) - Fixed a bug in which inputs where modified in-place in `to_hetero_with_bases` ([#7363](https://github.com/pyg-team/pytorch_geometric/pull/7363)) diff --git a/test/nn/models/test_lightgcn.py b/test/nn/models/test_lightgcn.py index 6722689f200b..9f49ab902142 100644 --- a/test/nn/models/test_lightgcn.py +++ b/test/nn/models/test_lightgcn.py @@ -21,7 +21,12 @@ def test_lightgcn_ranking(embedding_dim, with_edge_weight, lambda_reg, alpha): pred = model(edge_index, edge_label_index, edge_weight) assert pred.size() == (100, ) - loss = model.recommendation_loss(pred[:50], pred[50:], lambda_reg) + loss = model.recommendation_loss( + pos_edge_rank=pred[:50], + neg_edge_rank=pred[50:], + node_id=edge_index.unique(), + lambda_reg=lambda_reg, + ) assert loss.dim() == 0 and loss > 0 out = model.recommend(edge_index, edge_weight, k=2) diff --git a/torch_geometric/nn/models/lightgcn.py b/torch_geometric/nn/models/lightgcn.py index 9a29a4f6cea6..97495bed427f 100644 --- a/torch_geometric/nn/models/lightgcn.py +++ b/torch_geometric/nn/models/lightgcn.py @@ -132,6 +132,7 @@ def forward( out_src = out[edge_label_index[0]] out_dst = out[edge_label_index[1]] + return (out_src * out_dst).sum(dim=-1) def predict_link( @@ -201,8 +202,14 @@ def link_pred_loss(self, pred: Tensor, edge_label: Tensor, loss_fn = torch.nn.BCEWithLogitsLoss(**kwargs) return loss_fn(pred, edge_label.to(pred.dtype)) - def recommendation_loss(self, pos_edge_rank: Tensor, neg_edge_rank: Tensor, - lambda_reg: float = 1e-4, **kwargs) -> Tensor: + def recommendation_loss( + self, + pos_edge_rank: Tensor, + neg_edge_rank: Tensor, + node_id: Optional[Tensor] = None, + lambda_reg: float = 1e-4, + **kwargs, + ) -> Tensor: r"""Computes the model loss for a ranking objective via the Bayesian Personalized Ranking (BPR) loss. @@ -215,6 +222,9 @@ def recommendation_loss(self, pos_edge_rank: Tensor, neg_edge_rank: Tensor, Args: pos_edge_rank (torch.Tensor): Positive edge rankings. neg_edge_rank (torch.Tensor): Negative edge rankings. + node_id (torch.Tensor): The indices of the nodes involved for + deriving a prediction for both positive and negative edges. + If set to :obj:`None`, all nodes will be used. lambda_reg (int, optional): The :math:`L_2` regularization strength of the Bayesian Personalized Ranking (BPR) loss. (default: :obj:`1e-4`) @@ -223,7 +233,9 @@ def recommendation_loss(self, pos_edge_rank: Tensor, neg_edge_rank: Tensor, function. 
""" loss_fn = BPRLoss(lambda_reg, **kwargs) - return loss_fn(pos_edge_rank, neg_edge_rank, self.embedding.weight) + emb = self.embedding.weight + emb = emb if node_id is None else emb[node_id] + return loss_fn(pos_edge_rank, neg_edge_rank, emb) def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.num_nodes}, ' @@ -275,11 +287,11 @@ def forward(self, positives: Tensor, negatives: Tensor, should be used for :math:`L_2` regularization (default: :obj:`None`). """ - n_pairs = positives.size(0) log_prob = F.logsigmoid(positives - negatives).mean() - regularization = 0 + regularization = 0 if self.lambda_reg != 0: regularization = self.lambda_reg * parameters.norm(p=2).pow(2) + regularization = regularization / positives.size(0) - return (-log_prob + regularization) / n_pairs + return -log_prob + regularization From d26eba009c44a34dc68a6af41c9d845d59ec68f2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 21 May 2023 10:26:06 +0200 Subject: [PATCH 1201/2432] Fix `test_inits` thresholds (#7400) --- test/nn/test_inits.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/nn/test_inits.py b/test/nn/test_inits.py index 1c04394f0f5a..1984d1928c60 100644 --- a/test/nn/test_inits.py +++ b/test/nn/test_inits.py @@ -21,12 +21,12 @@ def test_inits(): assert x.max() <= 0.5 glorot(x) - assert x.min() >= -2.0 - assert x.max() <= 2.0 + assert x.min() >= -1.1 + assert x.max() <= 1.1 glorot_orthogonal(x, scale=1.0) - assert x.min() >= -2.0 - assert x.max() <= 2.0 + assert x.min() >= -2.5 + assert x.max() <= 2.5 zeros(x) assert x.tolist() == [[0, 0, 0, 0]] @@ -40,12 +40,12 @@ def test_inits(): assert nn.weight[0].max() <= 0.5 glorot(nn.weight) - assert nn.weight[0].min() >= -1.25 - assert nn.weight[0].max() <= 1.25 + assert nn.weight[0].min() >= -0.45 + assert nn.weight[0].max() <= 0.45 glorot_orthogonal(nn.weight, scale=1.0) - assert nn.weight[0].min() >= -1.25 - assert nn.weight[0].max() <= 1.25 + assert nn.weight[0].min() >= -2.5 + assert nn.weight[0].max() <= 2.5 def test_reset(): From 3d010dcde2d25bf174d00318d017721c95328552 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 21 May 2023 10:26:47 +0200 Subject: [PATCH 1202/2432] Infer `filter_per_worker` option (#7399) --- CHANGELOG.md | 1 + benchmark/inference/inference_benchmark.py | 6 ----- benchmark/loader/neighbor_loader.py | 14 +++++----- benchmark/training/training_benchmark.py | 5 ---- docs/source/advanced/cpu_affinity.rst | 18 +++++-------- test/loader/test_link_neighbor_loader.py | 2 +- test/loader/test_neighbor_loader.py | 4 +-- test/loader/test_prefetch.py | 1 - torch_geometric/loader/hgt_loader.py | 21 ++++++++------- torch_geometric/loader/link_loader.py | 25 ++++++++++------- .../loader/link_neighbor_loader.py | 21 ++++++++------- torch_geometric/loader/neighbor_loader.py | 21 ++++++++------- torch_geometric/loader/node_loader.py | 25 ++++++++++------- torch_geometric/loader/utils.py | 12 ++++++++- torch_geometric/loader/zip_loader.py | 27 ++++++++++++------- 15 files changed, 114 insertions(+), 89 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 01117b82724f..750d39950cf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- The `filter_per_worker` option will not get automatically inferred by default based on the device of the underlying data ([#7399](https://github.com/pyg-team/pytorch_geometric/pull/7399)) - Fixed a bug in `LightGCN.recommendation_loss()` to only use the embeddings of the nodes involved in the current mini-batch ([#7384](https://github.com/pyg-team/pytorch_geometric/pull/7384)) - Added an optional `max_num_elements` argument to `SortAggregation` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) - Added the option to pass `fill_value` as a `torch.tensor` to `utils.to_dense_batch` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) diff --git a/benchmark/inference/inference_benchmark.py b/benchmark/inference/inference_benchmark.py index 4567d3ea56a1..380c1d53f5fa 100644 --- a/benchmark/inference/inference_benchmark.py +++ b/benchmark/inference/inference_benchmark.py @@ -101,7 +101,6 @@ def run(args: argparse.ArgumentParser): num_neighbors=[-1], # layer-wise inference input_nodes=mask, sampler=sampler, - filter_per_worker=args.filter_per_worker, **kwargs, ) if with_loader else None if args.evaluate and not args.full_batch: @@ -110,7 +109,6 @@ def run(args: argparse.ArgumentParser): num_neighbors=[-1], # layer-wise inference input_nodes=test_mask, sampler=None, - filter_per_worker=args.filter_per_worker, **kwargs, ) @@ -123,7 +121,6 @@ def run(args: argparse.ArgumentParser): num_neighbors=num_neighbors, input_nodes=mask, sampler=sampler, - filter_per_worker=args.filter_per_worker, **kwargs, ) if with_loader else None if args.evaluate and not args.full_batch: @@ -132,7 +129,6 @@ def run(args: argparse.ArgumentParser): num_neighbors=num_neighbors, input_nodes=test_mask, sampler=None, - filter_per_worker=args.filter_per_worker, **kwargs, ) @@ -287,8 +283,6 @@ def run(args: argparse.ArgumentParser): help='Use DataLoader affinitzation.') add('--loader-cores', nargs='+', default=[], type=int, help="List of CPU core IDs to use for DataLoader workers") - add('--filter-per-worker', action='http://github.com/store_true', - help='Enable filter-per-worker feature of the dataloader.') add('--measure-load-time', action='http://github.com/store_true') add('--full-batch', action='http://github.com/store_true', help='Use full batch mode') add('--evaluate', action='http://github.com/store_true') diff --git a/benchmark/loader/neighbor_loader.py b/benchmark/loader/neighbor_loader.py index e5c906cf7cf0..da3e6b7900bc 100644 --- a/benchmark/loader/neighbor_loader.py +++ b/benchmark/loader/neighbor_loader.py @@ -44,10 +44,13 @@ def run(args: argparse.ArgumentParser): print(f'Training sampling with {num_neighbors} neighbors') for batch_size in args.batch_sizes: train_loader = NeighborLoader( - data, num_neighbors=num_neighbors, - input_nodes=train_idx, batch_size=batch_size, - shuffle=True, num_workers=args.num_workers, - filter_per_worker=args.filter_per_worker) + data, + num_neighbors=num_neighbors, + input_nodes=train_idx, + batch_size=batch_size, + shuffle=True, + num_workers=args.num_workers, + ) cpu_affinity = train_loader.enable_cpu_affinity( args.loader_cores ) if args.cpu_affinity else nullcontext() @@ -78,7 +81,6 @@ def run(args: argparse.ArgumentParser): batch_size=batch_size, shuffle=False, num_workers=args.num_workers, - filter_per_worker=args.filter_per_worker, ) cpu_affinity = subgraph_loader.enable_cpu_affinity( args.loader_cores) if args.cpu_affinity else nullcontext() @@ -123,8 +125,6 @@ def run(args: 
argparse.ArgumentParser): help="Number of iterations for each test setting.") add('--profile', default=False, action='http://github.com/store_true', help="Run torch.profiler.") - add('--filter-per-worker', default=False, action='http://github.com/store_true', - help="Use filter per worker.") add('--cpu-affinity', default=False, action='http://github.com/store_true', help="Use DataLoader affinitzation.") add('--loader-cores', nargs='+', default=[], type=int, diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index 9e3b7a936e7e..15a4b2da7522 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -148,7 +148,6 @@ def run(args: argparse.ArgumentParser): data, input_nodes=mask, sampler=sampler, - filter_per_worker=args.filter_per_worker, **kwargs, ) if args.evaluate: @@ -156,14 +155,12 @@ def run(args: argparse.ArgumentParser): data, input_nodes=val_mask, sampler=None, - filter_per_worker=args.filter_per_worker, **kwargs, ) test_loader = NeighborLoader( data, input_nodes=test_mask, sampler=None, - filter_per_worker=args.filter_per_worker, **kwargs, ) for hidden_channels in args.num_hidden_channels: @@ -326,8 +323,6 @@ def run(args: argparse.ArgumentParser): help="Use DataLoader affinitzation.") add('--loader-cores', nargs='+', default=[], type=int, help="List of CPU core IDs to use for DataLoader workers.") - add('--filter-per-worker', action='http://github.com/store_true', - help='Enable filter-per-worker feature of the dataloader.') add('--measure-load-time', action='http://github.com/store_true') add('--evaluate', action='http://github.com/store_true') add('--write-csv', choices=[None, 'bench', 'prof'], default=None, diff --git a/docs/source/advanced/cpu_affinity.rst b/docs/source/advanced/cpu_affinity.rst index c128aea4ecec..c1da03ce3501 100644 --- a/docs/source/advanced/cpu_affinity.rst +++ b/docs/source/advanced/cpu_affinity.rst @@ -16,8 +16,8 @@ The following article discusses readily available tools and environment settings .. note:: Overall, CPU affinity can be a useful tool for improving the performance and predictability of certain types of applications, but one configuration does not necessarily fit all cases: it is important to carefully consider whether CPU affinity is appropriate for your use case, and to test and measure the impact of any changes you make. -Using CPU affinity and :attr:`filter_per_worker` ------------------------------------------------- +Using CPU affinity +------------------ Each :pyg:`PyG` workload can be parallelized using the :pytorch:`PyTorch` iterator class :class:`MultiProcessingDataLoaderIter`, which is automatically enabled in case :obj:`num_workers > 0` is passed to a :class:`torch.utils.data.DataLoader`. Under the hood, it creates :obj:`num_workers` many sub-processes that will run in parallel to the main process. @@ -39,7 +39,6 @@ The recommended number of workers to start with lies between :obj:`[2, 4]`, and loader = NeigborLoader( data, num_workers=3, - filter_per_worker=True, ..., ) @@ -47,14 +46,11 @@ The recommended number of workers to start with lies between :obj:`[2, 4]`, and for batch in loader: pass -It is generally adivisable to use :obj:`filter_per_worker=True` when enabling multi-process dataloaders. +It is generally adivisable to use :obj:`filter_per_worker=True` for any multi-process CPU workloads (:obj:`True` by default). 
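Worker processes can also be pinned to an explicit list of cores; a rough sketch follows (the core IDs are placeholders and depend on your machine):

.. code-block:: python

    loader = NeighborLoader(
        data,
        num_neighbors=[10, 10],
        num_workers=3,
    )

    with loader.enable_cpu_affinity(loader_cores=[0, 1, 2]):
        for batch in loader:
            pass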
The workers then prepare each mini-batch: first by sampling the node indices using pre-defined a sampler, and secondly filtering node and edge features according to sampled nodes and edges. The filtering function selects node feature vectors from the complete input :class:`~torch_geometric.data.Data` tensor loaded into DRAM. -This is a memory-expensive call which takes a significant time of each :class:`~torch.utisl.data.DataLoader` iteration. -By default :attr:`filter_per_worker` is set to :attr:`False`, which causes that this execution is sent back to the main process. -However, this can cause performance issues, because the main process will not be able to process all requests efficiently, especially with larger number of workers. When :attr:`filter_per_worker` is set to :attr:`True`, each worker's subprocess performs the filtering within it's CPU resource. -This, main process resources are relieved and can be secured only for GNN computation. +Hence, main process resources are relieved and can be secured only for GNN computation. Binding processes to physical cores ----------------------------------- @@ -133,7 +129,7 @@ The general guidelines for achieving the best performance with CPU affinity can #. Enable multi-process data loaders by setting :attr:`num_workers > 0`. A good estimate for :obj:`num_workers` lies in the range :obj:`[2, 4]`. However, for more complex datasets you might want to experiment with larger number of workers. - Enable :pyg:`PyG` data loaders with :obj:`filter_per_worker=True` and use the :meth:`~torch_geometric.loader.AffinityMixin.enable_cpu_affinity` feature to affinitize :class:`~torch.utils.data.DataLoader` cores. + Use the :meth:`~torch_geometric.loader.AffinityMixin.enable_cpu_affinity` feature to affinitize :class:`~torch.utils.data.DataLoader` cores. #. Bind execution to physical cores. Alternatively, hyperthreading can be disabled completely at a system-level. #. Separate the cores used for main process from the data loader workers' cores by using :obj:`numactl`, :obj:`KMP_AFFINITY` of the :obj:`libiomp5` library, or :obj:`GOMP_CPU_AFFINITY` of the :obj:`libgomp` library. @@ -165,14 +161,14 @@ Three different affinity configurations are presented: .. code-block:: console - LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-num_workers) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C --localalloc python training_benchmark.py --cpu-affinity --filter_per_worker --num-workers … + LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-num_workers) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C --localalloc python training_benchmark.py --cpu-affinity --num-workers … * **Aff+SocketSep** - data loader process on first socket, main process on second socket, 60 threads: .. code-block:: console - LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-M) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C -m 1 python training_benchmark.py --cpu-affinity --filter_per_worker --num-workers ... 
+ LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-M) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C -m 1 python training_benchmark.py --cpu-affinity --num-workers ... Training times for each model/dataset combination were obtained by taking a mean of results at a variable number of dataloader workers: :obj:`[0, 2, 4, 8, 16]` for the baseline and :obj:`[2, 4, 8, 16]` workers for each affinity configuration. Then, the affinity means were normalized with respect to the mean baseline measurement. diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 874f2df45879..0fc8724d7781 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -19,7 +19,7 @@ def unique_edge_pairs(edge_index): @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) -@pytest.mark.parametrize('filter_per_worker', [True, False]) +@pytest.mark.parametrize('filter_per_worker', [None, True, False]) def test_homo_link_neighbor_loader_basic(subgraph_type, neg_sampling_ratio, filter_per_worker): pos_edge_index = get_random_edge_index(50, 50, 500) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index ac965c2fb9e5..5f46593f0ec4 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -38,7 +38,7 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) -@pytest.mark.parametrize('filter_per_worker', [True, False]) +@pytest.mark.parametrize('filter_per_worker', [None, True, False]) def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): if dtype != torch.int64 and not WITH_PYG_LIB: return @@ -597,7 +597,7 @@ def test_cpu_affinity_neighbor_loader(loader_cores): out = [] with loader.enable_cpu_affinity(loader_cores): - iterator = loader._get_iterator().iterator + iterator = loader._get_iterator() workers = iterator._workers for worker in workers: sleep(1) # Gives time for worker to initialize. diff --git a/test/loader/test_prefetch.py b/test/loader/test_prefetch.py index caa7ba3ae314..16b863a60b3f 100644 --- a/test/loader/test_prefetch.py +++ b/test/loader/test_prefetch.py @@ -42,7 +42,6 @@ def test_prefetch_loader(device): batch_size=1024, num_neighbors=[10, 10], num_workers=args.num_workers, - filter_per_worker=True, persistent_workers=args.num_workers > 0, ) diff --git a/torch_geometric/loader/hgt_loader.py b/torch_geometric/loader/hgt_loader.py index 2a13826c2cbe..7ed35e66c4ea 100644 --- a/torch_geometric/loader/hgt_loader.py +++ b/torch_geometric/loader/hgt_loader.py @@ -86,14 +86,17 @@ class HGTLoader(NodeLoader): re-sorting of the data and can improve runtime and memory efficiency. (default: :obj:`False`) filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) + the returned data in each worker's subprocess. 
+ If set to :obj:`False`, will filter the returned data in the main + process. + If set to :obj:`None`, will automatically infer the decision based + on whether data partially lives on the GPU + (:obj:`filter_per_worker=True`) or entirely on the CPU + (:obj:`filter_per_worker=False`). + There exists different trade-offs for setting this option. + Specifically, setting this option to :obj:`True` for in-memory + datasets will move all features to shared memory, which may result + in too many open file handles. (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. @@ -106,7 +109,7 @@ def __init__( is_sorted: bool = False, transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, - filter_per_worker: bool = False, + filter_per_worker: Optional[bool] = None, **kwargs, ): hgt_sampler = HGTSampler( diff --git a/torch_geometric/loader/link_loader.py b/torch_geometric/loader/link_loader.py index ccfd23056e56..461aad3de4bf 100644 --- a/torch_geometric/loader/link_loader.py +++ b/torch_geometric/loader/link_loader.py @@ -11,6 +11,7 @@ filter_data, filter_hetero_data, get_edge_label_index, + infer_filter_per_worker, ) from torch_geometric.sampler import ( BaseSampler, @@ -98,14 +99,17 @@ class LinkLoader(torch.utils.data.DataLoader, AffinityMixin): that takes in a :class:`torch_geometric.sampler.SamplerOutput` and returns a transformed version. (default: :obj:`None`) filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) + the returned data in each worker's subprocess. + If set to :obj:`False`, will filter the returned data in the main + process. + If set to :obj:`None`, will automatically infer the decision based + on whether data partially lives on the GPU + (:obj:`filter_per_worker=True`) or entirely on the CPU + (:obj:`filter_per_worker=False`). + There exists different trade-offs for setting this option. + Specifically, setting this option to :obj:`True` for in-memory + datasets will move all features to shared memory, which may result + in too many open file handles. (default: :obj:`None`) custom_cls (HeteroData, optional): A custom :class:`~torch_geometric.data.HeteroData` class to return for mini-batches in case of remote backends. 
(default: :obj:`None`) @@ -124,11 +128,14 @@ def __init__( neg_sampling_ratio: Optional[Union[int, float]] = None, transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, - filter_per_worker: bool = False, + filter_per_worker: Optional[bool] = None, custom_cls: Optional[HeteroData] = None, input_id: OptTensor = None, **kwargs, ): + if filter_per_worker is None: + filter_per_worker = infer_filter_per_worker(data) + # Remove for PyTorch Lightning: kwargs.pop('dataset', None) kwargs.pop('collate_fn', None) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 61d5a5f74861..997014e52fa7 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -170,14 +170,17 @@ class LinkNeighborLoader(LinkLoader): This avoids internal re-sorting of the data and can improve runtime and memory efficiency. (default: :obj:`False`) filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) + the returned data in each worker's subprocess. + If set to :obj:`False`, will filter the returned data in the main + process. + If set to :obj:`None`, will automatically infer the decision based + on whether data partially lives on the GPU + (:obj:`filter_per_worker=True`) or entirely on the CPU + (:obj:`filter_per_worker=False`). + There exists different trade-offs for setting this option. + Specifically, setting this option to :obj:`True` for in-memory + datasets will move all features to shared memory, which may result + in too many open file handles. (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. @@ -199,7 +202,7 @@ def __init__( transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, is_sorted: bool = False, - filter_per_worker: bool = False, + filter_per_worker: Optional[bool] = None, neighbor_sampler: Optional[NeighborSampler] = None, directed: bool = True, # Deprecated. **kwargs, diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 0c0ab7ffe97e..634b7f387835 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -168,14 +168,17 @@ class NeighborLoader(NodeLoader): This avoids internal re-sorting of the data and can improve runtime and memory efficiency. (default: :obj:`False`) filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) + the returned data in each worker's subprocess. + If set to :obj:`False`, will filter the returned data in the main + process. 
+ If set to :obj:`None`, will automatically infer the decision based + on whether data partially lives on the GPU + (:obj:`filter_per_worker=True`) or entirely on the CPU + (:obj:`filter_per_worker=False`). + There exists different trade-offs for setting this option. + Specifically, setting this option to :obj:`True` for in-memory + datasets will move all features to shared memory, which may result + in too many open file handles. (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. @@ -194,7 +197,7 @@ def __init__( transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, is_sorted: bool = False, - filter_per_worker: bool = False, + filter_per_worker: Optional[bool] = None, neighbor_sampler: Optional[NeighborSampler] = None, directed: bool = True, # Deprecated. **kwargs, diff --git a/torch_geometric/loader/node_loader.py b/torch_geometric/loader/node_loader.py index e67c2ae339b6..ca7715cad8f1 100644 --- a/torch_geometric/loader/node_loader.py +++ b/torch_geometric/loader/node_loader.py @@ -11,6 +11,7 @@ filter_data, filter_hetero_data, get_input_nodes, + infer_filter_per_worker, ) from torch_geometric.sampler import ( BaseSampler, @@ -58,14 +59,17 @@ class NodeLoader(torch.utils.data.DataLoader, AffinityMixin): that takes in a :class:`torch_geometric.sampler.SamplerOutput` and returns a transformed version. (default: :obj:`None`) filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) + the returned data in each worker's subprocess. + If set to :obj:`False`, will filter the returned data in the main + process. + If set to :obj:`None`, will automatically infer the decision based + on whether data partially lives on the GPU + (:obj:`filter_per_worker=True`) or entirely on the CPU + (:obj:`filter_per_worker=False`). + There exists different trade-offs for setting this option. + Specifically, setting this option to :obj:`True` for in-memory + datasets will move all features to shared memory, which may result + in too many open file handles. (default: :obj:`None`) custom_cls (HeteroData, optional): A custom :class:`~torch_geometric.data.HeteroData` class to return for mini-batches in case of remote backends. 
(default: :obj:`None`) @@ -81,11 +85,14 @@ def __init__( input_time: OptTensor = None, transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, - filter_per_worker: bool = False, + filter_per_worker: Optional[bool] = None, custom_cls: Optional[HeteroData] = None, input_id: OptTensor = None, **kwargs, ): + if filter_per_worker is None: + filter_per_worker = infer_filter_per_worker(data) + # Remove for PyTorch Lightning: kwargs.pop('dataset', None) kwargs.pop('collate_fn', None) diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 00eca9c7090b..835ce62fa281 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -1,7 +1,8 @@ import copy +import logging import math from collections.abc import Sequence -from typing import Dict, Optional, Tuple, Union +from typing import Any, Dict, Optional, Tuple, Union import numpy as np import torch @@ -327,3 +328,12 @@ def _get_edge_index(edge_type): return edge_type, _get_edge_index(edge_type) return edge_type, edge_label_index + + +def infer_filter_per_worker(data: Any) -> bool: + out = True + if isinstance(data, (Data, HeteroData)) and data.is_cuda: + out = False + logging.debug(f"Inferred 'filter_per_worker={out}' option for feature " + f"fetching routines of the data loader") + return out diff --git a/torch_geometric/loader/zip_loader.py b/torch_geometric/loader/zip_loader.py index cfdf29cd5436..60836579634c 100644 --- a/torch_geometric/loader/zip_loader.py +++ b/torch_geometric/loader/zip_loader.py @@ -1,4 +1,4 @@ -from typing import Any, Iterator, List, Tuple, Union +from typing import Any, Iterator, List, Optional, Tuple, Union import torch from torch import Tensor @@ -6,6 +6,7 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import LinkLoader, NodeLoader from torch_geometric.loader.base import DataLoaderIterator +from torch_geometric.loader.utils import infer_filter_per_worker class ZipLoader(torch.utils.data.DataLoader): @@ -15,14 +16,17 @@ class ZipLoader(torch.utils.data.DataLoader): Args: loaders (List[NodeLoader] or List[LinkLoader]): The loader instances. filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) + the returned data in each worker's subprocess. + If set to :obj:`False`, will filter the returned data in the main + process. + If set to :obj:`None`, will automatically infer the decision based + on whether data partially lives on the GPU + (:obj:`filter_per_worker=True`) or entirely on the CPU + (:obj:`filter_per_worker=False`). + There exists different trade-offs for setting this option. + Specifically, setting this option to :obj:`True` for in-memory + datasets will move all features to shared memory, which may result + in too many open file handles. (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
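For reference, a rough sketch of how the newly added `infer_filter_per_worker` helper from `torch_geometric/loader/utils.py` behaves (the CUDA call is illustrative and requires a GPU build):

    import torch
    from torch_geometric.data import Data
    from torch_geometric.loader.utils import infer_filter_per_worker

    data = Data(x=torch.randn(8, 16))
    infer_filter_per_worker(data)         # True: CPU data is filtered inside the workers.
    infer_filter_per_worker(data.cuda())  # False: GPU data is filtered in the main process.

The loaders above only fall back to this helper when `filter_per_worker` is left at its new default of `None`.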
@@ -30,9 +34,12 @@ class ZipLoader(torch.utils.data.DataLoader): def __init__( self, loaders: Union[List[NodeLoader], List[LinkLoader]], - filter_per_worker: bool = False, + filter_per_worker: Optional[bool] = None, **kwargs, ): + if filter_per_worker is None: + filter_per_worker = infer_filter_per_worker(loaders[0].data) + # Remove for PyTorch Lightning: kwargs.pop('dataset', None) kwargs.pop('collate_fn', None) From 1de9dee1b19b0d48fe33cf764fa0673bc0057bff Mon Sep 17 00:00:00 2001 From: Thomas Kwok Date: Sun, 21 May 2023 10:16:45 +0100 Subject: [PATCH 1203/2432] Add MovieLens-100K heterogeneous dataset (#7398) Co-authored-by: Jintang Li Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/movie_lens_100k.py | 180 ++++++++++++++++++++ 3 files changed, 183 insertions(+) create mode 100644 torch_geometric/datasets/movie_lens_100k.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 750d39950cf2..47159c479701 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398)) - Added the `PMLP` model ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370)) - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 5b8f2c27a30a..88796c3577b4 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -76,6 +76,7 @@ from .ogb_mag import OGB_MAG from .dblp import DBLP from .movie_lens import MovieLens +from .movie_lens_100k import MovieLens100K from .imdb import IMDB from .last_fm import LastFM from .hgb_dataset import HGBDataset @@ -174,6 +175,7 @@ 'OGB_MAG', 'DBLP', 'MovieLens', + 'MovieLens100K', 'IMDB', 'LastFM', 'HGBDataset', diff --git a/torch_geometric/datasets/movie_lens_100k.py b/torch_geometric/datasets/movie_lens_100k.py new file mode 100644 index 000000000000..7b29e840a5c9 --- /dev/null +++ b/torch_geometric/datasets/movie_lens_100k.py @@ -0,0 +1,180 @@ +import os +import os.path as osp +import shutil +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + HeteroData, + InMemoryDataset, + download_url, + extract_zip, +) + +MOVIE_HEADERS = [ + "movieId", "title", "releaseDate", "videoReleaseDate", "IMDb URL", + "unknown", "Action", "Adventure", "Animation", "Children's", "Comedy", + "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", + "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" +] +USER_HEADERS = ["userId", "age", "gender", "occupation", "zipCode"] +RATING_HEADERS = ["userId", "movieId", "rating", "timestamp"] + + +class MovieLens100K(InMemoryDataset): + r"""The MovieLens 100K heterogeneous rating dataset, assembled by GroupLens + Research from the `MovieLens web site `__, + consisting of movies (1,682 nodes) and users (943 nodes) with 100K + ratings between them. + User ratings for movies are available as ground truth labels. 
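    A short loading sketch (the :obj:`root` path is a placeholder):

    .. code-block:: python

        dataset = MovieLens100K(root='/tmp/MovieLens100K')
        data = dataset[0]  # A single heterogeneous graph.

        data['user'].x                             # User features.
        data['movie'].x                            # Movie features.
        data['user', 'rates', 'movie'].edge_index  # Training rating edges.
        data['user', 'rates', 'movie'].edge_label  # Held-out rating labels.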
+ Features of users and movies are encoded according to the `"Inductive + Matrix Completion Based on Graph Neural Networks" + `__ paper. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + + **STATS:** + + .. list-table:: + :widths: 20 10 10 10 + :header-rows: 1 + + * - Node/Edge Type + - #nodes/#edges + - #features + - #tasks + * - Movie + - 1,682 + - 18 + - + * - User + - 943 + - 24 + - + * - User-Movie + - 80,000 + - 1 + - 1 + """ + + url = 'https://files.grouplens.org/datasets/movielens/ml-100k.zip' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0], data_cls=HeteroData) + + @property + def raw_file_names(self) -> List[str]: + return ['u.item', 'u.user', 'u1.base', 'u1.test'] + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + path = download_url(http://github.com/self.url,%20self.root) + extract_zip(path, self.root) + os.remove(path) + folder = osp.join(self.root, 'ml-100k') + shutil.rmtree(self.raw_dir) + os.rename(folder, self.raw_dir) + + def process(self): + import pandas as pd + + data = HeteroData() + + # Process movie data: + df = pd.read_csv( + self.raw_paths[0], + sep='|', + header=None, + names=MOVIE_HEADERS, + index_col='movieId', + encoding='ISO-8859-1', + ) + movie_mapping = {idx: i for i, idx in enumerate(df.index)} + + x = df[MOVIE_HEADERS[6:]].values + data['movie'].x = torch.from_numpy(x).to(torch.float) + + # Process user data: + df = pd.read_csv( + self.raw_paths[1], + sep='|', + header=None, + names=USER_HEADERS, + index_col='userId', + encoding='ISO-8859-1', + ) + user_mapping = {idx: i for i, idx in enumerate(df.index)} + + age = df['age'].values / df['age'].values.max() + age = torch.from_numpy(age).to(torch.float).view(-1, 1) + + gender = df['gender'].str.get_dummies().values + gender = torch.from_numpy(gender).to(torch.float) + + occupation = df['occupation'].str.get_dummies().values + occupation = torch.from_numpy(occupation).to(torch.float) + + data['user'].x = torch.cat([age, gender, occupation], dim=-1) + + # Process rating data for training: + df = pd.read_csv( + self.raw_paths[2], + sep='\t', + header=None, + names=RATING_HEADERS, + ) + + src = [user_mapping[idx] for idx in df['userId']] + dst = [movie_mapping[idx] for idx in df['movieId']] + edge_index = torch.tensor([src, dst]) + data['user', 'rates', 'movie'].edge_index = edge_index + + rating = torch.from_numpy(df['rating'].values).to(torch.float) + data['user', 'rates', 'movie'].rating = rating + + time = torch.from_numpy(df['timestamp'].values) + data['user', 'rates', 'movie'].time = time + + data['movie', 'rated_by', 'user'].edge_index = edge_index.flip([0]) + data['movie', 'rated_by', 'user'].rating = rating + data['movie', 'rated_by', 'user'].time = time + + # Process rating data for testing: + df = pd.read_csv( + self.raw_paths[3], + sep='\t', + header=None, + 
names=RATING_HEADERS, + ) + + src = [user_mapping[idx] for idx in df['userId']] + dst = [movie_mapping[idx] for idx in df['movieId']] + edge_label_index = torch.tensor([src, dst]) + data['user', 'rates', 'movie'].edge_label_index = edge_label_index + + edge_label = torch.from_numpy(df['rating'].values).to(torch.float) + data['user', 'rates', 'movie'].edge_label = edge_label + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) From 18f16ad0acbc65057d1210c2d2bcf11cdf3874eb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 23 May 2023 08:21:22 +0200 Subject: [PATCH 1204/2432] Fix `SortAggregation` when `fill_value` requires gradients (#7412) --- benchmark/kernel/sort_pool.py | 8 ++++---- examples/seal_link_pred.py | 10 +++++----- torch_geometric/nn/aggr/sort.py | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmark/kernel/sort_pool.py b/benchmark/kernel/sort_pool.py index 1d68a921712a..926a816572ce 100644 --- a/benchmark/kernel/sort_pool.py +++ b/benchmark/kernel/sort_pool.py @@ -2,19 +2,19 @@ import torch.nn.functional as F from torch.nn import Conv1d, Linear -from torch_geometric.nn import SAGEConv, global_sort_pool +from torch_geometric.nn import SAGEConv, SortAggregation class SortPool(torch.nn.Module): def __init__(self, dataset, num_layers, hidden): super().__init__() - self.k = 30 self.conv1 = SAGEConv(dataset.num_features, hidden) self.convs = torch.nn.ModuleList() for i in range(num_layers - 1): self.convs.append(SAGEConv(hidden, hidden)) + self.pool = SortAggregation(k=30) self.conv1d = Conv1d(hidden, 32, 5) - self.lin1 = Linear(32 * (self.k - 5 + 1), hidden) + self.lin1 = Linear(32 * (30 - 5 + 1), hidden) self.lin2 = Linear(hidden, dataset.num_classes) def reset_parameters(self): @@ -30,7 +30,7 @@ def forward(self, data): x = F.relu(self.conv1(x, edge_index)) for conv in self.convs: x = F.relu(conv(x, edge_index)) - x = global_sort_pool(x, batch, self.k) + x = self.pool(x, batch) x = x.view(len(x), self.k, -1).permute(0, 2, 1) x = F.relu(self.conv1d(x)) x = x.view(len(x), -1) diff --git a/examples/seal_link_pred.py b/examples/seal_link_pred.py index c68c539c2d9d..256083fff3a4 100644 --- a/examples/seal_link_pred.py +++ b/examples/seal_link_pred.py @@ -12,7 +12,7 @@ from torch_geometric.data import Data, InMemoryDataset from torch_geometric.datasets import Planetoid from torch_geometric.loader import DataLoader -from torch_geometric.nn import MLP, GCNConv, global_sort_pool +from torch_geometric.nn import MLP, GCNConv, SortAggregation from torch_geometric.transforms import RandomLinkSplit from torch_geometric.utils import k_hop_subgraph, to_scipy_sparse_matrix @@ -142,8 +142,7 @@ def __init__(self, hidden_channels, num_layers, GNN=GCNConv, k=0.6): if k < 1: # Transform percentile to number. 
num_nodes = sorted([data.num_nodes for data in train_dataset]) k = num_nodes[int(math.ceil(k * len(num_nodes))) - 1] - k = max(10, k) - self.k = int(k) + k = int(max(10, k)) self.convs = ModuleList() self.convs.append(GNN(train_dataset.num_features, hidden_channels)) @@ -156,10 +155,11 @@ def __init__(self, hidden_channels, num_layers, GNN=GCNConv, k=0.6): conv1d_kws = [total_latent_dim, 5] self.conv1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0]) + self.pool = SortAggregation(k) self.maxpool1d = MaxPool1d(2, 2) self.conv2 = Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1) - dense_dim = int((self.k - 2) / 2 + 1) + dense_dim = int((k - 2) / 2 + 1) dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1] self.mlp = MLP([dense_dim, 128, 1], dropout=0.5, norm=None) @@ -170,7 +170,7 @@ def forward(self, x, edge_index, batch): x = torch.cat(xs[1:], dim=-1) # Global pooling. - x = global_sort_pool(x, batch, self.k) + x = self.pool(x, batch) x = x.unsqueeze(1) # [num_graphs, 1, k * hidden] x = self.conv1(x).relu() x = self.maxpool1d(x) diff --git a/torch_geometric/nn/aggr/sort.py b/torch_geometric/nn/aggr/sort.py index f492d9522666..3ddce63ec5bf 100644 --- a/torch_geometric/nn/aggr/sort.py +++ b/torch_geometric/nn/aggr/sort.py @@ -30,7 +30,7 @@ def forward( max_num_elements: Optional[int] = None, ) -> Tensor: - fill_value = x.min() - 1 + fill_value = x.detach().min() - 1 batch_x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, fill_value=fill_value, max_num_elements=max_num_elements) From dd20c20659275e500aac0d6738417e691343ccd8 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Tue, 23 May 2023 09:01:27 +0200 Subject: [PATCH 1205/2432] Fix a bug in the `QuantileAggregation` with the `dim size` parameter passed (#7407) Passing the `dim_size` parameter led to an `index out of range` error, during the `index_select` operation. Setting parameters `dim_size` and `fill_value` has not been tested, appropriate tests have been added. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/aggr/test_quantile.py | 16 ++++++++++++---- torch_geometric/nn/aggr/quantile.py | 5 +++++ 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47159c479701..9a05610aadbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fixed an index-out-of-range bug in `QuantileAggregation` when `dim_size` is passed ([#7407](https://github.com/pyg-team/pytorch_geometric/pull/7407)) - The `filter_per_worker` option will not get automatically inferred by default based on the device of the underlying data ([#7399](https://github.com/pyg-team/pytorch_geometric/pull/7399)) - Fixed a bug in `LightGCN.recommendation_loss()` to only use the embeddings of the nodes involved in the current mini-batch ([#7384](https://github.com/pyg-team/pytorch_geometric/pull/7384)) - Added an optional `max_num_elements` argument to `SortAggregation` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367)) diff --git a/test/nn/aggr/test_quantile.py b/test/nn/aggr/test_quantile.py index 1d68c572e11e..d1c9de0b828e 100644 --- a/test/nn/aggr/test_quantile.py +++ b/test/nn/aggr/test_quantile.py @@ -7,7 +7,9 @@ @pytest.mark.parametrize('q', [0., .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.]) @pytest.mark.parametrize('interpolation', QuantileAggregation.interpolations) @pytest.mark.parametrize('dim', [0, 1]) -def test_quantile_aggregation(q, interpolation, dim): +@pytest.mark.parametrize('dim_size', [None, 15]) +@pytest.mark.parametrize('fill_value', [0.0, 10.0]) +def test_quantile_aggregation(q, interpolation, dim, dim_size, fill_value): x = torch.tensor([ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], @@ -22,12 +24,18 @@ def test_quantile_aggregation(q, interpolation, dim): ]) index = torch.zeros(x.size(dim), dtype=torch.long) - aggr = QuantileAggregation(q=q, interpolation=interpolation) + aggr = QuantileAggregation(q=q, interpolation=interpolation, + fill_value=fill_value) assert str(aggr) == f"QuantileAggregation(q={q})" - out = aggr(x, index, dim=dim) + out = aggr(x, index, dim=dim, dim_size=dim_size) expected = x.quantile(q, dim, interpolation=interpolation, keepdim=True) - assert torch.allclose(out, expected) + + assert torch.allclose(out.narrow(dim, 0, 1), expected) + + if out.size(0) > index.max() + 1: + padding = out.narrow(dim, 1, out.size(dim) - 1) + assert torch.allclose(padding, torch.tensor(fill_value)) def test_median_aggregation(): diff --git a/torch_geometric/nn/aggr/quantile.py b/torch_geometric/nn/aggr/quantile.py index e3c5ba9efa2f..4c720d1cc863 100644 --- a/torch_geometric/nn/aggr/quantile.py +++ b/torch_geometric/nn/aggr/quantile.py @@ -78,6 +78,11 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, count = torch.bincount(index, minlength=dim_size or 0) cumsum = torch.cumsum(count, dim=0) - count + # In case there exists dangling indices (`dim_size > index.max()`), we + # need to clamp them to prevent out-of-bound issues: + if dim_size is not None: + cumsum = cumsum.clamp(max=x.size(dim) - 1) + q_point = self.q * (count - 1) + cumsum q_point = q_point.t().reshape(-1) From e2c4b76a064762dea278daca9ccea66f71baaede Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 23 May 2023 09:25:44 +0200 Subject: [PATCH 1206/2432] Intrdouce `pickle`-agnostic `save/load` logic in `Planetoid`/`TUDataset` (#7413) --- CHANGELOG.md | 2 +- torch_geometric/datasets/planetoid.py | 4 ++-- torch_geometric/datasets/tu_dataset.py | 13 ++++++++++--- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a05610aadbd..46163ff28439 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894)) - Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275)) - Added support for `torch.sparse.Tensor` in `DataLoader` ([#7252](https://github.com/pyg-team/pytorch_geometric/pull/7252)) -- Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250)) +- Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250), [#7413](https://github.com/pyg-team/pytorch_geometric/pull/7413)) - Added an example for heterogeneous GNN explanation via `CaptumExplainer` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) - Added `visualize_feature_importance` functionality to `HeteroExplanation` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096)) - Added a `AddRemainingSelfLoops` transform ([#7192](https://github.com/pyg-team/pytorch_geometric/pull/7192)) diff --git a/torch_geometric/datasets/planetoid.py b/torch_geometric/datasets/planetoid.py index 9c16a378815b..2ace35493cb6 100644 --- a/torch_geometric/datasets/planetoid.py +++ b/torch_geometric/datasets/planetoid.py @@ -90,7 +90,7 @@ def __init__(self, root: str, name: str, split: str = "public", assert self.split in ['public', 'full', 'geom-gcn', 'random'] super().__init__(root, transform, pre_transform) - self.data, self.slices = torch.load(self.processed_paths[0]) + self.load(self.processed_paths[0]) if split == 'full': data = self.get(0) @@ -162,7 +162,7 @@ def process(self): data.test_mask = torch.stack(test_masks, dim=1) data = data if self.pre_transform is None else self.pre_transform(data) - torch.save(self.collate([data]), self.processed_paths[0]) + self.save([data], self.processed_paths[0]) def __repr__(self) -> str: return f'{self.name}()' diff --git a/torch_geometric/datasets/tu_dataset.py b/torch_geometric/datasets/tu_dataset.py index 83524f4db55a..aa6835bcb9a3 100644 --- a/torch_geometric/datasets/tu_dataset.py +++ b/torch_geometric/datasets/tu_dataset.py @@ -5,7 +5,12 @@ import torch -from torch_geometric.data import InMemoryDataset, download_url, extract_zip +from torch_geometric.data import ( + Data, + InMemoryDataset, + download_url, + extract_zip, +) from torch_geometric.io import read_tu_data @@ -131,7 +136,8 @@ def __init__(self, root: str, name: str, "If this error occurred while loading an already existing " "dataset, remove the 'processed/' directory in the dataset's " "root folder and try again.") - self.data, self.slices, self.sizes = out + data, self.slices, self.sizes = out + self.data = Data.from_dict(data) if isinstance(data, dict) else data if self._data.x is not None and not use_node_attr: num_node_attributes = self.num_node_attributes @@ -199,7 +205,8 @@ def process(self): self.data, self.slices = self.collate(data_list) self._data_list = None # Reset cache. 
- torch.save((self._data, self.slices, sizes), self.processed_paths[0]) + torch.save((self._data.to_dict(), self.slices, sizes), + self.processed_paths[0]) def __repr__(self) -> str: return f'{self.name}({len(self)})' From ad24911fbc5f5e4f009794db9bd8b8c1319087cd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 23 May 2023 16:49:48 +0200 Subject: [PATCH 1207/2432] Pass `dim` in `global_mean_pool` (#7417) --- torch_geometric/nn/pool/glob.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/nn/pool/glob.py b/torch_geometric/nn/pool/glob.py index 85ee6883847a..a50dc9ebd715 100644 --- a/torch_geometric/nn/pool/glob.py +++ b/torch_geometric/nn/pool/glob.py @@ -58,7 +58,7 @@ def global_mean_pool(x: Tensor, batch: Optional[Tensor], if batch is None: return x.mean(dim=dim, keepdim=x.dim() <= 2) - return scatter(x, batch, dim=-2, dim_size=size, reduce='mean') + return scatter(x, batch, dim=dim, dim_size=size, reduce='mean') def global_max_pool(x: Tensor, batch: Optional[Tensor], From 9bacb430c8e43ccb1d5bcbef063bc5a636e4b51b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 23 May 2023 18:55:25 +0200 Subject: [PATCH 1208/2432] Add `ClusterLoader` benchmark (#7418) --- test/loader/test_cluster.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index f59ffddc30aa..884eac42ed74 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -147,3 +147,26 @@ def test_cluster_gcn_correctness(get_dataset): batch2 = data.subgraph(batch1.n_id) assert batch1.num_nodes == batch2.num_nodes assert batch1.num_edges == batch2.num_edges + + +if __name__ == '__main__': + import argparse + + from ogb.nodeproppred import PygNodePropPredDataset + from tqdm import tqdm + + parser = argparse.ArgumentParser() + parser.add_argument('--num_workers', type=int, default=0) + args = parser.parse_args() + + data = PygNodePropPredDataset('ogbn-products', root='/tmp/ogb')[0] + + loader = ClusterLoader( + ClusterData(data, num_parts=15_000, save_dir='/tmp/ogb/ogbn_products'), + batch_size=32, + shuffle=True, + num_workers=args.num_workers, + ) + + for batch in tqdm(loader): + pass From 627d310d3bea3bad064be7f5872a06d1206c9112 Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Tue, 23 May 2023 09:56:17 -0700 Subject: [PATCH 1209/2432] ARM support in AffinityMixin (#7419) `enable_cpu_affinity` with `loader_cores=None` has a different behavior when the arch changes. Which results in inconsistent behavior accross archs and this test failing with ARM: ``` ___________________ test_cpu_affinity_neighbor_loader[None] ____________________ loader_cores = None @onlyLinux @onlyNeighborSampler @pytest.mark.parametrize('loader_cores', [None, [1]]) def test_cpu_affinity_neighbor_loader(loader_cores): data = Data(x=torch.randn(1, 1)) loader = NeighborLoader(data, num_neighbors=[-1], batch_size=1, num_workers=1) out = [] with loader.enable_cpu_affinity(loader_cores): iterator = loader._get_iterator().iterator workers = iterator._workers for worker in workers: sleep(1) # Gives time for worker to initialize. 
process = subprocess.Popen( ['taskset', '-c', '-p', f'{worker.pid}'], stdout=subprocess.PIPE) stdout = process.communicate()[0].decode('utf-8') out.append(int(stdout.split(':')[1].strip())) if not loader_cores: > assert out == [0] E assert [40] == [0] E At index 0 diff: 40 != 0 E Use -v to get more diff test/loader/test_neighbor_loader.py:610: AssertionError ``` Signed-off-by: Serge Panev --- torch_geometric/loader/mixin.py | 1 + 1 file changed, 1 insertion(+) diff --git a/torch_geometric/loader/mixin.py b/torch_geometric/loader/mixin.py index 5f7cac9112fa..d31e147a71b2 100644 --- a/torch_geometric/loader/mixin.py +++ b/torch_geometric/loader/mixin.py @@ -141,6 +141,7 @@ def init_fn(worker_id): if numa_info and len(numa_info[0]) > self.num_workers: # Take one thread per each node 0 core: node0_cores = [cpus[0] for core_id, cpus in numa_info[0]] + node0_cores.sort() else: node0_cores = list(range(psutil.cpu_count(logical=False))) From 508fc59cedb02dfc2dcb1f8a9f57d675eb71c7e7 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Tue, 23 May 2023 23:41:53 -0700 Subject: [PATCH 1210/2432] Fix `atol` in `RGCNConv` test (#7422) --- test/nn/conv/test_rgcn_conv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index 791be8cec71a..1df7aaf65144 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -83,7 +83,7 @@ def test_rgcn_conv(cls, conf, device): if is_full_test(): t = '(OptTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, edge_type), out1) + assert torch.allclose(jit(x1, edge_index, edge_type), out1, atol=1e-3) if num_blocks is None: assert torch.allclose(jit(idx1, edge_index, edge_type), out2, atol=1e-3) From 99d7f78c98c5ba8791f42b37b0dcedc8b821569e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 24 May 2023 08:44:49 +0200 Subject: [PATCH 1211/2432] Re-factor `ClusterLoader` + Integrate `pyg-lib` metis computation (#7416) --- CHANGELOG.md | 1 + test/data/test_data.py | 6 +- test/data/test_hetero_data.py | 8 +- test/loader/test_cluster.py | 164 ++++++++++--------- test/utils/test_map.py | 21 +++ torch_geometric/data/data.py | 11 +- torch_geometric/data/hetero_data.py | 13 +- torch_geometric/loader/cluster.py | 239 ++++++++++++++++++++-------- torch_geometric/typing.py | 2 + torch_geometric/utils/map.py | 50 ++++++ 10 files changed, 349 insertions(+), 166 deletions(-) create mode 100644 test/utils/test_map.py create mode 100644 torch_geometric/utils/map.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 46163ff28439..4d396ae2bd24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Re-factored `ClusterLoader` to integrate `pyg-lib` METIS routine ([#7416](https://github.com/pyg-team/pytorch_geometric/pull/7416)) - Fixed an index-out-of-range bug in `QuantileAggregation` when `dim_size` is passed ([#7407](https://github.com/pyg-team/pytorch_geometric/pull/7407)) - The `filter_per_worker` option will not get automatically inferred by default based on the device of the underlying data ([#7399](https://github.com/pyg-team/pytorch_geometric/pull/7399)) - Fixed a bug in `LightGCN.recommendation_loss()` to only use the embeddings of the nodes involved in the current mini-batch ([#7384](https://github.com/pyg-team/pytorch_geometric/pull/7384)) diff --git a/test/data/test_data.py b/test/data/test_data.py index 5badd09eb055..b38578c7f936 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -208,12 +208,12 @@ def test_data_subgraph(): assert torch.equal(out.edge_weight, edge_weight[torch.arange(2, 6)]) assert out.num_nodes == 3 - # test for unordered selection + # Test unordered selection: out = data.subgraph(torch.tensor([3, 1, 2])) assert len(out) == 5 - assert torch.equal(out.x, torch.arange(1, 4)) + assert torch.equal(out.x, torch.tensor([3, 1, 2])) assert torch.equal(out.y, data.y) - assert out.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] + assert out.edge_index.tolist() == [[1, 2, 2, 0], [2, 1, 0, 2]] assert torch.equal(out.edge_weight, edge_weight[torch.arange(2, 6)]) assert out.num_nodes == 3 diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index ddb3cd32b19e..5698ce7a0312 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -214,8 +214,6 @@ def test_hetero_data_subgraph(): 'conf': torch.randperm(x_conference.size(0))[:2], } - subset_sorted = {key: torch.sort(idx)[0] for key, idx in subset.items()} - out = data.subgraph(subset) out.validate(raise_on_error=True) @@ -224,7 +222,7 @@ def test_hetero_data_subgraph(): for key in out.node_types: assert len(out[key]) == len(data[key]) - assert torch.allclose(out[key].x, data[key].x[subset_sorted[key]]) + assert torch.allclose(out[key].x, data[key].x[subset[key]]) assert out[key].num_nodes == subset[key].size(0) if key == 'paper': assert out['paper'].name == 'paper' @@ -235,8 +233,8 @@ def test_hetero_data_subgraph(): for key in out.node_types: node_mask[key] = torch.zeros((data[key].num_nodes, ), dtype=torch.bool) node_map[key] = torch.zeros((data[key].num_nodes, ), dtype=torch.long) - node_mask[key][subset_sorted[key]] = True - node_map[key][subset_sorted[key]] = torch.arange(subset[key].size(0)) + node_mask[key][subset[key]] = True + node_map[key][subset[key]] = torch.arange(subset[key].size(0)) edge_mask = {} # for each edge type a mask of edges in the subgraph subgraph_edge_index = { diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index 884eac42ed74..c934eb52e5b8 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -1,21 +1,22 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.data import Data from torch_geometric.loader import ClusterData, ClusterLoader from torch_geometric.testing import onlyFullTest -from torch_geometric.utils import to_dense_adj +from torch_geometric.utils import sort_edge_index try: rowptr = torch.tensor([0, 1]) col = torch.tensor([0]) torch.ops.torch_sparse.partition(rowptr, col, None, 1, True) - with_metis = True + WITH_METIS = True except (AttributeError, RuntimeError): - with_metis = False + WITH_METIS = False or 
torch_geometric.typing.WITH_METIS -@pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_cluster_gcn(): adj = torch.tensor([ [1, 1, 1, 0, 1, 0], @@ -29,81 +30,86 @@ def test_cluster_gcn(): x = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) edge_index = adj.nonzero(as_tuple=False).t() edge_attr = torch.arange(edge_index.size(1)) - data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + n_id = torch.arange(6) + data = Data(x=x, n_id=n_id, edge_index=edge_index, edge_attr=edge_attr) data.num_nodes = 6 cluster_data = ClusterData(data, num_parts=2, log=False) - assert cluster_data.partptr.tolist() == [0, 3, 6] - assert cluster_data.perm.tolist() == [0, 2, 4, 1, 3, 5] - assert cluster_data.data.x.tolist() == [ - [0, 0], - [2, 2], - [4, 4], - [1, 1], - [3, 3], - [5, 5], - ] - assert cluster_data.data.adj.to_dense().tolist() == [ - [0, 2, 3, 1, 0, 0], - [8, 9, 10, 0, 0, 0], - [14, 15, 16, 0, 0, 0], - [4, 0, 0, 5, 6, 7], - [0, 0, 0, 11, 12, 13], - [0, 0, 0, 17, 18, 19], + partition = cluster_data._partition( + edge_index, cluster=torch.tensor([0, 1, 0, 1, 0, 1])) + assert partition.partptr.tolist() == [0, 3, 6] + assert partition.node_perm.tolist() == [0, 2, 4, 1, 3, 5] + assert partition.edge_perm.tolist() == [ + 0, 2, 3, 1, 8, 9, 10, 14, 15, 16, 4, 5, 6, 7, 11, 12, 13, 17, 18, 19 ] - data = cluster_data[0] - assert data.num_nodes == 3 - assert data.x.tolist() == [[0, 0], [2, 2], [4, 4]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [0, 2, 3, 8, 9, 10, 14, 15, 16] + assert cluster_data.partition.partptr.tolist() == [0, 3, 6] + assert torch.equal( + cluster_data.partition.node_perm.sort()[0], + torch.arange(data.num_nodes), + ) + assert torch.equal( + cluster_data.partition.edge_perm.sort()[0], + torch.arange(data.num_edges), + ) - data = cluster_data[1] - assert data.num_nodes == 3 - assert data.x.tolist() == [[1, 1], [3, 3], [5, 5]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [5, 6, 7, 11, 12, 13, 17, 18, 19] + out = cluster_data[0] + expected = data.subgraph(out.n_id) + out.validate() + assert out.num_nodes == 3 + assert out.n_id.size() == (3, ) + assert torch.equal(out.x, expected.x) + tmp = sort_edge_index(expected.edge_index, expected.edge_attr) + assert torch.equal(out.edge_index, tmp[0]) + assert torch.equal(out.edge_attr, tmp[1]) + + out = cluster_data[1] + out.validate() + assert out.num_nodes == 3 + assert out.n_id.size() == (3, ) + expected = data.subgraph(out.n_id) + assert torch.equal(out.x, expected.x) + tmp = sort_edge_index(expected.edge_index, expected.edge_attr) + assert torch.equal(out.edge_index, tmp[0]) + assert torch.equal(out.edge_attr, tmp[1]) loader = ClusterLoader(cluster_data, batch_size=1) iterator = iter(loader) - data = next(iterator) - assert data.x.tolist() == [[0, 0], [2, 2], [4, 4]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [0, 2, 3, 8, 9, 10, 14, 15, 16] - - data = next(iterator) - assert data.x.tolist() == [[1, 1], [3, 3], [5, 5]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [5, 6, 7, 11, 12, 13, 17, 18, 19] + out = next(iterator) + out.validate() + assert out.num_nodes == 3 + 
assert out.n_id.size() == (3, ) + expected = data.subgraph(out.n_id) + assert torch.equal(out.x, expected.x) + tmp = sort_edge_index(expected.edge_index, expected.edge_attr) + assert torch.equal(out.edge_index, tmp[0]) + assert torch.equal(out.edge_attr, tmp[1]) + + out = next(iterator) + out.validate() + assert out.num_nodes == 3 + assert out.n_id.size() == (3, ) + expected = data.subgraph(out.n_id) + assert torch.equal(out.x, expected.x) + tmp = sort_edge_index(expected.edge_index, expected.edge_attr) + assert torch.equal(out.edge_index, tmp[0]) + assert torch.equal(out.edge_attr, tmp[1]) loader = ClusterLoader(cluster_data, batch_size=2, shuffle=False) - data = next(iter(loader)) - assert data.num_nodes == 6 - assert data.x.tolist() == [ - [0, 0], - [2, 2], - [4, 4], - [1, 1], - [3, 3], - [5, 5], - ] - assert to_dense_adj(data.edge_index).squeeze().tolist() == [ - [1, 1, 1, 1, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - ] - - -@pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') + out = next(iter(loader)) + out.validate() + assert out.num_nodes == 6 + assert out.n_id.size() == (6, ) + expected = data.subgraph(out.n_id) + assert torch.equal(out.x, expected.x) + tmp = sort_edge_index(expected.edge_index, expected.edge_attr) + assert torch.equal(out.edge_index, tmp[0]) + assert torch.equal(out.edge_attr, tmp[1]) + + +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_keep_inter_cluster_edges(): adj = torch.tensor([ [1, 1, 1, 0, 1, 0], @@ -124,29 +130,39 @@ def test_keep_inter_cluster_edges(): keep_inter_cluster_edges=True) data = cluster_data[0] - assert data.edge_index.tolist() == [[0, 0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 3, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [0, 2, 3, 1, 8, 9, 10, 14, 15, 16] + assert data.edge_index[0].min() == 0 + assert data.edge_index[0].max() == 2 + assert data.edge_index[1].min() == 0 + assert data.edge_index[1].max() > 2 + assert data.edge_index.size(1) == data.edge_attr.size(0) data = cluster_data[1] - assert data.edge_index.tolist() == [[0, 0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 3, 4, 5, 3, 4, 5, 3, 4, 5]] - assert data.edge_attr.tolist() == [4, 5, 6, 7, 11, 12, 13, 17, 18, 19] + assert data.edge_index[0].min() == 0 + assert data.edge_index[0].max() == 2 + assert data.edge_index[1].min() == 0 + assert data.edge_index[1].max() > 2 + assert data.edge_index.size(1) == data.edge_attr.size(0) @onlyFullTest -@pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_cluster_gcn_correctness(get_dataset): dataset = get_dataset('Cora') data = dataset[0].clone() data.n_id = torch.arange(data.num_nodes) - cluster_data = ClusterData(data, num_parts=10) + cluster_data = ClusterData(data, num_parts=10, log=False) loader = ClusterLoader(cluster_data, batch_size=3, shuffle=False) for batch1 in loader: + batch1.validate() batch2 = data.subgraph(batch1.n_id) assert batch1.num_nodes == batch2.num_nodes assert batch1.num_edges == batch2.num_edges + assert torch.equal(batch1.x, batch2.x) + assert torch.equal( + batch1.edge_index, + sort_edge_index(batch2.edge_index), + ) if __name__ == '__main__': diff --git a/test/utils/test_map.py b/test/utils/test_map.py new file mode 100644 index 000000000000..923bb5b811fb --- /dev/null +++ b/test/utils/test_map.py @@ -0,0 +1,21 @@ +import torch + +from torch_geometric.utils.map import 
map_index + + +def test_map_index(): + src = torch.tensor([2, 0, 1, 0, 3]) + index = torch.tensor([3, 2, 0, 1]) + + out, mask = map_index(src, index) + assert out.tolist() == [1, 2, 3, 2, 0] + assert mask.tolist() == [True, True, True, True, True] + + +def test_map_index_na(): + src = torch.tensor([2, 0, 1, 0, 3]) + index = torch.tensor([3, 2, 0]) + + out, mask = map_index(src, index) + assert out.tolist() == [1, 2, 2, 0] + assert mask.tolist() == [True, True, False, True, True] diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 846c105dee6f..dcba8718707b 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -589,12 +589,6 @@ def subgraph(self, subset: Tensor) -> 'Data': Args: subset (LongTensor or BoolTensor): The nodes to keep. """ - if subset.dtype == torch.bool: - num_nodes = int(subset.sum()) - else: - num_nodes = subset.size(0) - subset = torch.unique(subset, sorted=True) - out = subgraph(subset, self.edge_index, relabel_nodes=True, num_nodes=self.num_nodes, return_edge_mask=True) edge_index, _, edge_mask = out @@ -605,7 +599,10 @@ def subgraph(self, subset: Tensor) -> 'Data': if key == 'edge_index': data.edge_index = edge_index elif key == 'num_nodes': - data.num_nodes = num_nodes + if subset.dtype == torch.bool: + data.num_nodes = int(subset.sum()) + else: + data.num_nodes = subset.size(0) elif self.is_node_attr(key): cat_dim = self.__cat_dim__(key, value) data[key] = select(value, subset, dim=cat_dim) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 99c78b0f5765..da19896d040a 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -638,17 +638,12 @@ def subgraph(self, subset_dict: Dict[NodeType, Tensor]) -> 'HeteroData': subset_dict = copy.copy(subset_dict) for node_type, subset in subset_dict.items(): - - if subset.dtype == torch.bool: - num_nodes = int(subset.sum()) - else: - num_nodes = subset.size(0) - subset = torch.unique(subset, sorted=True) - subset_dict[node_type] = subset - for key, value in self[node_type].items(): if key == 'num_nodes': - data[node_type].num_nodes = num_nodes + if subset.dtype == torch.bool: + data[node_type].num_nodes = int(subset.sum()) + else: + data[node_type].num_nodes = subset.size(0) elif self[node_type].is_node_attr(key): data[node_type][key] = value[subset] else: diff --git a/torch_geometric/loader/cluster.py b/torch_geometric/loader/cluster.py index 23824f174ea6..4b3cbb51a072 100644 --- a/torch_geometric/loader/cluster.py +++ b/torch_geometric/loader/cluster.py @@ -1,13 +1,28 @@ import copy import os.path as osp import sys -from typing import Optional +from dataclasses import dataclass +from typing import List, Optional import torch import torch.utils.data +from torch import Tensor -from torch_geometric.typing import SparseTensor, torch_sparse -from torch_geometric.utils import narrow, select +import torch_geometric.typing +from torch_geometric.data import Data +from torch_geometric.typing import pyg_lib +from torch_geometric.utils import index_sort, narrow, select, sort_edge_index +from torch_geometric.utils.map import map_index +from torch_geometric.utils.sparse import index2ptr, ptr2index + + +@dataclass +class Partition: + rowptr: Tensor + col: Tensor + partptr: Tensor + node_perm: Tensor + edge_perm: Tensor class ClusterData(torch.utils.data.Dataset): @@ -44,82 +59,139 @@ def __init__( assert data.edge_index is not None self.num_parts = num_parts + self.recursive = recursive self.keep_inter_cluster_edges 
= keep_inter_cluster_edges recursive_str = '_recursive' if recursive else '' - filename = f'partition_{num_parts}{recursive_str}.pt' + filename = f'metis_{num_parts}{recursive_str}.pt' path = osp.join(save_dir or '', filename) if save_dir is not None and osp.exists(path): - adj, partptr, perm = torch.load(path) + self.partition = torch.load(path) else: if log: # pragma: no cover print('Computing METIS partitioning...', file=sys.stderr) - N, E = data.num_nodes, data.num_edges - adj = SparseTensor( - row=data.edge_index[0], col=data.edge_index[1], - value=torch.arange(E, device=data.edge_index.device), - sparse_sizes=(N, N)) - adj, partptr, perm = adj.partition(num_parts, recursive) + cluster = self._metis(data.edge_index, data.num_nodes) + self.partition = self._partition(data.edge_index, cluster) if save_dir is not None: - torch.save((adj, partptr, perm), path) + torch.save(self.partition, path) if log: # pragma: no cover print('Done!', file=sys.stderr) - self.data = self._permute_data(data, perm, adj) - self.partptr = partptr - self.perm = perm - - def _permute_data(self, data, node_idx, adj): + self.data = self._permute_data(data, self.partition) + + def _metis(self, edge_index: Tensor, num_nodes: int) -> Tensor: + # Computes a node-level partition assignment vector via METIS. + + # Calculate CSR representation: + row, col = sort_edge_index(edge_index, num_nodes=num_nodes) + rowptr = index2ptr(row, size=num_nodes) + + # Compute METIS partitioning: + if torch_geometric.typing.WITH_METIS: + return pyg_lib.partition.metis( + rowptr.cpu(), + col.cpu(), + self.num_parts, + recursive=self.recursive, + ).to(edge_index.device) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + return torch.ops.torch_sparse.partition( + rowptr.cpu(), + col.cpu(), + None, + self.num_parts, + self.recursive, + ).to(edge_index.device) + + raise ImportError(f"'{self.__class__.__name__}' requires either " + f"'pyg-lib' or 'torch-sparse'") + + def _partition(self, edge_index: Tensor, cluster: Tensor) -> Partition: + # Computes node-level and edge-level permutations and permutes the edge + # connectivity accordingly: + + # Sort `cluster` and compute boundaries `partptr`: + cluster, node_perm = index_sort(cluster, max_value=self.num_parts) + partptr = index2ptr(cluster, size=self.num_parts) + + # Permute `edge_index` based on node permutation: + edge_perm = torch.arange(edge_index.size(1), device=edge_index.device) + arange = torch.empty_like(node_perm) + arange[node_perm] = torch.arange(cluster.numel(), + device=cluster.device) + edge_index = arange[edge_index] + + # Compute final CSR representation: + (row, col), edge_perm = sort_edge_index( + edge_index, + edge_attr=edge_perm, + num_nodes=cluster.numel(), + ) + rowptr = index2ptr(row, size=cluster.numel()) + + return Partition(rowptr, col, partptr, node_perm, edge_perm) + + def _permute_data(self, data: Data, partition: Partition) -> Data: + # Permute node-level and edge-level attributes according to the + # calculated permutations in `Partition`: out = copy.copy(data) for key, value in data.items(): - if data.is_node_attr(key): + if key == 'edge_index': + continue + elif data.is_node_attr(key): cat_dim = data.__cat_dim__(key, value) - out[key] = select(value, node_idx, dim=cat_dim) - + out[key] = select(value, partition.node_perm, dim=cat_dim) + elif data.is_edge_attr(key): + cat_dim = data.__cat_dim__(key, value) + out[key] = select(value, partition.edge_perm, dim=cat_dim) out.edge_index = None - out.adj = adj return out - def __len__(self): - return 
self.partptr.numel() - 1 - - def __getitem__(self, idx): - start = int(self.partptr[idx]) - length = int(self.partptr[idx + 1]) - start - - data = copy.copy(self.data) - adj, data.adj = data.adj, None - - adj = adj.narrow(0, start, length) + def __len__(self) -> int: + return self.partition.partptr.numel() - 1 + + def __getitem__(self, idx: int) -> Data: + node_start = int(self.partition.partptr[idx]) + node_end = int(self.partition.partptr[idx + 1]) + node_length = node_end - node_start + + rowptr = self.partition.rowptr[node_start:node_end + 1] + edge_start = int(rowptr[0]) + edge_end = int(rowptr[-1]) + edge_length = edge_end - edge_start + rowptr = rowptr - edge_start + row = ptr2index(rowptr) + col = self.partition.col[edge_start:edge_end] if not self.keep_inter_cluster_edges: - adj = adj.narrow(1, start, length) + edge_mask = (col >= node_start) & (col < node_end) + row = row[edge_mask] + col = col[edge_mask] - node_start - edge_idx = adj.storage.value() + out = copy.copy(self.data) - for key, value in data: + for key, value in self.data.items(): if key == 'num_nodes': - data.num_nodes = length + out.num_nodes = node_length elif self.data.is_node_attr(key): cat_dim = self.data.__cat_dim__(key, value) - data[key] = narrow(value, cat_dim, start, length) + out[key] = narrow(value, cat_dim, node_start, node_length) elif self.data.is_edge_attr(key): cat_dim = self.data.__cat_dim__(key, value) - data[key] = select(value, edge_idx, dim=cat_dim) + out[key] = narrow(value, cat_dim, edge_start, edge_length) + if not self.keep_inter_cluster_edges: + out[key] = out[key][edge_mask] - row, col, _ = adj.coo() - data.edge_index = torch.stack([row, col], dim=0) + out.edge_index = torch.stack([row, col], dim=0) - return data + return out - def __repr__(self): - return (f'{self.__class__.__name__}(\n' - f' data={self.data},\n' - f' num_parts={self.num_parts}\n' - f')') + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.num_parts})' class ClusterLoader(torch.utils.data.DataLoader): @@ -149,36 +221,67 @@ class ClusterLoader(torch.utils.data.DataLoader): """ def __init__(self, cluster_data, **kwargs): self.cluster_data = cluster_data + iterator = range(len(cluster_data)) + super().__init__(iterator, collate_fn=self._collate, **kwargs) - super().__init__(range(len(cluster_data)), collate_fn=self._collate, - **kwargs) - - def _collate(self, batch): + def _collate(self, batch: List[int]) -> Data: if not isinstance(batch, torch.Tensor): batch = torch.tensor(batch) - start = self.cluster_data.partptr[batch].tolist() - end = self.cluster_data.partptr[batch + 1].tolist() - node_idx = torch.cat([torch.arange(s, e) for s, e in zip(start, end)]) - - data = copy.copy(self.cluster_data.data) - - adj, data.adj = self.cluster_data.data.adj, None - adj = torch_sparse.cat( - [adj.narrow(0, s, e - s) for s, e in zip(start, end)], dim=0) - adj = adj.index_select(1, node_idx) - row, col, edge_idx = adj.coo() - - for key, value in data: + global_rowptr = self.cluster_data.partition.rowptr + global_col = self.cluster_data.partition.col + + # Get all node-level and edge-level start and end indices for the + # current mini-batch: + node_start = self.cluster_data.partition.partptr[batch] + node_end = self.cluster_data.partition.partptr[batch + 1] + edge_start = global_rowptr[node_start] + edge_end = global_rowptr[node_end] + + # Iterate over each partition in the batch and calculate new edge + # connectivity. 
This is done by slicing the corresponding source and + # destination indices for each partition and adjusting their indices to + # start from zero: + rows, cols, nodes, cumsum = [], [], [], 0 + for i in range(batch.numel()): + nodes.append(torch.arange(node_start[i], node_end[i])) + rowptr = global_rowptr[node_start[i]:node_end[i] + 1] + rowptr = rowptr - edge_start[i] + row = ptr2index(rowptr) + cumsum + col = global_col[edge_start[i]:edge_end[i]] + rows.append(row) + cols.append(col) + cumsum += rowptr.numel() - 1 + + node = torch.cat(nodes, dim=0) + row = torch.cat(rows, dim=0) + col = torch.cat(cols, dim=0) + + # Map `col` vector to valid entries and remove any entries that do not + # connect two nodes within the same mini-batch: + col, edge_mask = map_index(col, node) + row = row[edge_mask] + + out = copy.copy(self.cluster_data.data) + + # Slice node-level and edge-level attributes according to its offsets: + for key, value in self.cluster_data.data.items(): if key == 'num_nodes': - data.num_nodes = node_idx.numel() + out.num_nodes = cumsum elif self.cluster_data.data.is_node_attr(key): cat_dim = self.cluster_data.data.__cat_dim__(key, value) - data[key] = select(value, node_idx, dim=cat_dim) + out[key] = torch.cat([ + narrow(out[key], cat_dim, s, e - s) + for s, e in zip(node_start, node_end) + ], dim=cat_dim) elif self.cluster_data.data.is_edge_attr(key): cat_dim = self.cluster_data.data.__cat_dim__(key, value) - data[key] = select(value, edge_idx, dim=cat_dim) + value = torch.cat([ + narrow(out[key], cat_dim, s, e - s) + for s, e in zip(edge_start, edge_end) + ], dim=cat_dim) + out[key] = select(value, edge_mask, dim=cat_dim) - data.edge_index = torch.stack([row, col], dim=0) + out.edge_index = torch.stack([row, col], dim=0) - return data + return out diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index d25605ea7e3d..3fd739f2c0fe 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -13,6 +13,7 @@ WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul') WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add') WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort') + WITH_METIS = hasattr(pyg_lib, 'partition') except (ImportError, OSError) as e: if isinstance(e, OSError): warnings.warn(f"An issue occurred while importing 'pyg-lib'. " @@ -22,6 +23,7 @@ WITH_GMM = False WITH_SAMPLED_OP = False WITH_INDEX_SORT = False + WITH_METIS = False try: import torch_scatter # noqa diff --git a/torch_geometric/utils/map.py b/torch_geometric/utils/map.py new file mode 100644 index 000000000000..cea56b0edd42 --- /dev/null +++ b/torch_geometric/utils/map.py @@ -0,0 +1,50 @@ +from typing import Tuple + +import torch +from torch import Tensor + + +def map_index(src: Tensor, index: Tensor) -> Tuple[Tensor, Tensor]: + r"""Maps indices in :obj:`src` to the positional value of their + corresponding occurence in :obj:`index`. + + Args: + src (torch.Tensor): The source tensor to map. + index (torch.Tensor): The index tensor that denotes the new mapping. 
+ + :rtype: (:class:`torch.Tensor`, :class:`torch.BoolTensor`) + + Examples: + + >>> src = torch.tensor([2, 0, 1, 0, 3]) + >>> index = torch.tensor([3, 2, 0, 1]) + + >>> map_index(src, index) + (tensor([1, 2, 3, 2, 0]), tensor([True, True, True, True, True])) + + >>> src = torch.tensor([2, 0, 1, 0, 3]) + >>> index = torch.tensor([3, 2, 0]) + + >>> map_index(src, index) + (tensor([1, 2, 2, 0]), tensor([True, True, False, True, True])) + """ + import pandas as pd + + assert src.dim() == 1 and index.dim() == 1 + assert not src.is_floating_point() + assert not index.is_floating_point() + + arange = pd.RangeIndex(0, index.size(0)) + df = pd.DataFrame(index=index.detach().cpu().numpy(), data={'out': arange}) + ser = pd.Series(src.detach().cpu(), name='key') + result = df.merge(ser, how='right', left_index=True, right_on='key') + out = torch.from_numpy(result['out'].values).to(index.device) + + if out.is_floating_point(): + mask = torch.isnan(out).logical_not_() + out = out[mask].to(index.dtype) + return out, mask + + out = out.to(index.dtype) + mask = torch.ones_like(out, dtype=torch.bool) + return out, mask From 937f32f807da3031644edb434766c921ca450eea Mon Sep 17 00:00:00 2001 From: andreazanetti Date: Wed, 24 May 2023 11:41:05 +0200 Subject: [PATCH 1212/2432] Adds support for benchmarking bidirectional sampling (#7415) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simple addition to enable bidirectional neighbor loader benchmarking. Results show a relative drop in performance moving from directional to bidirectional (example below) maybe due to the fact that, currently, bidirectional sampling is implemented as a post-processing of regular sampling [#7200](https://github.com/pyg-team/pytorch_geometric/pull/7200/files#) With directional sampling: Dataset: products Training sampling with [10, 5] neighbors 100%|█████████████████████████████████193/193 [00:03<00:00, 48.96it/s] 100%|█████████████████████████████████ 193/193 [00:03<00:00, 49.59it/s] 100%|█████████████████████████████████ 193/193 [00:03<00:00, 49.01it/s] batch size=1024, iterations=579, runtimes=[3.943, 3.892, 3.939], average runtime=3.925 while with bidirectional sampling: Dataset: products Training sampling with [10, 5] neighbors 100%|██████████████████████████████████ 193/193 [00:04<00:00, 42.22it/s] 100%|██████████████████████████████████193/193 [00:04<00:00, 43.18it/s] 100%|██████████████████████████████████193/193 [00:04<00:00, 42.56it/s] batch size=1024, iterations=579, runtimes=[4.572, 4.47, 4.535], average runtime=4.526 --------- Co-authored-by: rusty1s --- benchmark/loader/neighbor_loader.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/benchmark/loader/neighbor_loader.py b/benchmark/loader/neighbor_loader.py index da3e6b7900bc..1732522dd714 100644 --- a/benchmark/loader/neighbor_loader.py +++ b/benchmark/loader/neighbor_loader.py @@ -50,6 +50,7 @@ def run(args: argparse.ArgumentParser): batch_size=batch_size, shuffle=True, num_workers=args.num_workers, + subgraph_type=args.subgraph_type, ) cpu_affinity = train_loader.enable_cpu_affinity( args.loader_cores @@ -129,4 +130,6 @@ def run(args: argparse.ArgumentParser): help="Use DataLoader affinitzation.") add('--loader-cores', nargs='+', default=[], type=int, help="List of CPU core IDs to use for DataLoader workers.") + add('--subgraph-type', type=str, default='directional', + help="The type of the returned subgraph (directional, bidirectional)") run(parser.parse_args()) From 8e818e36ccfc50032cf23561c214d954c534d7b4 Mon Sep 17 
00:00:00 2001 From: Matthias Fey Date: Wed, 24 May 2023 19:54:40 +0200 Subject: [PATCH 1213/2432] Fix gradient computation of edge weights in `utils.spmm` (#7428) --- CHANGELOG.md | 1 + torch_geometric/typing.py | 3 +++ torch_geometric/utils/spmm.py | 8 ++++---- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d396ae2bd24..ea275772ee26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed gradient computation of edge weights in `utils.spmm` ([#7428](https://github.com/pyg-team/pytorch_geometric/pull/7428)) - Re-factored `ClusterLoader` to integrate `pyg-lib` METIS routine ([#7416](https://github.com/pyg-team/pytorch_geometric/pull/7416)) - Fixed an index-out-of-range bug in `QuantileAggregation` when `dim_size` is passed ([#7407](https://github.com/pyg-team/pytorch_geometric/pull/7407)) - The `filter_per_worker` option will not get automatically inferred by default based on the device of the underlying data ([#7399](https://github.com/pyg-team/pytorch_geometric/pull/7399)) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 3fd739f2c0fe..a4b102f27073 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -135,6 +135,9 @@ def coo(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]: def csr(self) -> Tuple[Tensor, Tensor, Optional[Tensor]]: raise ImportError("'SparseTensor' requires 'torch-sparse'") + def requires_grad(self) -> bool: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + def to_torch_sparse_csr_tensor( self, dtype: Optional[torch.dtype] = None, diff --git a/torch_geometric/utils/spmm.py b/torch_geometric/utils/spmm.py index e2755d85657f..290182dce7c0 100644 --- a/torch_geometric/utils/spmm.py +++ b/torch_geometric/utils/spmm.py @@ -24,10 +24,10 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: """Matrix product of sparse matrix with dense matrix. Args: - src (Tensor or torch_sparse.SparseTensor): The input sparse matrix, - either a :pyg:`PyG` :class:`torch_sparse.SparseTensor` or a + src (torch.Tensor or torch_sparse.SparseTensor): The input sparse + matrix, either a :pyg:`PyG` :class:`torch_sparse.SparseTensor` or a :pytorch:`PyTorch` :class:`torch.sparse.Tensor`. - other (Tensor): The input dense matrix. + other (torch.Tensor): The input dense matrix. reduce (str, optional): The reduce operation to use (:obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`). (default: :obj:`"sum"`) @@ -41,7 +41,7 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: if isinstance(src, SparseTensor): if (torch_geometric.typing.WITH_PT2 and other.dim() == 2 - and not src.is_cuda()): + and not src.is_cuda() and not src.requires_grad()): # Use optimized PyTorch `torch.sparse.mm` path: csr = src.to_torch_sparse_csr_tensor() return torch.sparse.mm(csr, other, reduce) From b3f2f2fa74ac2f4054ae7a2a78e6b29f432c33ea Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 24 May 2023 20:01:11 +0200 Subject: [PATCH 1214/2432] `QuantileAggregation` - pass `output_size` to the `repeat_interleave` operation (#7426) Repeat interleave meta backend https://github.com/pytorch/pytorch/blob/ee95e37a692a2ada87b6523aeae822fe73a2d304/torch/_meta_registrations.py#L1526 throws an exception if output_size is None. Meta backends are used in custom torch.compile backends. Output size can be easily set in repeat interleave in QuantileAggregation operation. 
--------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/aggr/quantile.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea275772ee26..d7deeb3bea8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Set `output_size` in the `repeat_interleave` operation in `QuantileAggregation` ([#7426](https://github.com/pyg-team/pytorch_geometric/pull/7426)) - Fixed gradient computation of edge weights in `utils.spmm` ([#7428](https://github.com/pyg-team/pytorch_geometric/pull/7428)) - Re-factored `ClusterLoader` to integrate `pyg-lib` METIS routine ([#7416](https://github.com/pyg-team/pytorch_geometric/pull/7416)) - Fixed an index-out-of-range bug in `QuantileAggregation` when `dim_size` is passed ([#7407](https://github.com/pyg-team/pytorch_geometric/pull/7407)) diff --git a/torch_geometric/nn/aggr/quantile.py b/torch_geometric/nn/aggr/quantile.py index 4c720d1cc863..9f70dd600be2 100644 --- a/torch_geometric/nn/aggr/quantile.py +++ b/torch_geometric/nn/aggr/quantile.py @@ -115,7 +115,9 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, quantile = 0.5 * l_quant + 0.5 * r_quant # If the number of elements is zero, fill with pre-defined value: - mask = (count == 0).repeat_interleave(self.q.numel()).view(shape) + repeats = self.q.numel() + mask = (count == 0).repeat_interleave( + repeats, output_size=repeats * count.numel()).view(shape) out = quantile.masked_fill(mask, self.fill_value) if self.q.numel() > 1: From 73003d26cfd0b236ceab84cb225ecf7cd481ebc6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 24 May 2023 20:37:57 +0200 Subject: [PATCH 1215/2432] Add immutable transforms (#7429) --- CHANGELOG.md | 1 + examples/colors_topk_pool.py | 2 ++ examples/proteins_diff_pool.py | 14 ++++++-------- examples/qm9_nn_conv.py | 6 ++++-- examples/triangles_sag_pool.py | 2 ++ torch_geometric/data/in_memory_dataset.py | 1 + torch_geometric/transforms/add_metapaths.py | 4 ++-- .../transforms/add_positional_encoding.py | 4 ++-- .../transforms/add_remaining_self_loops.py | 2 +- torch_geometric/transforms/add_self_loops.py | 2 +- torch_geometric/transforms/base_transform.py | 9 +++++++-- torch_geometric/transforms/cartesian.py | 2 +- torch_geometric/transforms/center.py | 2 +- torch_geometric/transforms/compose.py | 2 +- torch_geometric/transforms/constant.py | 2 +- torch_geometric/transforms/delaunay.py | 2 +- torch_geometric/transforms/distance.py | 2 +- torch_geometric/transforms/face_to_edge.py | 2 +- torch_geometric/transforms/feature_propagation.py | 2 +- torch_geometric/transforms/fixed_points.py | 2 +- torch_geometric/transforms/gcn_norm.py | 2 +- torch_geometric/transforms/gdc.py | 2 +- .../transforms/generate_mesh_normals.py | 2 +- torch_geometric/transforms/grid_sampling.py | 2 +- torch_geometric/transforms/knn_graph.py | 2 +- torch_geometric/transforms/laplacian_lambda_max.py | 2 +- .../transforms/largest_connected_components.py | 2 +- torch_geometric/transforms/line_graph.py | 2 +- .../transforms/linear_transformation.py | 4 ++-- torch_geometric/transforms/local_cartesian.py | 2 +- torch_geometric/transforms/local_degree_profile.py | 2 +- torch_geometric/transforms/mask.py | 4 ++-- torch_geometric/transforms/node_property_split.py | 3 +-- torch_geometric/transforms/normalize_features.py | 2 +- torch_geometric/transforms/normalize_rotation.py | 2 +- torch_geometric/transforms/normalize_scale.py | 2 +- 
torch_geometric/transforms/one_hot_degree.py | 2 +- torch_geometric/transforms/pad.py | 2 +- torch_geometric/transforms/point_pair_features.py | 2 +- torch_geometric/transforms/polar.py | 2 +- torch_geometric/transforms/radius_graph.py | 2 +- torch_geometric/transforms/random_flip.py | 2 +- torch_geometric/transforms/random_jitter.py | 2 +- torch_geometric/transforms/random_link_split.py | 2 +- torch_geometric/transforms/random_node_split.py | 2 +- torch_geometric/transforms/random_rotate.py | 2 +- torch_geometric/transforms/random_scale.py | 2 +- torch_geometric/transforms/random_shear.py | 2 +- .../transforms/remove_duplicated_edges.py | 2 +- .../transforms/remove_isolated_nodes.py | 2 +- .../transforms/remove_training_classes.py | 2 +- torch_geometric/transforms/rooted_subgraph.py | 2 +- torch_geometric/transforms/sample_points.py | 2 +- torch_geometric/transforms/sign.py | 2 +- torch_geometric/transforms/spherical.py | 2 +- .../transforms/svd_feature_reduction.py | 2 +- torch_geometric/transforms/target_indegree.py | 2 +- torch_geometric/transforms/to_dense.py | 2 +- torch_geometric/transforms/to_device.py | 2 +- torch_geometric/transforms/to_sparse_tensor.py | 2 +- torch_geometric/transforms/to_superpixels.py | 2 +- torch_geometric/transforms/to_undirected.py | 2 +- torch_geometric/transforms/two_hop.py | 2 +- torch_geometric/transforms/virtual_node.py | 2 +- 64 files changed, 84 insertions(+), 74 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d7deeb3bea8f..03674baac49f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- All transforms are now immutable, i.e., they perform a shallow-copy of the data and therefore do not longer modify data in-place ([#7429](https://github.com/pyg-team/pytorch_geometric/pull/7429)) - Set `output_size` in the `repeat_interleave` operation in `QuantileAggregation` ([#7426](https://github.com/pyg-team/pytorch_geometric/pull/7426)) - Fixed gradient computation of edge weights in `utils.spmm` ([#7428](https://github.com/pyg-team/pytorch_geometric/pull/7428)) - Re-factored `ClusterLoader` to integrate `pyg-lib` METIS routine ([#7416](https://github.com/pyg-team/pytorch_geometric/pull/7416)) diff --git a/examples/colors_topk_pool.py b/examples/colors_topk_pool.py index ce9d91e2af16..2b1318ffb1c0 100644 --- a/examples/colors_topk_pool.py +++ b/examples/colors_topk_pool.py @@ -1,3 +1,4 @@ +import copy import os.path as osp import torch @@ -14,6 +15,7 @@ class HandleNodeAttention: def __call__(self, data): + data = copy.copy(data) data.attn = torch.softmax(data.x[:, 0], dim=0) data.x = data.x[:, 1:] return data diff --git a/examples/proteins_diff_pool.py b/examples/proteins_diff_pool.py index e021b1d36b6c..dce64ecfc4b9 100644 --- a/examples/proteins_diff_pool.py +++ b/examples/proteins_diff_pool.py @@ -11,16 +11,14 @@ max_nodes = 150 - -class MyFilter: - def __call__(self, data): - return data.num_nodes <= max_nodes - - path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PROTEINS_dense') -dataset = TUDataset(path, name='PROTEINS', transform=T.ToDense(max_nodes), - pre_filter=MyFilter()) +dataset = TUDataset( + path, + name='PROTEINS', + transform=T.ToDense(max_nodes), + pre_filter=lambda data: data.num_nodes <= max_nodes, +) dataset = dataset.shuffle() n = (len(dataset) + 9) // 10 test_dataset = dataset[:n] diff --git a/examples/qm9_nn_conv.py b/examples/qm9_nn_conv.py index 55998d8bdd05..f15ba0048dfb 100644 --- 
a/examples/qm9_nn_conv.py +++ b/examples/qm9_nn_conv.py @@ -1,3 +1,4 @@ +import copy import os.path as osp import torch @@ -16,13 +17,14 @@ class MyTransform: def __call__(self, data): - # Specify target. - data.y = data.y[:, target] + data = copy.copy(data) + data.y = data.y[:, target] # Specify target. return data class Complete: def __call__(self, data): + data = copy.copy(data) device = data.edge_index.device row = torch.arange(data.num_nodes, dtype=torch.long, device=device) diff --git a/examples/triangles_sag_pool.py b/examples/triangles_sag_pool.py index fc70a21969e0..a3d3898f78e7 100644 --- a/examples/triangles_sag_pool.py +++ b/examples/triangles_sag_pool.py @@ -1,3 +1,4 @@ +import copy import os.path as osp import torch @@ -15,6 +16,7 @@ class HandleNodeAttention: def __call__(self, data): + data = copy.copy(data) data.attn = torch.softmax(data.x, dim=0).flatten() data.x = None return data diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index 9d54cf1b70fd..90333354f6fe 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -85,6 +85,7 @@ def len(self) -> int: return 0 def get(self, idx: int) -> Data: + # TODO (matthias) Avoid unnecessary copy here. if self.len() == 1: return copy.copy(self._data) diff --git a/torch_geometric/transforms/add_metapaths.py b/torch_geometric/transforms/add_metapaths.py index 90735f3c8830..27195dd8df88 100644 --- a/torch_geometric/transforms/add_metapaths.py +++ b/torch_geometric/transforms/add_metapaths.py @@ -129,7 +129,7 @@ def __init__( self.max_sample = max_sample self.weighted = weighted - def __call__(self, data: HeteroData) -> HeteroData: + def forward(self, data: HeteroData) -> HeteroData: edge_types = data.edge_types # save original edge types data.metapath_dict = {} @@ -244,7 +244,7 @@ def __init__( assert len(walks_per_node) == len(metapaths) self.walks_per_node = walks_per_node - def __call__(self, data: HeteroData) -> HeteroData: + def forward(self, data: HeteroData) -> HeteroData: edge_types = data.edge_types # save original edge types data.metapath_dict = {} diff --git a/torch_geometric/transforms/add_positional_encoding.py b/torch_geometric/transforms/add_positional_encoding.py index 3fecc169b3e9..c66f60421ab7 100644 --- a/torch_geometric/transforms/add_positional_encoding.py +++ b/torch_geometric/transforms/add_positional_encoding.py @@ -64,7 +64,7 @@ def __init__( self.is_undirected = is_undirected self.kwargs = kwargs - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: from scipy.sparse.linalg import eigs, eigsh eig_fn = eigs if not self.is_undirected else eigsh @@ -117,7 +117,7 @@ def __init__( self.walk_length = walk_length self.attr_name = attr_name - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: row, col = data.edge_index N = data.num_nodes diff --git a/torch_geometric/transforms/add_remaining_self_loops.py b/torch_geometric/transforms/add_remaining_self_loops.py index 74a1470a079d..4150f30254ba 100644 --- a/torch_geometric/transforms/add_remaining_self_loops.py +++ b/torch_geometric/transforms/add_remaining_self_loops.py @@ -32,7 +32,7 @@ def __init__(self, attr: Optional[str] = 'edge_weight', self.attr = attr self.fill_value = fill_value - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/add_self_loops.py b/torch_geometric/transforms/add_self_loops.py index 
c305260b27aa..c6594970a5c9 100644 --- a/torch_geometric/transforms/add_self_loops.py +++ b/torch_geometric/transforms/add_self_loops.py @@ -32,7 +32,7 @@ def __init__(self, attr: Optional[str] = 'edge_weight', self.attr = attr self.fill_value = fill_value - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/base_transform.py b/torch_geometric/transforms/base_transform.py index 405fb28d1867..1f98ef8965e2 100644 --- a/torch_geometric/transforms/base_transform.py +++ b/torch_geometric/transforms/base_transform.py @@ -1,4 +1,5 @@ -from abc import ABC +import copy +from abc import ABC, abstractmethod from typing import Any @@ -27,7 +28,11 @@ class BaseTransform(ABC): data = transform(data) # Explicitly transform data. """ def __call__(self, data: Any) -> Any: - raise NotImplementedError + return self.forward(copy.copy(data)) + + @abstractmethod + def forward(self, data: Any) -> Any: + pass def __repr__(self) -> str: return f'{self.__class__.__name__}()' diff --git a/torch_geometric/transforms/cartesian.py b/torch_geometric/transforms/cartesian.py index bcd56f06169e..0f59922c67e4 100644 --- a/torch_geometric/transforms/cartesian.py +++ b/torch_geometric/transforms/cartesian.py @@ -32,7 +32,7 @@ def __init__( self.max = max_value self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr cart = pos[row] - pos[col] diff --git a/torch_geometric/transforms/center.py b/torch_geometric/transforms/center.py index eca11b34e7d5..abd4d97c9a3c 100644 --- a/torch_geometric/transforms/center.py +++ b/torch_geometric/transforms/center.py @@ -9,7 +9,7 @@ class Center(BaseTransform): r"""Centers node positions :obj:`data.pos` around the origin (functional name: :obj:`center`).""" - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/compose.py b/torch_geometric/transforms/compose.py index 45f8e8f18610..128dfc773493 100644 --- a/torch_geometric/transforms/compose.py +++ b/torch_geometric/transforms/compose.py @@ -13,7 +13,7 @@ class Compose(BaseTransform): def __init__(self, transforms: List[Callable]): self.transforms = transforms - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/constant.py b/torch_geometric/transforms/constant.py index 91fcda2757fc..2cfe87df7881 100644 --- a/torch_geometric/transforms/constant.py +++ b/torch_geometric/transforms/constant.py @@ -34,7 +34,7 @@ def __init__( self.cat = cat self.node_types = node_types - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/delaunay.py b/torch_geometric/transforms/delaunay.py index 8a25fe151642..11ec9c0e2d41 100644 --- a/torch_geometric/transforms/delaunay.py +++ b/torch_geometric/transforms/delaunay.py @@ -10,7 +10,7 @@ class Delaunay(BaseTransform): r"""Computes the delaunay triangulation of a set of points (functional name: :obj:`delaunay`).""" - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: if data.pos.size(0) < 2: data.edge_index = torch.tensor([], dtype=torch.long, device=data.pos.device).view(2, 0) diff --git a/torch_geometric/transforms/distance.py b/torch_geometric/transforms/distance.py index 72741f1d36a5..70404da7b4a5 100644 --- 
a/torch_geometric/transforms/distance.py +++ b/torch_geometric/transforms/distance.py @@ -27,7 +27,7 @@ def __init__(self, norm: bool = True, max_value: Optional[float] = None, self.max = max_value self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr dist = torch.norm(pos[col] - pos[row], p=2, dim=-1).view(-1, 1) diff --git a/torch_geometric/transforms/face_to_edge.py b/torch_geometric/transforms/face_to_edge.py index fed49de78d33..b885c3e0e6a0 100644 --- a/torch_geometric/transforms/face_to_edge.py +++ b/torch_geometric/transforms/face_to_edge.py @@ -18,7 +18,7 @@ class FaceToEdge(BaseTransform): def __init__(self, remove_faces: bool = True): self.remove_faces = remove_faces - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: if hasattr(data, 'face'): face = data.face edge_index = torch.cat([face[:2], face[1:], face[::2]], dim=1) diff --git a/torch_geometric/transforms/feature_propagation.py b/torch_geometric/transforms/feature_propagation.py index 5a9f89b6c1e7..56f12b7dc311 100644 --- a/torch_geometric/transforms/feature_propagation.py +++ b/torch_geometric/transforms/feature_propagation.py @@ -40,7 +40,7 @@ def __init__(self, missing_mask: Tensor, num_iterations: int = 40): self.missing_mask = missing_mask self.num_iterations = num_iterations - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: assert 'edge_index' in data or 'adj_t' in data assert data.x.size() == self.missing_mask.size() diff --git a/torch_geometric/transforms/fixed_points.py b/torch_geometric/transforms/fixed_points.py index 1a32ec65fe18..0340dd835025 100644 --- a/torch_geometric/transforms/fixed_points.py +++ b/torch_geometric/transforms/fixed_points.py @@ -37,7 +37,7 @@ def __init__( self.replace = replace self.allow_duplicates = allow_duplicates - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: num_nodes = data.num_nodes if self.replace: diff --git a/torch_geometric/transforms/gcn_norm.py b/torch_geometric/transforms/gcn_norm.py index 66ca9e59dac3..fb70e9e0ccd0 100644 --- a/torch_geometric/transforms/gcn_norm.py +++ b/torch_geometric/transforms/gcn_norm.py @@ -19,7 +19,7 @@ class GCNNorm(BaseTransform): def __init__(self, add_self_loops: bool = True): self.add_self_loops = add_self_loops - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: gcn_norm = torch_geometric.nn.conv.gcn_conv.gcn_norm assert 'edge_index' in data or 'adj_t' in data diff --git a/torch_geometric/transforms/gdc.py b/torch_geometric/transforms/gdc.py index a482289ea2c0..a9edd428288b 100644 --- a/torch_geometric/transforms/gdc.py +++ b/torch_geometric/transforms/gdc.py @@ -97,7 +97,7 @@ def __init__( assert exact or self_loop_weight == 1 @torch.no_grad() - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: N = data.num_nodes edge_index = data.edge_index if data.edge_attr is None: diff --git a/torch_geometric/transforms/generate_mesh_normals.py b/torch_geometric/transforms/generate_mesh_normals.py index 9cf5d6c5147c..0bc5e5e6816b 100644 --- a/torch_geometric/transforms/generate_mesh_normals.py +++ b/torch_geometric/transforms/generate_mesh_normals.py @@ -11,7 +11,7 @@ class GenerateMeshNormals(BaseTransform): r"""Generate normal vectors for each mesh node based on neighboring faces (functional name: :obj:`generate_mesh_normals`).""" - def __call__(self, data: Data) -> Data: + def 
forward(self, data: Data) -> Data: assert 'face' in data pos, face = data.pos, data.face diff --git a/torch_geometric/transforms/grid_sampling.py b/torch_geometric/transforms/grid_sampling.py index 4042abe6ae78..c1f2d99ae99a 100644 --- a/torch_geometric/transforms/grid_sampling.py +++ b/torch_geometric/transforms/grid_sampling.py @@ -36,7 +36,7 @@ def __init__(self, size: Union[float, List[float], Tensor], self.start = start self.end = end - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: num_nodes = data.num_nodes batch = data.get('batch', None) diff --git a/torch_geometric/transforms/knn_graph.py b/torch_geometric/transforms/knn_graph.py index 94c47a1e34b8..bba6776f3627 100644 --- a/torch_geometric/transforms/knn_graph.py +++ b/torch_geometric/transforms/knn_graph.py @@ -45,7 +45,7 @@ def __init__( self.cosine = cosine self.num_workers = num_workers - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: data.edge_attr = None batch = data.batch if 'batch' in data else None diff --git a/torch_geometric/transforms/laplacian_lambda_max.py b/torch_geometric/transforms/laplacian_lambda_max.py index ce1dfcd1e21d..6c08536f47dc 100644 --- a/torch_geometric/transforms/laplacian_lambda_max.py +++ b/torch_geometric/transforms/laplacian_lambda_max.py @@ -40,7 +40,7 @@ def __init__( self.normalization = normalization self.is_undirected = is_undirected - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: edge_weight = data.edge_attr if edge_weight is not None and edge_weight.numel() != data.num_edges: edge_weight = None diff --git a/torch_geometric/transforms/largest_connected_components.py b/torch_geometric/transforms/largest_connected_components.py index 403cbd79592c..bf7ee8eb9387 100644 --- a/torch_geometric/transforms/largest_connected_components.py +++ b/torch_geometric/transforms/largest_connected_components.py @@ -28,7 +28,7 @@ def __init__(self, num_components: int = 1, connection: str = 'weak'): self.num_components = num_components self.connection = connection - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: import numpy as np import scipy.sparse as sp diff --git a/torch_geometric/transforms/line_graph.py b/torch_geometric/transforms/line_graph.py index 531e13ce3ac7..83c492b26abb 100644 --- a/torch_geometric/transforms/line_graph.py +++ b/torch_geometric/transforms/line_graph.py @@ -34,7 +34,7 @@ class LineGraph(BaseTransform): def __init__(self, force_directed: bool = False): self.force_directed = force_directed - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: N = data.num_nodes edge_index, edge_attr = data.edge_index, data.edge_attr edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes=N) diff --git a/torch_geometric/transforms/linear_transformation.py b/torch_geometric/transforms/linear_transformation.py index 8cd7541351e7..70be3c980bb6 100644 --- a/torch_geometric/transforms/linear_transformation.py +++ b/torch_geometric/transforms/linear_transformation.py @@ -26,10 +26,10 @@ def __init__(self, matrix: Tensor): f'Transformation matrix should be square (got {matrix.size()})') # Store the matrix as its transpose. - # We do this to enable post-multiplication in `__call__`. + # We do this to enable post-multiplication in `forward`. 
self.matrix = matrix.t() - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/local_cartesian.py b/torch_geometric/transforms/local_cartesian.py index fdc715a4bc00..ee7916aeb297 100644 --- a/torch_geometric/transforms/local_cartesian.py +++ b/torch_geometric/transforms/local_cartesian.py @@ -23,7 +23,7 @@ def __init__(self, norm: bool = True, cat: bool = True): self.norm = norm self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr cart = pos[row] - pos[col] diff --git a/torch_geometric/transforms/local_degree_profile.py b/torch_geometric/transforms/local_degree_profile.py index f517ff9c45cb..3eb72e192ded 100644 --- a/torch_geometric/transforms/local_degree_profile.py +++ b/torch_geometric/transforms/local_degree_profile.py @@ -24,7 +24,7 @@ def __init__(self): from torch_geometric.nn.aggr.fused import FusedAggregation self.aggr = FusedAggregation(['min', 'max', 'mean', 'std']) - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: row, col = data.edge_index N = data.num_nodes diff --git a/torch_geometric/transforms/mask.py b/torch_geometric/transforms/mask.py index 56741b383641..ccb05cea91d0 100644 --- a/torch_geometric/transforms/mask.py +++ b/torch_geometric/transforms/mask.py @@ -52,7 +52,7 @@ def __init__( self.sizes = sizes self.replace = replace - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: @@ -106,7 +106,7 @@ def __init__( self.attrs = [attrs] if isinstance(attrs, str) else attrs self.replace = replace - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/node_property_split.py b/torch_geometric/transforms/node_property_split.py index df4a313a58bf..1d540e36a7a1 100644 --- a/torch_geometric/transforms/node_property_split.py +++ b/torch_geometric/transforms/node_property_split.py @@ -81,8 +81,7 @@ def __init__( self.ratios = ratios self.ascending = ascending - def __call__(self, data: Data) -> Data: - + def forward(self, data: Data) -> Data: G = to_networkx(data, to_undirected=True, remove_self_loops=True) property_values = self.compute_fn(G, self.ascending) mask_dict = self._mask_nodes_by_property(property_values, self.ratios) diff --git a/torch_geometric/transforms/normalize_features.py b/torch_geometric/transforms/normalize_features.py index 57ca3237678f..afcf67283eda 100644 --- a/torch_geometric/transforms/normalize_features.py +++ b/torch_geometric/transforms/normalize_features.py @@ -17,7 +17,7 @@ class NormalizeFeatures(BaseTransform): def __init__(self, attrs: List[str] = ["x"]): self.attrs = attrs - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/normalize_rotation.py b/torch_geometric/transforms/normalize_rotation.py index bb4b36f6c95f..5bf077071874 100644 --- a/torch_geometric/transforms/normalize_rotation.py +++ b/torch_geometric/transforms/normalize_rotation.py @@ -24,7 +24,7 @@ def __init__(self, max_points: int = -1, sort: bool = False): self.max_points = max_points self.sort = sort - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: pos = data.pos if self.max_points > 0 and pos.size(0) > self.max_points: diff --git a/torch_geometric/transforms/normalize_scale.py 
b/torch_geometric/transforms/normalize_scale.py index 165f8198ba03..97f60c6d14b7 100644 --- a/torch_geometric/transforms/normalize_scale.py +++ b/torch_geometric/transforms/normalize_scale.py @@ -11,7 +11,7 @@ class NormalizeScale(BaseTransform): def __init__(self): self.center = Center() - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: data = self.center(data) scale = (1 / data.pos.abs().max()) * 0.999999 diff --git a/torch_geometric/transforms/one_hot_degree.py b/torch_geometric/transforms/one_hot_degree.py index d3cdf96661db..188c58e0c4f4 100644 --- a/torch_geometric/transforms/one_hot_degree.py +++ b/torch_geometric/transforms/one_hot_degree.py @@ -29,7 +29,7 @@ def __init__( self.in_degree = in_degree self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: idx, x = data.edge_index[1 if self.in_degree else 0], data.x deg = degree(idx, data.num_nodes, dtype=torch.long) deg = one_hot(deg, num_classes=self.max_degree + 1) diff --git a/torch_geometric/transforms/pad.py b/torch_geometric/transforms/pad.py index b92b8a983884..50489f7593bd 100644 --- a/torch_geometric/transforms/pad.py +++ b/torch_geometric/transforms/pad.py @@ -399,7 +399,7 @@ def __get_edge_padding( edge_type: Optional[EdgeType] = None) -> Union[int, float]: return self.edge_pad.get_value(edge_type, attr_name) - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/point_pair_features.py b/torch_geometric/transforms/point_pair_features.py index 1cabdecac839..252992ddc656 100644 --- a/torch_geometric/transforms/point_pair_features.py +++ b/torch_geometric/transforms/point_pair_features.py @@ -28,7 +28,7 @@ class PointPairFeatures(BaseTransform): def __init__(self, cat: bool = True): self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: ppf_func = torch_geometric.nn.conv.ppf_conv.point_pair_features assert data.edge_index is not None diff --git a/torch_geometric/transforms/polar.py b/torch_geometric/transforms/polar.py index 0e6474b02ca1..3bc42c29f05e 100644 --- a/torch_geometric/transforms/polar.py +++ b/torch_geometric/transforms/polar.py @@ -33,7 +33,7 @@ def __init__( self.max = max_value self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr assert pos.dim() == 2 and pos.size(1) == 2 diff --git a/torch_geometric/transforms/radius_graph.py b/torch_geometric/transforms/radius_graph.py index 39a1ddce272d..4fe8817715aa 100644 --- a/torch_geometric/transforms/radius_graph.py +++ b/torch_geometric/transforms/radius_graph.py @@ -37,7 +37,7 @@ def __init__( self.flow = flow self.num_workers = num_workers - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: data.edge_attr = None batch = data.batch if 'batch' in data else None diff --git a/torch_geometric/transforms/random_flip.py b/torch_geometric/transforms/random_flip.py index cb9fdc63f233..160b5daeef6b 100644 --- a/torch_geometric/transforms/random_flip.py +++ b/torch_geometric/transforms/random_flip.py @@ -19,7 +19,7 @@ def __init__(self, axis: int, p: float = 0.5): self.axis = axis self.p = p - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: if random.random() < self.p: pos = data.pos.clone() pos[..., self.axis] = -pos[..., self.axis] diff --git a/torch_geometric/transforms/random_jitter.py 
b/torch_geometric/transforms/random_jitter.py index 4b39035e9044..89275951cfd6 100644 --- a/torch_geometric/transforms/random_jitter.py +++ b/torch_geometric/transforms/random_jitter.py @@ -26,7 +26,7 @@ class RandomJitter(BaseTransform): def __init__(self, translate: Union[float, int, Sequence]): self.translate = translate - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: (n, dim), t = data.pos.size(), self.translate if isinstance(t, numbers.Number): t = list(repeat(t, times=dim)) diff --git a/torch_geometric/transforms/random_link_split.py b/torch_geometric/transforms/random_link_split.py index f02f4f31691a..0a81ce2ae33b 100644 --- a/torch_geometric/transforms/random_link_split.py +++ b/torch_geometric/transforms/random_link_split.py @@ -116,7 +116,7 @@ def __init__( self.edge_types = edge_types self.rev_edge_types = rev_edge_types - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/random_node_split.py b/torch_geometric/transforms/random_node_split.py index cbad46e91456..1b80f3531b05 100644 --- a/torch_geometric/transforms/random_node_split.py +++ b/torch_geometric/transforms/random_node_split.py @@ -69,7 +69,7 @@ def __init__( self.num_test = num_test self.key = key - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/random_rotate.py b/torch_geometric/transforms/random_rotate.py index 04bb99d2b4d9..832eeb082c8d 100644 --- a/torch_geometric/transforms/random_rotate.py +++ b/torch_geometric/transforms/random_rotate.py @@ -30,7 +30,7 @@ def __init__(self, degrees: Union[Tuple[float, float], float], self.degrees = degrees self.axis = axis - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: degree = math.pi * random.uniform(*self.degrees) / 180.0 sin, cos = math.sin(degree), math.cos(degree) diff --git a/torch_geometric/transforms/random_scale.py b/torch_geometric/transforms/random_scale.py index a8ebbe4ea6d5..6fcd685a8de4 100644 --- a/torch_geometric/transforms/random_scale.py +++ b/torch_geometric/transforms/random_scale.py @@ -30,7 +30,7 @@ def __init__(self, scales: Tuple[float, float]): assert isinstance(scales, (tuple, list)) and len(scales) == 2 self.scales = scales - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: scale = random.uniform(*self.scales) data.pos = data.pos * scale return data diff --git a/torch_geometric/transforms/random_shear.py b/torch_geometric/transforms/random_shear.py index 479c7dd01406..70d7f068befa 100644 --- a/torch_geometric/transforms/random_shear.py +++ b/torch_geometric/transforms/random_shear.py @@ -29,7 +29,7 @@ class RandomShear(BaseTransform): def __init__(self, shear: Union[float, int]): self.shear = abs(shear) - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: dim = data.pos.size(-1) matrix = data.pos.new_empty(dim, dim).uniform_(-self.shear, self.shear) diff --git a/torch_geometric/transforms/remove_duplicated_edges.py b/torch_geometric/transforms/remove_duplicated_edges.py index 9a2d19e43c1b..4091a465674a 100644 --- a/torch_geometric/transforms/remove_duplicated_edges.py +++ b/torch_geometric/transforms/remove_duplicated_edges.py @@ -31,7 +31,7 @@ def __init__( self.keys = key self.reduce = reduce - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git 
a/torch_geometric/transforms/remove_isolated_nodes.py b/torch_geometric/transforms/remove_isolated_nodes.py index 11914f173826..4a4e013586be 100644 --- a/torch_geometric/transforms/remove_isolated_nodes.py +++ b/torch_geometric/transforms/remove_isolated_nodes.py @@ -13,7 +13,7 @@ class RemoveIsolatedNodes(BaseTransform): r"""Removes isolated nodes from the graph (functional name: :obj:`remove_isolated_nodes`).""" - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/remove_training_classes.py b/torch_geometric/transforms/remove_training_classes.py index 13af7dd501ea..1d29ff91c56f 100644 --- a/torch_geometric/transforms/remove_training_classes.py +++ b/torch_geometric/transforms/remove_training_classes.py @@ -17,7 +17,7 @@ class RemoveTrainingClasses(BaseTransform): def __init__(self, classes: List[int]): self.classes = classes - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: data.train_mask = data.train_mask.clone() for i in self.classes: data.train_mask[data.y == i] = False diff --git a/torch_geometric/transforms/rooted_subgraph.py b/torch_geometric/transforms/rooted_subgraph.py index fde7e2eb4f4f..f98a3013939e 100644 --- a/torch_geometric/transforms/rooted_subgraph.py +++ b/torch_geometric/transforms/rooted_subgraph.py @@ -92,7 +92,7 @@ def map( return sub_edge_index, n_id, e_id, n_sub_batch, e_sub_batch - def __call__(self, data: Data) -> RootedSubgraphData: + def forward(self, data: Data) -> RootedSubgraphData: out = self.extract(data) d = RootedSubgraphData.from_dict(data.to_dict()) d.sub_edge_index, d.n_id, d.e_id, d.n_sub_batch, d.e_sub_batch = out diff --git a/torch_geometric/transforms/sample_points.py b/torch_geometric/transforms/sample_points.py index 18aed6a6ac3d..0512b77e8b70 100644 --- a/torch_geometric/transforms/sample_points.py +++ b/torch_geometric/transforms/sample_points.py @@ -27,7 +27,7 @@ def __init__( self.remove_faces = remove_faces self.include_normals = include_normals - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: pos, face = data.pos, data.face assert pos.size(1) == 3 and face.size(0) == 3 diff --git a/torch_geometric/transforms/sign.py b/torch_geometric/transforms/sign.py index c9e0d9e0b6be..55d9fe1569bb 100644 --- a/torch_geometric/transforms/sign.py +++ b/torch_geometric/transforms/sign.py @@ -34,7 +34,7 @@ class SIGN(BaseTransform): def __init__(self, K: int): self.K = K - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: assert data.edge_index is not None row, col = data.edge_index N = data.num_nodes diff --git a/torch_geometric/transforms/spherical.py b/torch_geometric/transforms/spherical.py index 67f269d37771..9f68637aeab9 100644 --- a/torch_geometric/transforms/spherical.py +++ b/torch_geometric/transforms/spherical.py @@ -33,7 +33,7 @@ def __init__( self.max = max_value self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr assert pos.dim() == 2 and pos.size(1) == 3 diff --git a/torch_geometric/transforms/svd_feature_reduction.py b/torch_geometric/transforms/svd_feature_reduction.py index a73c2db166ed..760d4e50d2a8 100644 --- a/torch_geometric/transforms/svd_feature_reduction.py +++ b/torch_geometric/transforms/svd_feature_reduction.py @@ -17,7 +17,7 @@ class SVDFeatureReduction(BaseTransform): def __init__(self, out_channels: int): self.out_channels = 
out_channels - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: if data.x.size(-1) > self.out_channels: U, S, _ = torch.linalg.svd(data.x) data.x = torch.mm(U[:, :self.out_channels], diff --git a/torch_geometric/transforms/target_indegree.py b/torch_geometric/transforms/target_indegree.py index b9262e6f5a67..a471645dd6c2 100644 --- a/torch_geometric/transforms/target_indegree.py +++ b/torch_geometric/transforms/target_indegree.py @@ -33,7 +33,7 @@ def __init__( self.max = max_value self.cat = cat - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: col, pseudo = data.edge_index[1], data.edge_attr deg = degree(col, data.num_nodes) diff --git a/torch_geometric/transforms/to_dense.py b/torch_geometric/transforms/to_dense.py index 4e2fee658df1..6ff2743542b7 100644 --- a/torch_geometric/transforms/to_dense.py +++ b/torch_geometric/transforms/to_dense.py @@ -20,7 +20,7 @@ class ToDense(BaseTransform): def __init__(self, num_nodes: Optional[int] = None): self.num_nodes = num_nodes - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: assert data.edge_index is not None orig_num_nodes = data.num_nodes diff --git a/torch_geometric/transforms/to_device.py b/torch_geometric/transforms/to_device.py index f1230bea2715..acf6ee581a98 100644 --- a/torch_geometric/transforms/to_device.py +++ b/torch_geometric/transforms/to_device.py @@ -29,7 +29,7 @@ def __init__( self.attrs = attrs or [] self.non_blocking = non_blocking - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/to_sparse_tensor.py b/torch_geometric/transforms/to_sparse_tensor.py index cfa7ed7420f9..0aed1ab04312 100644 --- a/torch_geometric/transforms/to_sparse_tensor.py +++ b/torch_geometric/transforms/to_sparse_tensor.py @@ -68,7 +68,7 @@ def __init__( self.fill_cache = fill_cache self.layout = layout - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/to_superpixels.py b/torch_geometric/transforms/to_superpixels.py index e9235c775ebe..579bc42eeffc 100644 --- a/torch_geometric/transforms/to_superpixels.py +++ b/torch_geometric/transforms/to_superpixels.py @@ -41,7 +41,7 @@ def __init__(self, add_seg: bool = False, add_img: bool = False, **kwargs): self.add_img = add_img self.kwargs = kwargs - def __call__(self, img: Tensor) -> Data: + def forward(self, img: Tensor) -> Data: from skimage.segmentation import slic img = img.permute(1, 2, 0) diff --git a/torch_geometric/transforms/to_undirected.py b/torch_geometric/transforms/to_undirected.py index 05b2b6014ecf..c754b4be09ff 100644 --- a/torch_geometric/transforms/to_undirected.py +++ b/torch_geometric/transforms/to_undirected.py @@ -34,7 +34,7 @@ def __init__(self, reduce: str = "add", merge: bool = True): self.reduce = reduce self.merge = merge - def __call__( + def forward( self, data: Union[Data, HeteroData], ) -> Union[Data, HeteroData]: diff --git a/torch_geometric/transforms/two_hop.py b/torch_geometric/transforms/two_hop.py index 10f08ad5b37f..ec53d99167ea 100644 --- a/torch_geometric/transforms/two_hop.py +++ b/torch_geometric/transforms/two_hop.py @@ -15,7 +15,7 @@ class TwoHop(BaseTransform): r"""Adds the two hop edges to the edge indices (functional name: :obj:`two_hop`).""" - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: edge_index, edge_attr = data.edge_index, data.edge_attr N = data.num_nodes 
diff --git a/torch_geometric/transforms/virtual_node.py b/torch_geometric/transforms/virtual_node.py index d86acbbafd65..d9ba5be1412e 100644 --- a/torch_geometric/transforms/virtual_node.py +++ b/torch_geometric/transforms/virtual_node.py @@ -24,7 +24,7 @@ class VirtualNode(BaseTransform): Furthermore, special edge types will be added both for in-coming and out-going information to and from the virtual node. """ - def __call__(self, data: Data) -> Data: + def forward(self, data: Data) -> Data: num_nodes, (row, col) = data.num_nodes, data.edge_index edge_type = data.get('edge_type', torch.zeros_like(row)) From ce6e97a498328678ed1bb980cafcefd48b94ebb5 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 25 May 2023 10:23:44 +0200 Subject: [PATCH 1216/2432] Ensure that `BasicGNN` models do not lead to graph breaks during compilation (#7433) --- test/nn/models/test_basic_gnn.py | 35 ++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 158d398991a0..d9517f24b8ca 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -293,6 +293,41 @@ def test_trim_to_layer(): assert torch.allclose(out1, out2) +num_compile_calls = 0 + + +@onlyLinux +@disableExtensions +@withPackage('torch>=2.0.0') +@pytest.mark.parametrize('Model', [GCN, GraphSAGE, GIN, GAT, EdgeCNN, PNA]) +def test_compile_graph_breaks(Model): + x = torch.randn(3, 8) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + + kwargs = {} + if Model in {GCN, GAT}: + # Adding self-loops inside the model leads to graph breaks :( + kwargs['add_self_loops'] = False + + if Model in {PNA}: # `PNA` requires additional arguments: + kwargs['aggregators'] = ['sum', 'mean', 'min', 'max', 'var', 'std'] + kwargs['scalers'] = ['identity', 'amplification', 'attenuation'] + kwargs['deg'] = torch.tensor([1, 2, 1]) + + model = Model(in_channels=8, hidden_channels=16, num_layers=2, **kwargs) + + def my_custom_backend(gm, *args): + global num_compile_calls + num_compile_calls += 1 + return gm.forward + + model = torch_geometric.compile(model, backend=my_custom_backend) + + num_previous_compile_calls = num_compile_calls + model(x, edge_index) + assert num_compile_calls - num_previous_compile_calls == 1 + + if __name__ == '__main__': import argparse From 08c4aa33995502bf09e239d669519d8b0d370fbc Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 25 May 2023 13:47:16 +0200 Subject: [PATCH 1217/2432] Remove `abstractmethod` decorator in `BaseTransform` (#7435) --- torch_geometric/transforms/base_transform.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_geometric/transforms/base_transform.py b/torch_geometric/transforms/base_transform.py index 1f98ef8965e2..ead771ad1ac3 100644 --- a/torch_geometric/transforms/base_transform.py +++ b/torch_geometric/transforms/base_transform.py @@ -1,5 +1,5 @@ import copy -from abc import ABC, abstractmethod +from abc import ABC from typing import Any @@ -28,9 +28,9 @@ class BaseTransform(ABC): data = transform(data) # Explicitly transform data. """ def __call__(self, data: Any) -> Any: + # Shallow-copy the data so that we prevent in-place data modification. 
return self.forward(copy.copy(data)) - @abstractmethod def forward(self, data: Any) -> Any: pass From 4b1d748fe398ffc4084c016274c2c66351853dfd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 25 May 2023 19:21:59 +0200 Subject: [PATCH 1218/2432] Fix TorchScript support in `HeteroDictLinear` (#7436) --- test/nn/aggr/test_multi.py | 9 ++------- test/nn/dense/test_linear.py | 10 ++-------- torch_geometric/nn/dense/linear.py | 2 +- 3 files changed, 5 insertions(+), 16 deletions(-) diff --git a/test/nn/aggr/test_multi.py b/test/nn/aggr/test_multi.py index e37eb3923fa4..222696b62ec9 100644 --- a/test/nn/aggr/test_multi.py +++ b/test/nn/aggr/test_multi.py @@ -45,10 +45,5 @@ def test_multi_aggr(multi_aggr_tuple): else: assert torch.allclose(out, aggr(x, ptr=ptr)) - if aggr_kwargs['mode'] == 'attn' and torch_geometric.typing.WITH_GMM: - # See: https://github.com/pytorch/pytorch/pull/97960 - with pytest.raises(RuntimeError, match="Unknown builtin op"): - jit = torch.jit.script(aggr) - else: - jit = torch.jit.script(aggr) - assert torch.allclose(out, jit(x, index)) + jit = torch.jit.script(aggr) + assert torch.allclose(out, jit(x, index)) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index 271600e8e431..9e7fd118220f 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -8,7 +8,6 @@ from torch.nn import Linear as PTLinear from torch.nn.parameter import UninitializedParameter -import torch_geometric.typing from torch_geometric.nn import HeteroDictLinear, HeteroLinear, Linear from torch_geometric.profile import benchmark from torch_geometric.testing import withCUDA, withPackage @@ -180,13 +179,8 @@ def test_hetero_dict_linear_jit(): lin = HeteroDictLinear({'v': 16, 'w': 8}, 32) - if torch_geometric.typing.WITH_GMM: - # See: https://github.com/pytorch/pytorch/pull/97960 - with pytest.raises(RuntimeError, match="Unknown builtin op"): - jit = torch.jit.script(lin) - else: - jit = torch.jit.script(lin) - assert len(jit(x_dict)) == 2 + jit = torch.jit.script(lin) + assert len(jit(x_dict)) == 2 @withCUDA diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index ce005242045c..6719ffe21059 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -373,7 +373,7 @@ def forward( """ out_dict = {} - if torch_geometric.typing.WITH_GMM: + if torch_geometric.typing.WITH_GMM and not torch.jit.is_scripting(): xs, weights, biases = [], [], [] for key, lin in self.lins.items(): if key in x_dict: From 38da3c68f0be67feb6cc77584ee68bcd32059739 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Thu, 25 May 2023 23:17:59 -0700 Subject: [PATCH 1219/2432] `HeteroDictLinear` heuristic (#7430) ``` Loop Times: {4: 0.00011243343353271484, 8: 0.00021158695220947266, 16: 0.0007293605804443359, 20: 0.0013348865509033203, 21: 0.0015002059936523437, 22: 0.001708526611328125, 23: 0.0018759918212890624, 24: 0.0020225000381469726, 32: 0.003490099906921387, 40: 0.004931988716125488, 45: 0.005780115127563477, 50: 0.006725430488586426, 55: 0.007553043365478515, 60: 0.008383927345275878, 64: 0.009244050979614258, 128: 0.01996610164642334, 256: 0.04032662868499756, 512: 0.0806538200378418, 1024: 0.16130864143371582} Dict Times: {4: 0.00010602951049804688, 8: 0.00021188735961914063, 16: 0.0007947540283203125, 20: 0.0012229681015014648, 21: 0.0012968969345092773, 22: 0.0014571046829223633, 23: 0.0015320730209350585, 24: 0.0016141510009765624, 32: 0.002482771873474121, 40: 0.0033274221420288084, 45: 0.0039056396484375, 50: 
0.004541130065917969, 55: 0.005016393661499023, 60: 0.0055756521224975585, 64: 0.006009225845336914, 128: 0.012227153778076172, 256: 0.024424519538879395, 512: 0.048862857818603514, 1024: 0.09928223609924317} ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- torch_geometric/nn/dense/linear.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 6719ffe21059..761accdffc4f 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -373,7 +373,10 @@ def forward( """ out_dict = {} - if torch_geometric.typing.WITH_GMM and not torch.jit.is_scripting(): + # Only apply fused kernel for more than 10 types, otherwise default + # back to sequential computation (which is faster for these cases). + if (torch_geometric.typing.WITH_GMM and not torch.jit.is_scripting() + and len(x_dict) >= 10): xs, weights, biases = [], [], [] for key, lin in self.lins.items(): if key in x_dict: From 7b07d38c9b12eeb99aa08cf7222cb4102d1c71dc Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Sat, 27 May 2023 09:52:20 +0100 Subject: [PATCH 1220/2432] Fix error message when invalid values are passed to `LinkNeighborLoader` (#7440) The main change is: ```diff - while 'input_time' is + while 'time_attr' is ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- torch_geometric/loader/link_neighbor_loader.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 997014e52fa7..955f4b862f82 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -212,8 +212,9 @@ def __init__( f"Received conflicting 'edge_label_time' and 'time_attr' " f"arguments: 'edge_label_time' is " f"{'set' if edge_label_time is not None else 'not set'} " - f"while 'input_time' is " - f"{'set' if time_attr is not None else 'not set'}.") + f"while 'time_attr' is " + f"{'set' if time_attr is not None else 'not set'}. 
" + f"Both arguments must be provided for temporal sampling.") if neighbor_sampler is None: neighbor_sampler = NeighborSampler( From 9e06952e5925cee9daa0ffcfb061ecd923621f20 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 27 May 2023 12:42:13 +0200 Subject: [PATCH 1221/2432] Drop bold text in `print(HeteroData)` (#7446) Not supported by every terminal :( --- torch_geometric/data/data.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index dcba8718707b..06e2b371f8a5 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -974,15 +974,12 @@ def size_repr(key: Any, value: Any, indent: int = 0) -> str: out = '{ ' + ', '.join(lines) + ' }' elif isinstance(value, Mapping): lines = [size_repr(k, v, indent + 2) for k, v in value.items()] - out = '{\n' + ',\n'.join(lines) + '\n' + pad + '}' + out = '{\n' + ',\n'.join(lines) + ',\n' + pad + '}' else: out = str(value) key = str(key).replace("'", '') - if isinstance(value, BaseStorage): - return f'{pad}\033[1m{key}\033[0m={out}' - else: - return f'{pad}{key}={out}' + return f'{pad}{key}={out}' def warn_or_raise(msg: str, raise_on_error: bool = True): From 801723efacee5ad2597256ba0d9934600512e626 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 27 May 2023 13:20:34 +0200 Subject: [PATCH 1222/2432] Sparse `cross_entropy` implementation (#7447) --- CHANGELOG.md | 1 + test/utils/test_cross_entropy.py | 24 +++++++++++++++++++++ torch_geometric/utils/cross_entropy.py | 29 ++++++++++++++++++++++++++ 3 files changed, 54 insertions(+) create mode 100644 test/utils/test_cross_entropy.py create mode 100644 torch_geometric/utils/cross_entropy.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 03674baac49f..f3a3959726da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447)) - Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398)) - Added the `PMLP` model ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370)) - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) diff --git a/test/utils/test_cross_entropy.py b/test/utils/test_cross_entropy.py new file mode 100644 index 000000000000..c7fb1c7bdc22 --- /dev/null +++ b/test/utils/test_cross_entropy.py @@ -0,0 +1,24 @@ +import torch +import torch.nn.functional as F + +from torch_geometric.utils.cross_entropy import sparse_cross_entropy + + +def test_sparse_cross_entropy_multiclass(): + x = torch.randn(5, 5) + y = torch.eye(5) + edge_label_index = y.nonzero().t() + + expected = F.cross_entropy(x, y) + out = sparse_cross_entropy(x, edge_label_index) + assert torch.allclose(expected, out) + + +def test_sparse_cross_entropy_multilabel(): + x = torch.randn(5, 8) + y = torch.randint_like(x, 0, 2) + edge_label_index = y.nonzero().t() + + expected = F.cross_entropy(x, y) + out = sparse_cross_entropy(x, edge_label_index) + assert torch.allclose(expected, out) diff --git a/torch_geometric/utils/cross_entropy.py b/torch_geometric/utils/cross_entropy.py new file mode 100644 index 000000000000..a05793892859 --- /dev/null +++ b/torch_geometric/utils/cross_entropy.py @@ -0,0 +1,29 @@ +from torch import Tensor + + +def sparse_cross_entropy(inputs: Tensor, edge_label_index: Tensor) -> Tensor: + r"""A sparse-label variant of :func:`torch.nn.functional.cross_entropy`. + In particular, the binary target matrix is solely given by sparse indices + :obj:`edge_label_index`. + + Args: + inputs (torch.Tensor): The predicted unnormalized logits of shape + :obj:`[batch_size, num_classes]`. + edge_index (torch.Tensor): The sparse ground-truth indices of + shape :obj:`[2, num_labels]`. + + :rtype: :class:`torch.Tensor` + + Example: + + >>> inputs = torch.randn(2, 3) + >>> edge_label_index = torch.tensor([[0, 0, 1], + ... 
[0, 1, 2]]) + >>> sparse_cross_entropy(inputs, edge_label_index) + tensor(1.2919) + """ + assert inputs.dim() == 2 + logsumexp = inputs.logsumexp(dim=-1) + values = inputs[edge_label_index[0], edge_label_index[1]] + out = -values + logsumexp[edge_label_index[0]] + return out.sum() / inputs.size(0) From f2957584f04aa3a631baa79a303426e359f00310 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 27 May 2023 15:18:13 +0200 Subject: [PATCH 1223/2432] Improve `import torch_geometric` speed (#7448) --- test/nn/test_model_hub.py | 28 +++++++++---------- torch_geometric/datasets/pascal.py | 9 +++++- .../datasets/willow_object_class.py | 9 +++++- torch_geometric/nn/__init__.py | 2 -- torch_geometric/nn/model_hub.py | 14 ++++++---- .../transforms/feature_propagation.py | 3 +- 6 files changed, 40 insertions(+), 25 deletions(-) diff --git a/test/nn/test_model_hub.py b/test/nn/test_model_hub.py index 40c280a737e3..f8404ffdd092 100644 --- a/test/nn/test_model_hub.py +++ b/test/nn/test_model_hub.py @@ -5,14 +5,14 @@ import pytest import torch +from torch_geometric.nn import GCN from torch_geometric.nn.model_hub import PyGModelHubMixin -from torch_geometric.nn.models import GCN from torch_geometric.testing import withPackage -REPO_NAME = "pyg_hugging_test" +REPO_NAME = 'pyg_hugging_test' MODEL_NAME = 'pyg_test_model' DATASET_NAME = 'pyg_dataset' -CONFIG = {"hello": "world"} +CONFIG = {'hello': 'world'} class DummyModel(GCN, PyGModelHubMixin): @@ -30,23 +30,23 @@ def model(): def test_model_init(): model = DummyModel( MODEL_NAME, DATASET_NAME, model_kwargs={ - **CONFIG, "tensor": torch.Tensor([1, 2, 3]) + **CONFIG, 'tensor': torch.Tensor([1, 2, 3]) }) assert model.model_config == CONFIG @withPackage('huggingface_hub') def test_save_pretrained(model, tmp_path): - save_directory = f"{str(tmp_path / REPO_NAME)}" + save_directory = f'{str(tmp_path / REPO_NAME)}' model.save_pretrained(save_directory) files = os.listdir(save_directory) - assert "model.pth" in files + assert 'model.pth' in files assert len(files) >= 1 @withPackage('huggingface_hub') def test_save_pretrained_internal(model, tmp_path): - save_directory = f"{str(tmp_path / REPO_NAME)}" + save_directory = f'{str(tmp_path / REPO_NAME)}' model._save_pretrained = Mock() model.save_pretrained(save_directory) model._save_pretrained.assert_called_with(Path(save_directory)) @@ -54,7 +54,7 @@ def test_save_pretrained_internal(model, tmp_path): @withPackage('huggingface_hub') def test_save_pretrained_with_push_to_hub(model, tmp_path): - save_directory = f"{str(tmp_path / REPO_NAME)}" + save_directory = f'{str(tmp_path / REPO_NAME)}' model.push_to_hub = Mock() model.construct_model_card = Mock() @@ -66,9 +66,9 @@ def test_save_pretrained_with_push_to_hub(model, tmp_path): model.construct_model_card.assert_called_with(MODEL_NAME, DATASET_NAME) # Push to hub with repo_id - model.save_pretrained(save_directory, push_to_hub=True, repo_id="CustomID", + model.save_pretrained(save_directory, push_to_hub=True, repo_id='CustomID', config=CONFIG) - model.push_to_hub.assert_called_with(repo_id="CustomID", config=CONFIG) + model.push_to_hub.assert_called_with(repo_id='CustomID', config=CONFIG) # Push to hub with default repo_id (based on dir name) model.save_pretrained(save_directory, push_to_hub=True, config=CONFIG) @@ -77,7 +77,7 @@ def test_save_pretrained_with_push_to_hub(model, tmp_path): @withPackage('huggingface_hub') def test_from_pretrained(model, tmp_path): - save_directory = f"{str(tmp_path / REPO_NAME)}" + save_directory = f'{str(tmp_path / REPO_NAME)}' 
model.save_pretrained(save_directory) model = model.from_pretrained(save_directory) @@ -87,9 +87,9 @@ def test_from_pretrained(model, tmp_path): @withPackage('huggingface_hub') def test_from_pretrained_internal(model, monkeypatch): hf_hub_download = Mock(side_effect='model') - monkeypatch.setattr("torch_geometric.nn.model_hub.hf_hub_download", + monkeypatch.setattr('torch_geometric.nn.model_hub.hf_hub_download', hf_hub_download) - monkeypatch.setattr("torch_geometric.nn.model_hub.torch.load", + monkeypatch.setattr('torch_geometric.nn.model_hub.torch.load', lambda x, **kwargs: {'state_dict': 1}) model = model._from_pretrained( @@ -103,7 +103,7 @@ def test_from_pretrained_internal(model, monkeypatch): token=False, dataset_name=DATASET_NAME, model_name=MODEL_NAME, - map_location="cpu", + map_location='cpu', strict=False, **CONFIG, ) diff --git a/torch_geometric/datasets/pascal.py b/torch_geometric/datasets/pascal.py index c83ea6b6476c..94284aa27344 100644 --- a/torch_geometric/datasets/pascal.py +++ b/torch_geometric/datasets/pascal.py @@ -52,6 +52,9 @@ class PascalVOCKeypoints(InMemoryDataset): :obj:`torch_geometric.data.Data` object and returns a boolean value, indicating whether the data object should be included in the final dataset. (default: :obj:`None`) + device (str or torch.device, optional): The device to use for + processing the raw data. If set to :obj:`None`, will utilize + GPU-processing if available. (default: :obj:`None`) """ image_url = ('http://host.robots.ox.ac.uk/pascal/VOC/voc2011/' 'VOCtrainval_25-May-2011.tar') @@ -68,7 +71,6 @@ class PascalVOCKeypoints(InMemoryDataset): 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] - device = 'cuda' if torch.cuda.is_available() else 'cpu' batch_size = 32 def __init__( @@ -79,9 +81,14 @@ def __init__( transform: Optional[Callable] = None, pre_transform: Optional[Callable] = None, pre_filter: Optional[Callable] = None, + device: Optional[str] = None, ): + if device is None: + device = 'cuda' if torch.cuda.is_available() else 'cpu' + self.category = category.lower() assert self.category in self.categories + self.device = device super().__init__(root, transform, pre_transform, pre_filter) path = self.processed_paths[0] if train else self.processed_paths[1] self.data, self.slices = torch.load(path) diff --git a/torch_geometric/datasets/willow_object_class.py b/torch_geometric/datasets/willow_object_class.py index 7e67efe39c2a..9207d47c715d 100644 --- a/torch_geometric/datasets/willow_object_class.py +++ b/torch_geometric/datasets/willow_object_class.py @@ -40,13 +40,15 @@ class WILLOWObjectClass(InMemoryDataset): :obj:`torch_geometric.data.Data` object and returns a boolean value, indicating whether the data object should be included in the final dataset. (default: :obj:`None`) + device (str or torch.device, optional): The device to use for + processing the raw data. If set to :obj:`None`, will utilize + GPU-processing if available. 
(default: :obj:`None`) """ url = ('http://www.di.ens.fr/willow/research/graphlearning/' 'WILLOW-ObjectClass_dataset.zip') categories = ['face', 'motorbike', 'car', 'duck', 'winebottle'] - device = 'cuda' if torch.cuda.is_available() else 'cpu' batch_size = 32 def __init__( @@ -56,9 +58,14 @@ def __init__( transform: Optional[Callable] = None, pre_transform: Optional[Callable] = None, pre_filter: Optional[Callable] = None, + device: Optional[str] = None, ): + if device is None: + device = 'cuda' if torch.cuda.is_available() else 'cpu' + assert category.lower() in self.categories self.category = category + self.device = device super().__init__(root, transform, pre_transform, pre_filter) self.data, self.slices = torch.load(self.processed_paths[0]) diff --git a/torch_geometric/nn/__init__.py b/torch_geometric/nn/__init__.py index fb6c56b1c5e3..5c615d6e9b45 100644 --- a/torch_geometric/nn/__init__.py +++ b/torch_geometric/nn/__init__.py @@ -5,7 +5,6 @@ from .to_hetero_with_bases_transformer import to_hetero_with_bases from .to_fixed_size_transformer import to_fixed_size from .encoding import PositionalEncoding, TemporalEncoding -from .model_hub import PyGModelHubMixin from .summary import summary from .aggr import * # noqa @@ -28,6 +27,5 @@ 'to_fixed_size', 'PositionalEncoding', 'TemporalEncoding', - 'PyGModelHubMixin', 'summary', ] diff --git a/torch_geometric/nn/model_hub.py b/torch_geometric/nn/model_hub.py index f0a4ca68c49c..89d0a4dbef1f 100644 --- a/torch_geometric/nn/model_hub.py +++ b/torch_geometric/nn/model_hub.py @@ -23,7 +23,8 @@ class PyGModelHubMixin(ModelHubMixin): .. code-block:: python from torch_geometric.datasets import Planetoid - from torch_geometric.nn import Node2Vec, PyGModelHubMixin + from torch_geometric.nn import Node2Vec + from torch_geometric.nn.model_hub import PyGModelHubMixin # Define your class with the mixin: class N2V(Node2Vec, PyGModelHubMixin): @@ -69,7 +70,8 @@ def __init__(self,model_name, dataset_name, model_kwargs): """ def __init__(self, model_name: str, dataset_name: str, model_kwargs: Dict): ModelHubMixin.__init__(self) - # Huggingface Hub api only accepts saving the config as a dict. + + # Huggingface Hub API only accepts saving the config as a dict. 
# If the model is instantiated with non-native python types # such as torch Tensors (node2vec being an example), we have to remove # these as they are not json serialisable @@ -95,7 +97,7 @@ def construct_model_card(self, model_name: str, dataset_name: str) -> Any: def _save_pretrained(self, save_directory: Union[Path, str]): path = os.path.join(save_directory, MODEL_WEIGHTS_NAME) - model_to_save = self.module if hasattr(self, "module") else self + model_to_save = self.module if hasattr(self, 'module') else self torch.save(model_to_save.state_dict(), path) def save_pretrained(self, save_directory: Union[str, Path], @@ -144,9 +146,9 @@ def _from_pretrained( resume_download, local_files_only, token, - dataset_name="", - model_name="", - map_location="cpu", + dataset_name='', + model_name='', + map_location='cpu', strict=False, **model_kwargs, ): diff --git a/torch_geometric/transforms/feature_propagation.py b/torch_geometric/transforms/feature_propagation.py index 56f12b7dc311..26a22de2a753 100644 --- a/torch_geometric/transforms/feature_propagation.py +++ b/torch_geometric/transforms/feature_propagation.py @@ -1,8 +1,8 @@ from torch import Tensor +import torch_geometric from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform -from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.transforms import BaseTransform from torch_geometric.utils import is_torch_sparse_tensor, to_torch_csc_tensor @@ -43,6 +43,7 @@ def __init__(self, missing_mask: Tensor, num_iterations: int = 40): def forward(self, data: Data) -> Data: assert 'edge_index' in data or 'adj_t' in data assert data.x.size() == self.missing_mask.size() + gcn_norm = torch_geometric.nn.conv.gcn_conv.gcn_norm missing_mask = self.missing_mask.to(data.x.device) known_mask = ~missing_mask From fd91f26dd426116510dc657356095ed24b949bf9 Mon Sep 17 00:00:00 2001 From: Karuna Bhaila <71278219+karuna-bhaila@users.noreply.github.com> Date: Sat, 27 May 2023 12:01:41 -0500 Subject: [PATCH 1224/2432] Add Douban, Flixster, and Yahoo-Music datasets from `IGMC` (#7441) `Unable to open file (file signature not found)` error when opening .mat files downloaded using `download_url` Code runs on manually downloaded files from the same source --------- Co-authored-by: Karuna Bhaila Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/igmc_dataset.py | 124 +++++++++++++++++++++++ 3 files changed, 127 insertions(+) create mode 100644 torch_geometric/datasets/igmc_dataset.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f3a3959726da..1b7892f0f448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) - Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447)) - Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398)) - Added the `PMLP` model ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 88796c3577b4..61355da802db 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -81,6 +81,7 @@ from .last_fm import LastFM from .hgb_dataset import HGBDataset from .taobao import Taobao +from .igmc_dataset import IGMCDataset from .fake import FakeDataset, FakeHeteroDataset from .sbm_dataset import StochasticBlockModelDataset @@ -180,6 +181,7 @@ 'LastFM', 'HGBDataset', 'Taobao', + 'IGMCDataset', ] synthetic_datasets = [ 'FakeDataset', diff --git a/torch_geometric/datasets/igmc_dataset.py b/torch_geometric/datasets/igmc_dataset.py new file mode 100644 index 000000000000..799d61a55807 --- /dev/null +++ b/torch_geometric/datasets/igmc_dataset.py @@ -0,0 +1,124 @@ +import os.path as osp +from typing import Callable, Optional + +import torch +from torch import Tensor + +from torch_geometric.data import HeteroData, InMemoryDataset, download_url + + +class IGMCDataset(InMemoryDataset): + r"""The user-item heterogeneous rating datasets :obj:`"Douban"`, + :obj:`"Flixster"` and :obj:`"Yahoo-Music"` from the `"Inductive Matrix + Completion Based on Graph Neural Networks" + `_ paper. + + Nodes represent users and items. + Edges and features between users and items represent a (training) rating of + the item given by the user. + + Args: + root (str): Root directory where the dataset should be saved. + name (str): The name of the dataset (:obj:`"Douban"`, + :obj:`"Flixster"`, :obj:`"Yahoo-Music"`). + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + being saved to disk. 
(default: :obj:`None`) + """ + url = 'https://github.com/muhanzhang/IGMC/raw/master/raw_data' + + def __init__( + self, + root: str, + name: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + self.name = name.lower().replace('-', '_') + assert self.name in ['flixster', 'douban', 'yahoo_music'] + + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0], data_cls=HeteroData) + + @property + def raw_dir(self) -> str: + return osp.join(self.root, self.name, 'raw') + + @property + def processed_dir(self) -> str: + return osp.join(self.root, self.name, 'processed') + + @property + def raw_file_names(self) -> str: + return 'training_test_dataset.mat' + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + path = f'{self.url}/{self.name}/training_test_dataset.mat' + download_url(http://github.com/path,%20self.raw_dir) + + @staticmethod + def load_matlab_file(path_file: str, name: str) -> Tensor: + import h5py + import numpy as np + + db = h5py.File(path_file, 'r') + out = torch.from_numpy(np.asarray(db[name])).to(torch.float).t() + db.close() + + return out + + def process(self): + data = HeteroData() + + M = self.load_matlab_file(self.raw_paths[0], 'M') + + if self.name == 'flixster': + user_x = self.load_matlab_file(self.raw_paths[0], 'W_users') + item_x = self.load_matlab_file(self.raw_paths[0], 'W_movies') + elif self.name == 'douban': + user_x = self.load_matlab_file(self.raw_paths[0], 'W_users') + item_x = torch.eye(M.size(1)) + elif self.name == 'yahoo_music': + user_x = torch.eye(M.size(0)) + item_x = self.load_matlab_file(self.raw_paths[0], 'W_tracks') + + data['user'].x = user_x + data['item'].x = item_x + + train_mask = self.load_matlab_file(self.raw_paths[0], 'Otraining') + train_mask = train_mask.to(torch.bool) + + edge_index = train_mask.nonzero().t() + rating = M[edge_index[0], edge_index[1]] + + data['user', 'rates', 'item'].edge_index = edge_index + data['user', 'rates', 'item'].rating = rating + + data['item', 'rated_by', 'user'].edge_index = edge_index.flip([0]) + data['item', 'rated_by', 'user'].rating = rating + + test_mask = self.load_matlab_file(self.raw_paths[0], 'Otest') + test_mask = test_mask.to(torch.bool) + + edge_label_index = test_mask.nonzero().t() + edge_label = M[edge_label_index[0], edge_label_index[1]] + + data['user', 'rates', 'item'].edge_label_index = edge_label_index + data['user', 'rates', 'item'].edge_label = edge_label + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}(name={self.name})' From cb3734cd39bafb6a3d0d9c098c1702de942b89b0 Mon Sep 17 00:00:00 2001 From: Aniket Saxena <92912434+fork123aniket@users.noreply.github.com> Date: Sat, 27 May 2023 22:44:26 +0530 Subject: [PATCH 1225/2432] Make `GraphMaskExplainer` independent of `layer_type` (#7445) This PR is related to the points discussed in #7271. The code associated with this PR is `layer_type` independent and seeks inclusion in the main library (`torch_geometric.explain`). Furthermore, have included a few examples, yet please let me know if more examples are necessary to be included. 
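For reference, a minimal, self-contained sketch of the layer-type-agnostic call (the toy graph, the `GCN` backbone and the `model_config` values below are placeholder assumptions for illustration, not part of this PR):

```python
import torch

from torch_geometric.contrib.explain import GraphMaskExplainer
from torch_geometric.explain import Explainer
from torch_geometric.nn import GCN

# Placeholder data and model -- any two-layer message-passing model works,
# since `GraphMaskExplainer` no longer needs a `layer_type` argument:
x = torch.randn(8, 16)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
model = GCN(in_channels=16, hidden_channels=32, num_layers=2, out_channels=7)

explainer = Explainer(
    model=model,
    algorithm=GraphMaskExplainer(2, epochs=5),
    explanation_type='model',
    node_mask_type='attributes',
    edge_mask_type='object',
    model_config=dict(  # assumed raw-logit node-classification setup
        mode='multiclass_classification',
        task_level='node',
        return_type='raw',
    ),
)
explanation = explainer(x, edge_index, index=1)
```

The same `Explainer` configuration is reused for both the GCN- and GAT-based models in the updated example script, which is the point of dropping `layer_type`.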
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/contrib/graphmask_explainer.py | 4 +- .../contrib/explain/graphmask_explainer.py | 62 +++++++++++-------- 3 files changed, 39 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b7892f0f448..83ce98aebdf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Removed +- Removed `layer_type` argument in `contrib.explain.GraphMaskExplainer` ([#7445](https://github.com/pyg-team/pytorch_geometric/pull/7445)) - Replaced `FastHGTConv` with `HGTConv` ([#7117](https://github.com/pyg-team/pytorch_geometric/pull/7117)) ## [2.3.0] - 2023-03-23 diff --git a/examples/contrib/graphmask_explainer.py b/examples/contrib/graphmask_explainer.py index 2a007d16ee69..906fa4dfd367 100644 --- a/examples/contrib/graphmask_explainer.py +++ b/examples/contrib/graphmask_explainer.py @@ -43,7 +43,7 @@ def forward(self, x, edge_index): explainer = Explainer( model=model, - algorithm=GraphMaskExplainer(2, epochs=5, layer_type='GCN'), + algorithm=GraphMaskExplainer(2, epochs=5), explanation_type='model', node_mask_type='attributes', edge_mask_type='object', @@ -87,7 +87,7 @@ def forward(self, x, edge_index): explainer = Explainer( model=model, - algorithm=GraphMaskExplainer(2, epochs=5, layer_type='GAT'), + algorithm=GraphMaskExplainer(2, epochs=5), explanation_type='model', node_mask_type='attributes', edge_mask_type='object', diff --git a/torch_geometric/contrib/explain/graphmask_explainer.py b/torch_geometric/contrib/explain/graphmask_explainer.py index 8d95313a1db8..d0fa742b9cf2 100644 --- a/torch_geometric/contrib/explain/graphmask_explainer.py +++ b/torch_geometric/contrib/explain/graphmask_explainer.py @@ -51,7 +51,6 @@ class GraphMaskExplainer(ExplainerAlgorithm): the predictions made by a GNN. .. note:: - For an example of using :class:`GraphMaskExplainer`, see `examples/contrib/graphmask_explainer.py Tensor: + def explain( + self, + model: torch.nn.Module, + *, + index: Optional[Union[int, Tensor]] = None, + ) -> Tensor: - if not isinstance(index, Tensor) and not isinstance(index, int) \ - and index is not None: + if (not isinstance(index, Tensor) and not isinstance(index, int) + and index is not None): raise ValueError("'index' parameter can only be a 'Tensor', " "'integer' or set to 'None' instead.") @@ -490,8 +504,7 @@ def explain(self, model: torch.nn.Module, *, if i == 0: edge_weight = sampling_weights else: - if (edge_weight.size(-1) != sampling_weights.size(-1) - and self.layer_type == 'GAT'): + if edge_weight.size(-1) != sampling_weights.size(-1): sampling_weights = F.pad( input=sampling_weights, pad=(0, edge_weight.size(-1) - @@ -508,6 +521,3 @@ def explain(self, model: torch.nn.Module, *, edge_mask = torch.mean(edge_mask, 0) return edge_mask - - def __repr__(self): - return f'{self.__class__.__name__}()' From 2ef8d1e229ebf03fed8c9f7812ed8bed68a31485 Mon Sep 17 00:00:00 2001 From: Aakash Thatte <84656834+sky-2002@users.noreply.github.com> Date: Sat, 27 May 2023 23:24:23 +0530 Subject: [PATCH 1226/2432] Add support for approximate k-NN (#7421) As mentioned in [this](https://github.com/pyg-team/pytorch_geometric/issues/7420) issue, I am adding code to support approximate nearest neighbour graphs. 
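A minimal usage sketch, mirroring the added tests (the toy coordinates are made up; `pynndescent` must be installed for both calls):

```python
import torch

from torch_geometric.nn import approx_knn, approx_knn_graph

x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, 1.0], [1.0, -1.0]])
y = torch.tensor([[1.0, 0.0], [-1.0, 0.0]])

# For every point in `y`, find its 2 approximate nearest neighbors in `x`:
edge_index = approx_knn(x, y, k=2)

# Build an approximate k-NN graph over `x` itself:
edge_index = approx_knn_graph(x, k=2, flow='source_to_target')
```

Both functions follow the interface of `knn`/`knn_graph`, including the optional batch vectors, but trade exactness for scalability by querying a `pynndescent` NN-descent index.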
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + pyproject.toml | 1 + test/nn/pool/test_approx_knn.py | 61 +++++++++++++++ torch_geometric/nn/pool/__init__.py | 3 + torch_geometric/nn/pool/approx_knn.py | 108 ++++++++++++++++++++++++++ 5 files changed, 174 insertions(+) create mode 100644 test/nn/pool/test_approx_knn.py create mode 100644 torch_geometric/nn/pool/approx_knn.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 83ce98aebdf2..b00821d1b895 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) - Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447)) - Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398)) diff --git a/pyproject.toml b/pyproject.toml index 2402bfad81f6..5155c92022b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -81,6 +81,7 @@ full = [ "graphviz", "tabulate", "matplotlib", + "pynndescent", "torchmetrics", "scikit-image", "pytorch-memlab", diff --git a/test/nn/pool/test_approx_knn.py b/test/nn/pool/test_approx_knn.py new file mode 100644 index 000000000000..b1ada9038c36 --- /dev/null +++ b/test/nn/pool/test_approx_knn.py @@ -0,0 +1,61 @@ +import warnings + +import torch + +from torch_geometric.nn import approx_knn, approx_knn_graph +from torch_geometric.testing import onlyFullTest, withPackage + + +def to_set(edge_index): + return set([(i, j) for i, j in edge_index.t().tolist()]) + + +@onlyFullTest # JIT compile makes this test too slow :( +@withPackage('pynndescent') +def test_approx_knn(): + warnings.filterwarnings('ignore', '.*find n_neighbors.*') + + x = torch.tensor([ + [-1.0, -1.0], + [-1.0, +1.0], + [+1.0, +1.0], + [+1.0, -1.0], + [-1.0, -1.0], + [-1.0, +1.0], + [+1.0, +1.0], + [+1.0, -1.0], + ]) + y = torch.tensor([ + [+1.0, 0.0], + [-1.0, 0.0], + ]) + + batch_x = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1]) + batch_y = torch.tensor([0, 1]) + + edge_index = approx_knn(x, y, 2) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)]) + + edge_index = approx_knn(x, y, 2, batch_x, batch_y) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) + + +@onlyFullTest # JIT compile makes this test too slow :( +@withPackage('pynndescent') +def test_approx_knn_graph(): + warnings.filterwarnings('ignore', '.*find n_neighbors.*') + + x = torch.tensor([ + [-1.0, -1.0], + [-1.0, +1.0], + [+1.0, +1.0], + [+1.0, -1.0], + ]) + + edge_index = approx_knn_graph(x, k=2, flow='target_to_source') + assert to_set(edge_index) == set([(0, 1), (0, 3), (1, 0), (1, 2), (2, 1), + (2, 3), (3, 0), (3, 2)]) + + edge_index = approx_knn_graph(x, k=2, flow='source_to_target') + assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), + (3, 2), (0, 3), (2, 3)]) diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py index 2162ca2b1fcd..9a8cdad56b2b 100644 --- a/torch_geometric/nn/pool/__init__.py +++ b/torch_geometric/nn/pool/__init__.py @@ -15,6 +15,7 @@ from .sag_pool import SAGPooling from .topk_pool import TopKPooling from .voxel_grid import voxel_grid +from .approx_knn import 
approx_knn, approx_knn_graph try: import torch_cluster @@ -328,6 +329,8 @@ def nearest( 'fps', 'knn', 'knn_graph', + 'approx_knn', + 'approx_knn_graph', 'radius', 'radius_graph', 'nearest', diff --git a/torch_geometric/nn/pool/approx_knn.py b/torch_geometric/nn/pool/approx_knn.py new file mode 100644 index 000000000000..67e1031c8f77 --- /dev/null +++ b/torch_geometric/nn/pool/approx_knn.py @@ -0,0 +1,108 @@ +import torch +from torch import Tensor + + +def approx_knn( + x: Tensor, + y: Tensor, + k: int, + batch_x: Tensor = None, + batch_y: Tensor = None, +) -> Tensor: # pragma: no cover + r"""Finds for each element in :obj:`y` the :obj:`k` approximated nearest + points in :obj:`x`. + + .. note:: + + Approximated :math:`k`-nearest neighbor search is performed via the + `pynndescent `_ library. + + Args: + x (torch.Tensor): Node feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. + y (torch.Tensor): Node feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{M \times F}`. + k (int): The number of neighbors. + batch_x (torch.Tensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + node to a specific example. (default: :obj:`None`) + batch_y (torch.Tensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each + node to a specific example. (default: :obj:`None`) + + :rtype: :class:`torch.Tensor` + """ + from pynndescent import NNDescent + + if batch_x is None: + batch_x = x.new_zeros(x.size(0), dtype=torch.long) + if batch_y is None: + batch_y = y.new_zeros(y.size(0), dtype=torch.long) + + x = x.view(-1, 1) if x.dim() == 1 else x + y = y.view(-1, 1) if y.dim() == 1 else y + + assert x.dim() == 2 and batch_x.dim() == 1 + assert y.dim() == 2 and batch_y.dim() == 1 + assert x.size(1) == y.size(1) + assert x.size(0) == batch_x.size(0) + assert y.size(0) == batch_y.size(0) + + min_xy = min(x.min(), y.min()) + x, y = x - min_xy, y - min_xy + + max_xy = max(x.max(), y.max()) + x, y, = x / max_xy, y / max_xy + + # Concat batch/features to ensure no cross-links between examples exist: + x = torch.cat([x, 2 * x.size(1) * batch_x.view(-1, 1).to(x.dtype)], dim=-1) + y = torch.cat([y, 2 * y.size(1) * batch_y.view(-1, 1).to(y.dtype)], dim=-1) + + index = NNDescent(x.detach().cpu().numpy()) + col, dist = index.query(y.detach().cpu().numpy(), k=k) + dist = torch.from_numpy(dist).view(-1).to(x.device, x.dtype) + col = torch.from_numpy(col).view(-1).to(x.device, torch.long) + row = torch.arange(y.size(0), device=x.device, dtype=torch.long) + row = row.repeat_interleave(k) + mask = ~torch.isinf(dist) + row, col = row[mask], col[mask] + + return torch.stack([row, col], dim=0) + + +def approx_knn_graph( + x: Tensor, + k: int, + batch: Tensor = None, + loop: bool = False, + flow: str = 'source_to_target', +) -> Tensor: # pragma: no cover + r"""Computes graph edges to the nearest approximated :obj:`k` points. + + .. note:: + + Approximated :math:`k`-nearest neighbor search is performed via the + `pynndescent `_ library. + + Args: + x (torch.Tensor): Node feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. + k (int): The number of neighbors. + batch (torch.Tensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + node to a specific example. (default: :obj:`None`) + loop (bool, optional): If :obj:`True`, the graph will contain + self-loops. 
(default: :obj:`False`) + flow (str, optional): The flow direction when using in combination with + message passing (:obj:`"source_to_target"` or + :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) + + :rtype: :class:`torch.Tensor` + """ + assert flow in ['source_to_target', 'target_to_source'] + row, col = approx_knn(x, x, k if loop else k + 1, batch, batch) + row, col = (col, row) if flow == 'source_to_target' else (row, col) + if not loop: + mask = row != col + row, col = row[mask], col[mask] + return torch.stack([row, col], dim=0) From 8770c93f755f377d6e776106115730fa04efe5d0 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Sat, 27 May 2023 20:37:52 +0100 Subject: [PATCH 1227/2432] Add `GDELTLite` dataset (#7442) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/gdelt_lite.py | 91 ++++++++++++++++++++++++++ 3 files changed, 94 insertions(+) create mode 100644 torch_geometric/datasets/gdelt_lite.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b00821d1b895..fef2b85545af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) - Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 61355da802db..6dfa20cd4155 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -36,6 +36,7 @@ from .s3dis import S3DIS from .geometry import GeometricShapes from .bitcoin_otc import BitcoinOTC +from .gdelt_lite import GDELTLite from .icews import ICEWS18 from .gdelt import GDELT from .willow_object_class import WILLOWObjectClass @@ -133,6 +134,7 @@ 'S3DIS', 'GeometricShapes', 'BitcoinOTC', + 'GDELTLite', 'ICEWS18', 'GDELT', 'WILLOWObjectClass', diff --git a/torch_geometric/datasets/gdelt_lite.py b/torch_geometric/datasets/gdelt_lite.py new file mode 100644 index 000000000000..8c83a7f2b699 --- /dev/null +++ b/torch_geometric/datasets/gdelt_lite.py @@ -0,0 +1,91 @@ +import os +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + Data, + InMemoryDataset, + download_url, + extract_zip, +) + + +class GDELTLite(InMemoryDataset): + r"""The (reduced) version of the Global Database of Events, Language, and + Tone (GDELT) dataset used in the `"Do We Really Need Complicated Model + Architectures for Temporal Networks?" `_ + paper, consisting of events collected from 2016 to 2020. + + Each node (actor) holds a 413-dimensional multi-hot feature vector that + represents CAMEO codes attached to the corresponding actor to server. + + Each edge (event) holds a timestamp and a 186-dimensional multi-hot vector + representing CAMEO codes attached to the corresponding event to server. + + Args: + root (str): Root directory where the dataset should be saved. 
+ transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + + **STATS:** + + .. list-table:: + :widths: 10 10 10 10 + :header-rows: 1 + + * - #nodes + - #edges + - #features + - #classes + * - 8,831 + - 1,912,909 + - 413 + - + """ + url = 'https://data.pyg.org/datasets/gdelt_lite.zip' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0]) + + @property + def raw_file_names(self) -> List[str]: + return ['node_features.pt', 'edges.csv', 'edge_features.pt'] + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + path = download_url(http://github.com/self.url,%20self.raw_dir) + extract_zip(path, self.raw_dir) + os.unlink(path) + + def process(self): + import pandas as pd + + x = torch.load(self.raw_paths[0]) + df = pd.read_csv(self.raw_paths[1]) + edge_attr = torch.load(self.raw_paths[2]) + + row = torch.from_numpy(df['src'].values) + col = torch.from_numpy(df['dst'].values) + edge_index = torch.stack([row, col], dim=0) + time = torch.from_numpy(df['time'].values).to(torch.long) + + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, time=time) + data = data if self.pre_transform is None else self.pre_transform(data) + + self.save([data], self.processed_paths[0]) From d64fa4390499c3fa872341e5d5e5d5859d1466d4 Mon Sep 17 00:00:00 2001 From: ZhengHongming888 Date: Sat, 27 May 2023 13:07:23 -0700 Subject: [PATCH 1228/2432] Add `LocalGraphStore` for distributed training (#7451) This code belongs to the part of the whole distributed training for PyG. This class extends from GraphStore and use the store dict to save the graph topology and also use edge_ids dict to save the global eids. We also include the unit test under /test folder to show how the graph edge_index/ edge_ids will be saving/getting and verified. Any comments please let us know. thanks --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/distributed/test_local_graph_store.py | 26 ++++++++++ torch_geometric/distributed/__init__.py | 5 ++ .../distributed/local_graph_store.py | 47 +++++++++++++++++++ 4 files changed, 79 insertions(+) create mode 100644 test/distributed/test_local_graph_store.py create mode 100644 torch_geometric/distributed/__init__.py create mode 100644 torch_geometric/distributed/local_graph_store.py diff --git a/CHANGELOG.md b/CHANGELOG.md index fef2b85545af..15ebe1b9ca5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a `LocalGraphStore` implementation for distributed training ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_local_graph_store.py b/test/distributed/test_local_graph_store.py new file mode 100644 index 000000000000..2047db8ef0f0 --- /dev/null +++ b/test/distributed/test_local_graph_store.py @@ -0,0 +1,26 @@ +import torch + +from torch_geometric.distributed import LocalGraphStore +from torch_geometric.testing import get_random_edge_index + + +def test_local_graph_store(): + graph_store = LocalGraphStore() + + edge_index = get_random_edge_index(100, 100, 300) + edge_id = torch.tensor([1, 2, 3, 5, 8, 4], dtype=torch.int64) + + graph_store.put_edge_index(edge_index, edge_type=None, layout='coo', + size=(100, 100)) + + graph_store.put_edge_id(edge_id, edge_type=None, layout='coo', + size=(100, 100)) + + assert len(graph_store.get_all_edge_attrs()) == 1 + edge_attr = graph_store.get_all_edge_attrs()[0] + assert torch.equal(graph_store.get_edge_index(edge_attr), edge_index) + assert torch.equal(graph_store.get_edge_id(edge_attr), edge_id) + + graph_store.remove_edge_index(edge_attr) + graph_store.remove_edge_id(edge_attr) + assert len(graph_store.get_all_edge_attrs()) == 0 diff --git a/torch_geometric/distributed/__init__.py b/torch_geometric/distributed/__init__.py new file mode 100644 index 000000000000..489e0063de2c --- /dev/null +++ b/torch_geometric/distributed/__init__.py @@ -0,0 +1,5 @@ +from .local_graph_store import LocalGraphStore + +__all__ = classes = [ + 'LocalGraphStore', +] diff --git a/torch_geometric/distributed/local_graph_store.py b/torch_geometric/distributed/local_graph_store.py new file mode 100644 index 000000000000..aa5281bd38fe --- /dev/null +++ b/torch_geometric/distributed/local_graph_store.py @@ -0,0 +1,47 @@ +from typing import Dict, List, Optional, Tuple + +from torch import Tensor + +from torch_geometric.data import EdgeAttr, GraphStore +from torch_geometric.typing import EdgeTensorType + + +class LocalGraphStore(GraphStore): + r"""This class implements the :class:`torch_geometric.data.GraphStore` + interface to act as a local graph store for distributed training. 
+ """ + def __init__(self): + super().__init__() + self._edge_index: Dict[Tuple, EdgeTensorType] = {} + self._edge_id: Dict[Tuple, Tensor] = {} + + @staticmethod + def key(attr: EdgeAttr) -> Tuple: + return (attr.edge_type, attr.layout.value, attr.is_sorted, attr.size) + + def put_edge_id(self, edge_id: Tensor, *args, **kwargs) -> bool: + edge_attr = self._edge_attr_cls.cast(*args, **kwargs) + self._edge_id[self.key(edge_attr)] = edge_id + return True + + def get_edge_id(self, *args, **kwargs) -> Optional[EdgeTensorType]: + edge_attr = self._edge_attr_cls.cast(*args, **kwargs) + return self._edge_id[self.key(edge_attr)] + + def remove_edge_id(self, *args, **kwargs) -> bool: + edge_attr = self._edge_attr_cls.cast(*args, **kwargs) + return self._edge_id.pop(self.key(edge_attr), None) is not None + + def _put_edge_index(self, edge_index: EdgeTensorType, + edge_attr: EdgeAttr) -> bool: + self._edge_index[self.key(edge_attr)] = edge_index + return True + + def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: + return self._edge_index.get(self.key(edge_attr), None) + + def _remove_edge_index(self, edge_attr: EdgeAttr) -> bool: + return self._edge_index.pop(self.key(edge_attr), None) is not None + + def get_all_edge_attrs(self) -> List[EdgeAttr]: + return [EdgeAttr(*key) for key in self._edge_index.keys()] From d8a651cf0f96c41168593565498734be0a931210 Mon Sep 17 00:00:00 2001 From: mszarma Date: Sat, 27 May 2023 23:06:44 +0200 Subject: [PATCH 1229/2432] [Feature] HGAM hetero low-level example (#7425) - enable hetero part for CSR in trim_to_layer - refactor of trim_to_layer utils to add low-level functions - add hgam hetero low-level example --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/hetero/hierarchical_sage.py | 141 +++++++++++++++++++++++++ torch_geometric/utils/trim_to_layer.py | 96 +++++++++-------- 3 files changed, 193 insertions(+), 45 deletions(-) create mode 100644 examples/hetero/hierarchical_sage.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 15ebe1b9ca5a..de26923625aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added a `LocalGraphStore` implementation for distributed training ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) diff --git a/examples/hetero/hierarchical_sage.py b/examples/hetero/hierarchical_sage.py new file mode 100644 index 000000000000..c4ae8ad12d95 --- /dev/null +++ b/examples/hetero/hierarchical_sage.py @@ -0,0 +1,141 @@ +import argparse + +import torch +import torch.nn.functional as F +from tqdm import tqdm + +import torch_geometric.transforms as T +from torch_geometric.datasets import OGB_MAG +from torch_geometric.loader import NeighborLoader +from torch_geometric.nn import HeteroConv, Linear, SAGEConv +from torch_geometric.utils import trim_to_layer + +parser = argparse.ArgumentParser() +parser.add_argument('--device', type=str, default='cuda') +parser.add_argument('--use-sparse-tensor', action='http://github.com/store_true') +args = parser.parse_args() + +device = args.device if torch.cuda.is_available() else 'cpu' + +transforms = [T.ToUndirected(merge=True)] +if args.use_sparse_tensor: + transforms.append(T.ToSparseTensor()) +dataset = OGB_MAG(root='../../data', preprocess='metapath2vec', + transform=T.Compose(transforms)) +data = dataset[0].to(device, 'x', 'y') + + +class HierarchicalHeteroGraphSage(torch.nn.Module): + def __init__(self, edge_types, hidden_channels, out_channels, num_layers): + super().__init__() + + self.convs = torch.nn.ModuleList() + for _ in range(num_layers): + conv = HeteroConv( + { + edge_type: SAGEConv((-1, -1), hidden_channels) + for edge_type in edge_types + }, aggr='sum') + self.convs.append(conv) + + self.lin = Linear(hidden_channels, out_channels) + + def forward(self, x_dict, edge_index_dict, num_sampled_edges_dict, + num_sampled_nodes_dict): + + for i, conv in enumerate(self.convs): + x_dict, edge_index_dict, _ = trim_to_layer( + layer=i, + num_sampled_nodes_per_hop=num_sampled_nodes_dict, + num_sampled_edges_per_hop=num_sampled_edges_dict, + x=x_dict, + edge_index=edge_index_dict, + ) + + x_dict = conv(x_dict, edge_index_dict) + x_dict = {key: x.relu() for key, x in x_dict.items()} + + return self.lin(x_dict['paper']) + + +model = HierarchicalHeteroGraphSage( + edge_types=data.edge_types, + hidden_channels=64, + out_channels=dataset.num_classes, + num_layers=2, +).to(args.device) + +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + +kwargs = {'batch_size': 1024, 'num_workers': 0} +train_loader = NeighborLoader( + data, + num_neighbors=[10] * 2, + shuffle=True, + input_nodes=('paper', data['paper'].train_mask), + **kwargs, +) + +val_loader = NeighborLoader( + data, + num_neighbors=[10] * 2, + shuffle=False, + input_nodes=('paper', data['paper'].val_mask), + **kwargs, +) + + +def train(): + model.train() + + total_examples = total_loss = 0 + for batch in tqdm(train_loader): + batch = batch.to(device) + optimizer.zero_grad() + + out = model( + batch.x_dict, + batch.adj_t_dict + if args.use_sparse_tensor else batch.edge_index_dict, + num_sampled_nodes_dict=batch.num_sampled_nodes_dict, + num_sampled_edges_dict=batch.num_sampled_edges_dict, + ) + + batch_size = batch['paper'].batch_size + loss = F.cross_entropy(out[:batch_size], 
batch['paper'].y[:batch_size]) + loss.backward() + optimizer.step() + + total_examples += batch_size + total_loss += float(loss) * batch_size + + return total_loss / total_examples + + +@torch.no_grad() +def test(loader): + model.eval() + + total_examples = total_correct = 0 + for batch in tqdm(loader): + batch = batch.to(device) + out = model( + batch.x_dict, + batch.adj_t_dict + if args.use_sparse_tensor else batch.edge_index_dict, + num_sampled_nodes_dict=batch.num_sampled_nodes_dict, + num_sampled_edges_dict=batch.num_sampled_edges_dict, + ) + + batch_size = batch['paper'].batch_size + pred = out[:batch_size].argmax(dim=-1) + total_examples += batch_size + total_correct += int((pred == batch['paper'].y[:batch_size]).sum()) + + return total_correct / total_examples + + +for epoch in range(1, 6): + loss = train() + val_acc = test(val_loader) + print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_acc:.4f}') diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index dd2ce60b8c57..025041d2da38 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.typing import ( + Adj, EdgeType, MaybeHeteroEdgeTensor, MaybeHeteroNodeTensor, @@ -17,10 +18,10 @@ def trim_to_layer( layer: int, num_sampled_nodes_per_hop: Union[List[int], Dict[NodeType, List[int]]], num_sampled_edges_per_hop: Union[List[int], Dict[EdgeType, List[int]]], - x: Union[MaybeHeteroNodeTensor], - edge_index: Union[MaybeHeteroEdgeTensor], + x: MaybeHeteroNodeTensor, + edge_index: MaybeHeteroEdgeTensor, edge_attr: Optional[MaybeHeteroEdgeTensor] = None, -) -> Tuple[MaybeHeteroEdgeTensor, MaybeHeteroNodeTensor, +) -> Tuple[MaybeHeteroNodeTensor, MaybeHeteroEdgeTensor, Optional[MaybeHeteroEdgeTensor]]: r"""Trims the :obj:`edge_index` representation, node features :obj:`x` and edge features :obj:`edge_attr` to a minimal-sized representation for the @@ -47,62 +48,31 @@ def trim_to_layer( if layer <= 0: return x, edge_index, edge_attr - # TODO Support `SparseTensor` for heterogeneous graphs. 
if isinstance(num_sampled_edges_per_hop, dict): x = { - k: v.narrow( - dim=0, - start=0, - length=v.size(0) - num_sampled_nodes_per_hop[k][-layer], - ) + k: trim_feat(v, layer, num_sampled_nodes_per_hop[k]) for k, v in x.items() } edge_index = { - k: v.narrow( - dim=1, - start=0, - length=v.size(1) - num_sampled_edges_per_hop[k][-layer], - ) + k: trim_adj(v, layer, num_sampled_nodes_per_hop[k[-1]], + num_sampled_edges_per_hop[k]) for k, v in edge_index.items() } if edge_attr is not None: edge_attr = { - k: v.narrow( - dim=0, - start=0, - length=v.size(0) - num_sampled_edges_per_hop[k][-layer], - ) + k: trim_feat(v, layer, num_sampled_edges_per_hop[k]) for k, v in edge_attr.items() } return x, edge_index, edge_attr - x = x.narrow( - dim=0, - start=0, - length=x.size(0) - num_sampled_nodes_per_hop[-layer], - ) - if edge_attr is not None: - edge_attr = edge_attr.narrow( - dim=0, - start=0, - length=edge_attr.size(0) - num_sampled_edges_per_hop[-layer], - ) - if isinstance(edge_index, Tensor): - edge_index = edge_index.narrow( - dim=1, - start=0, - length=edge_index.size(1) - num_sampled_edges_per_hop[-layer], - ) - return x, edge_index, edge_attr - - elif isinstance(edge_index, SparseTensor): - num_nodes = edge_index.size(0) - num_sampled_nodes_per_hop[-layer] - num_seed_nodes = num_nodes - num_sampled_nodes_per_hop[-(layer + 1)] - edge_index = trim_sparse_tensor(edge_index, num_nodes, num_seed_nodes) + x = trim_feat(x, layer, num_sampled_nodes_per_hop) + edge_index = trim_adj(edge_index, layer, num_sampled_nodes_per_hop, + num_sampled_edges_per_hop) - return x, edge_index, edge_attr + if edge_attr is not None: + edge_attr = trim_feat(edge_attr, layer, num_sampled_edges_per_hop) - raise NotImplementedError + return x, edge_index, edge_attr class TrimToLayer(torch.nn.Module): @@ -112,7 +82,7 @@ def forward( num_sampled_nodes_per_hop: Optional[List[int]], num_sampled_edges_per_hop: Optional[List[int]], x: Tensor, - edge_index: Union[Tensor, SparseTensor], + edge_index: Adj, edge_attr: Optional[Tensor] = None, ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: @@ -141,6 +111,42 @@ def forward( # Helper functions ############################################################ +def trim_feat(x: Tensor, layer: int, num_samples_per_hop: List[int]) -> Tensor: + if layer <= 0: + return x + + return x.narrow( + dim=0, + start=0, + length=x.size(0) - num_samples_per_hop[-layer], + ) + + +def trim_adj( + edge_index: Adj, + layer: int, + num_sampled_nodes_per_hop: List[int], + num_sampled_edges_per_hop: List[int], +) -> Adj: + + if layer <= 0: + return edge_index + + if isinstance(edge_index, Tensor): + return edge_index.narrow( + dim=1, + start=0, + length=edge_index.size(1) - num_sampled_edges_per_hop[-layer], + ) + + elif isinstance(edge_index, SparseTensor): + num_nodes = edge_index.size(0) - num_sampled_nodes_per_hop[-layer] + num_seed_nodes = num_nodes - num_sampled_nodes_per_hop[-(layer + 1)] + return trim_sparse_tensor(edge_index, num_nodes, num_seed_nodes) + + raise ValueError(f"Unsupported 'edge_index' type '{type(edge_index)}'") + + def trim_sparse_tensor(src: SparseTensor, num_nodes: int, num_seed_nodes: None) -> SparseTensor: r"""Trims a :class:`SparseTensor` along both dimensions to only contain From af4360a2ed5490e17b12d06f46bca13e6bbf46e1 Mon Sep 17 00:00:00 2001 From: Marco De Nadai Date: Tue, 30 May 2023 06:56:43 +0200 Subject: [PATCH 1230/2432] Update warning message in `NeighborSampler` (#7456) This PR is just a minor issue with a warning --------- Co-authored-by: Matthias Fey --- 
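The only functional change below is the missing `f` prefix on the warning string, so that the class name is actually interpolated instead of being printed literally. A minimal, standalone sketch of the difference, using a stand-in class name purely for illustration:

```python
import warnings


class Sampler:  # stand-in class, not the real NeighborSampler
    def __init__(self):
        # Without the `f` prefix, the braces are emitted verbatim:
        warnings.warn("Using '{self.__class__.__name__}' without 'pyg-lib'")
        # With the `f` prefix, the actual class name is interpolated:
        warnings.warn(f"Using '{self.__class__.__name__}' without 'pyg-lib'")


Sampler()  # the first warning keeps the placeholder text, the second prints 'Sampler'
```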
torch_geometric/sampler/neighbor_sampler.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index 04ca3ca69477..770e066f9752 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -55,10 +55,10 @@ def __init__( f"`subgraph_type='induced'` instead.") if not torch_geometric.typing.WITH_PYG_LIB and sys.platform == 'linux': - warnings.warn("Using '{self.__class__.__name__}' without a " - "'pyg-lib' installation is deprecated and will be " - "removed soon. Please install 'pyg-lib' for " - "accelerated neighborhood sampling") + warnings.warn(f"Using '{self.__class__.__name__}' without a " + f"'pyg-lib' installation is deprecated and will be " + f"removed soon. Please install 'pyg-lib' for " + f"accelerated neighborhood sampling") self.data_type = DataType.from_data(data) From 58f1339af1a0ef03517c37941db683b2bc883fbe Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 30 May 2023 15:08:37 +0200 Subject: [PATCH 1231/2432] Custom backward function for `sparse_cross_entropy` loss (#7466) Gives some good speed-ups. --- CHANGELOG.md | 2 +- test/utils/test_cross_entropy.py | 18 +++++++++-- torch_geometric/utils/cross_entropy.py | 44 +++++++++++++++++++++++--- 3 files changed, 56 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de26923625aa..72aace80f572 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) -- Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447)) +- Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447), [#7466](https://github.com/pyg-team/pytorch_geometric/pull/7466)) - Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398)) - Added the `PMLP` model ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370)) - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) diff --git a/test/utils/test_cross_entropy.py b/test/utils/test_cross_entropy.py index c7fb1c7bdc22..4782cb4c7143 100644 --- a/test/utils/test_cross_entropy.py +++ b/test/utils/test_cross_entropy.py @@ -5,20 +5,34 @@ def test_sparse_cross_entropy_multiclass(): - x = torch.randn(5, 5) + x = torch.randn(5, 5, requires_grad=True) y = torch.eye(5) edge_label_index = y.nonzero().t() expected = F.cross_entropy(x, y) + expected.backward() + expected_grad = x.grad + + x.grad = None out = sparse_cross_entropy(x, edge_label_index) + out.backward() + assert torch.allclose(expected, out) + assert torch.allclose(expected_grad, x.grad) def test_sparse_cross_entropy_multilabel(): - x = torch.randn(5, 8) + x = torch.randn(4, 4, requires_grad=True) y = torch.randint_like(x, 0, 2) edge_label_index = y.nonzero().t() expected = F.cross_entropy(x, y) + expected.backward() + expected_grad = x.grad + + x.grad = None out = 
sparse_cross_entropy(x, edge_label_index) + out.backward() + assert torch.allclose(expected, out) + assert torch.allclose(expected_grad, x.grad) diff --git a/torch_geometric/utils/cross_entropy.py b/torch_geometric/utils/cross_entropy.py index a05793892859..68ed50ea1315 100644 --- a/torch_geometric/utils/cross_entropy.py +++ b/torch_geometric/utils/cross_entropy.py @@ -1,6 +1,44 @@ +from typing import Tuple + +import torch from torch import Tensor +class SparseCrossEntropy(torch.autograd.Function): + # We implement our own custom autograd function for this to avoid the + # double gradient computation to `inputs`. + @staticmethod + def forward(ctx, inputs: Tensor, edge_label_index: Tensor) -> Tensor: + assert inputs.dim() == 2 + + logsumexp = inputs.logsumexp(dim=-1) + ctx.save_for_backward(inputs, edge_label_index, logsumexp) + + out = inputs[edge_label_index[0], edge_label_index[1]] + out.neg_().add_(logsumexp[edge_label_index[0]]) + + return out.sum() / inputs.size(0) + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, grad_out: Tensor) -> Tuple[Tensor, None]: + inputs, edge_label_index, logsumexp = ctx.saved_tensors + + grad_out = grad_out / inputs.size(0) + + grad_logsumexp = inputs.new_zeros(inputs.size(0)).index_add_( + 0, edge_label_index[0], grad_out.expand(edge_label_index.size(1))) + + # Gradient computation of `logsumexp`: `grad * (self - result).exp()` + grad_input = (inputs - logsumexp.view(-1, 1)) + grad_input.exp_() + grad_input.mul_(grad_logsumexp.view(-1, 1)) + + grad_input[edge_label_index[0], edge_label_index[1]] -= grad_out + + return grad_input, None + + def sparse_cross_entropy(inputs: Tensor, edge_label_index: Tensor) -> Tensor: r"""A sparse-label variant of :func:`torch.nn.functional.cross_entropy`. 
In particular, the binary target matrix is solely given by sparse indices @@ -22,8 +60,4 @@ def sparse_cross_entropy(inputs: Tensor, edge_label_index: Tensor) -> Tensor: >>> sparse_cross_entropy(inputs, edge_label_index) tensor(1.2919) """ - assert inputs.dim() == 2 - logsumexp = inputs.logsumexp(dim=-1) - values = inputs[edge_label_index[0], edge_label_index[1]] - out = -values + logsumexp[edge_label_index[0]] - return out.sum() / inputs.size(0) + return SparseCrossEntropy.apply(inputs, edge_label_index) From 68fa7e783582190dce7bb35150501ab63c5861ce Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 30 May 2023 17:27:54 +0200 Subject: [PATCH 1232/2432] Re-enable induced subgraph sampling with `torch-sparse` implementation (#7468) --- test/loader/test_neighbor_loader.py | 29 ++++++++++++--------- torch_geometric/sampler/neighbor_sampler.py | 11 +++++--- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 5f46593f0ec4..7c4b286e5d06 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -9,6 +9,7 @@ from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv, to_hetero +from torch_geometric.sampler.base import SubgraphType from torch_geometric.testing import ( MyFeatureStore, MyGraphStore, @@ -36,11 +37,12 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): @onlyNeighborSampler -@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +@pytest.mark.parametrize('subgraph_type', list(SubgraphType)) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) @pytest.mark.parametrize('filter_per_worker', [None, True, False]) def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): - if dtype != torch.int64 and not WITH_PYG_LIB: + if (dtype != torch.int64 + and (not WITH_PYG_LIB or subgraph_type == SubgraphType.induced)): return torch.manual_seed(12345) @@ -80,7 +82,7 @@ def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): batch.x[:batch.batch_size], torch.arange(i * batch.batch_size, (i + 1) * batch.batch_size)) - if subgraph_type == 'directional': + if subgraph_type != SubgraphType.bidirectional: assert batch.e_id.size() == (batch.num_edges, ) assert batch.edge_attr.min() >= 0 assert batch.edge_attr.max() < 500 @@ -94,10 +96,11 @@ def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): @onlyNeighborSampler -@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +@pytest.mark.parametrize('subgraph_type', list(SubgraphType)) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) def test_hetero_neighbor_loader_basic(subgraph_type, dtype): - if dtype != torch.int64 and not WITH_PYG_LIB: + if (dtype != torch.int64 + and (not WITH_PYG_LIB or subgraph_type == SubgraphType.induced)): return torch.manual_seed(12345) @@ -173,7 +176,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): assert row.min() >= 0 and row.max() < batch['paper'].num_nodes assert col.min() >= 0 and col.max() < batch['paper'].num_nodes - if subgraph_type != 'bidirectional': + if subgraph_type != SubgraphType.bidirectional: assert batch['paper', 'paper'].e_id.size() == (row.numel(), ) value = batch['paper', 'paper'].edge_attr assert value.min() >= 0 and value.max() < 500 @@ -184,7 +187,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): batch['paper'].x, 
batch['paper'].x, ) - elif subgraph_type != 'directional': + elif subgraph_type != SubgraphType.directional: assert 'e_id' not in batch['paper', 'paper'] assert 'edge_attr' not in batch['paper', 'paper'] @@ -194,7 +197,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): assert row.min() >= 0 and row.max() < batch['paper'].num_nodes assert col.min() >= 0 and col.max() < batch['author'].num_nodes - if subgraph_type != 'bidirectional': + if subgraph_type != SubgraphType.bidirectional: assert batch['paper', 'author'].e_id.size() == (row.numel(), ) value = batch['paper', 'author'].edge_attr assert value.min() >= 500 and value.max() < 1500 @@ -205,7 +208,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): batch['paper'].x, batch['author'].x - 100, ) - elif subgraph_type != 'directional': + elif subgraph_type != SubgraphType.directional: assert 'e_id' not in batch['paper', 'author'] assert 'edge_attr' not in batch['paper', 'author'] @@ -218,7 +221,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): assert row.min() >= 0 and row.max() < batch['author'].num_nodes assert col.min() >= 0 and col.max() < batch['paper'].num_nodes - if subgraph_type != 'bidirectional': + if subgraph_type != SubgraphType.bidirectional: assert batch['author', 'paper'].e_id.size() == (row.numel(), ) value = batch['author', 'paper'].edge_attr assert value.min() >= 1500 and value.max() < 2500 @@ -229,7 +232,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): batch['author'].x - 100, batch['paper'].x, ) - elif subgraph_type != 'directional': + elif subgraph_type != SubgraphType.directional: assert 'e_id' not in batch['author', 'paper'] assert 'edge_attr' not in batch['author', 'paper'] @@ -243,7 +246,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): @onlyNeighborSampler -@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +@pytest.mark.parametrize('subgraph_type', list(SubgraphType)) def test_homo_neighbor_loader_on_cora(get_dataset, subgraph_type): dataset = get_dataset(name='Cora') data = dataset[0] @@ -286,7 +289,7 @@ def forward(self, x, edge_index, edge_weight): @onlyNeighborSampler -@pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) +@pytest.mark.parametrize('subgraph_type', list(SubgraphType)) def test_hetero_neighbor_loader_on_cora(get_dataset, subgraph_type): dataset = get_dataset(name='Cora') data = dataset[0] diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index 770e066f9752..aeae99fb3ec9 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -54,7 +54,8 @@ def __init__( f"'{self.__class__.__name__}' is deprecated. Use " f"`subgraph_type='induced'` instead.") - if not torch_geometric.typing.WITH_PYG_LIB and sys.platform == 'linux': + if (not torch_geometric.typing.WITH_PYG_LIB and sys.platform == 'linux' + and subgraph_type != SubgraphType.induced): warnings.warn(f"Using '{self.__class__.__name__}' without a " f"'pyg-lib' installation is deprecated and will be " f"removed soon. Please install 'pyg-lib' for " @@ -213,7 +214,9 @@ def _sample( r"""Implements neighbor sampling by calling either :obj:`pyg-lib` (if installed) or :obj:`torch-sparse` (if installed) sampling routines.""" if isinstance(seed, dict): # Heterogeneous sampling: - if torch_geometric.typing.WITH_PYG_LIB: + # TODO Support induced subgraph sampling in `pyg-lib`. 
+ if (torch_geometric.typing.WITH_PYG_LIB + and self.subgraph_type != SubgraphType.induced): # TODO (matthias) `return_edge_id` if edge features present # TODO (matthias) Ideally, `seed` inherits dtype from `colptr` out = torch.ops.pyg.neighbor_sample( From 1e062556045605c6bc34dbf1458be24baff05946 Mon Sep 17 00:00:00 2001 From: ZhengHongming888 Date: Wed, 31 May 2023 01:00:11 -0700 Subject: [PATCH 1233/2432] Add `LocalFeatureStore` for distributed training (#7452) This code is part of the overall distributed training support for PyG. The class extends `FeatureStore` and uses a store dict to save the node/edge features, as well as `global_idx` and `id2index` dicts that hold the global node IDs and the mapping between global node IDs and local feature indices; these are used by the sampling and feature lookup stages. We also include a unit test under `test/distributed` that shows how node/edge features are saved and retrieved, verified in two different ways. Any comments are welcome. Thanks. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/distributed/test_local_feature_store.py | 59 ++++++++++ torch_geometric/distributed/__init__.py | 2 + .../distributed/local_feature_store.py | 102 ++++++++++++++++++ .../distributed/local_graph_store.py | 5 +- torch_geometric/testing/feature_store.py | 13 ++- 6 files changed, 172 insertions(+), 11 deletions(-) create mode 100644 test/distributed/test_local_feature_store.py create mode 100644 torch_geometric/distributed/local_feature_store.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 72aace80f572..7ae81a384d3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Added - Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added a `LocalGraphStore` implementation for distributed training ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_local_feature_store.py b/test/distributed/test_local_feature_store.py new file mode 100644 index 000000000000..1ccb15b3e334 --- /dev/null +++ b/test/distributed/test_local_feature_store.py @@ -0,0 +1,59 @@ +import torch + +from torch_geometric.distributed import LocalFeatureStore + + +def test_local_feature_store_global_id(): + store = LocalFeatureStore() + + feat = torch.Tensor([ + [0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3], + [4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7], + [8, 8, 8], + ]) + + kwargs = dict(group_name='part1', attr_name='feat') + part1_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) + part1_feat = feat[part1_global_id] + + store.put_global_id(part1_global_id, **kwargs) + store.put_tensor(part1_feat, **kwargs) + + out = store.get_tensor_from_global_id(index=torch.tensor([3, 8, 4]), + **kwargs) + assert torch.equal(out, feat[torch.tensor([3, 8, 4])]) + + +def test_local_feature_store_utils(): + store = LocalFeatureStore() + + feat = torch.Tensor([ + [0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3], + [4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7], + [8, 8, 8], + ]) + + kwargs = dict(group_name='part1', attr_name='feat') + part1_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) + part1_feat = feat[part1_global_id] + + store.put_tensor(part1_feat, **kwargs) + + assert len(store.get_all_tensor_attrs()) == 1 + attr = store.get_all_tensor_attrs()[0] + assert attr.group_name == 'part1' + assert attr.attr_name == 'feat' + assert attr.index is None + assert store.get_tensor_size(attr) == (6, 3) diff --git a/torch_geometric/distributed/__init__.py b/torch_geometric/distributed/__init__.py index 489e0063de2c..1412f174a53c 100644 --- a/torch_geometric/distributed/__init__.py +++ b/torch_geometric/distributed/__init__.py @@ -1,5 +1,7 @@ +from .local_feature_store import LocalFeatureStore from .local_graph_store import LocalGraphStore __all__ = classes = [ + 'LocalFeatureStore', 'LocalGraphStore', ] diff --git a/torch_geometric/distributed/local_feature_store.py b/torch_geometric/distributed/local_feature_store.py new file mode 100644 index 000000000000..5abbfa2f1f47 --- /dev/null +++ b/torch_geometric/distributed/local_feature_store.py @@ -0,0 +1,102 @@ +import copy +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import torch +from torch import Tensor + +from torch_geometric.data import FeatureStore, TensorAttr +from torch_geometric.data.feature_store import _field_status + + +@dataclass +class LocalTensorAttr(TensorAttr): + r"""Tensor attribute for storing features without :obj:`index`.""" + def __init__( + self, + group_name: Optional[str] = _field_status.UNSET, + attr_name: Optional[str] = _field_status.UNSET, + index=None, + 
): + super().__init__(group_name, attr_name, index) + + +class LocalFeatureStore(FeatureStore): + r"""This class implements the :class:`torch_geometric.data.FeatureStore` + interface to act as a local feature store for distributed training.""" + def __init__(self): + super().__init__(tensor_attr_cls=LocalTensorAttr) + + self._feat: Dict[Tuple[str, str], Tensor] = {} + + # Save the global node/edge IDs: + self._global_id: Dict[Tuple[str, str], Tensor] = {} + + # Save the mapping from global node/edge IDs to indices in `_feat`: + self._global_id_to_index: Dict[Tuple[str, str], Tensor] = {} + + @staticmethod + def key(attr: TensorAttr) -> Tuple[str, str]: + return (attr.group_name, attr.attr_name) + + def put_global_id(self, global_id: Tensor, *args, **kwargs) -> bool: + attr = self._tensor_attr_cls.cast(*args, **kwargs) + self._global_id[self.key(attr)] = global_id + self._set_global_id_to_index(attr) + return True + + def get_global_id(self, *args, **kwargs) -> Optional[Tensor]: + attr = self._tensor_attr_cls.cast(*args, **kwargs) + return self._global_id.get(self.key(attr)) + + def remove_global_id(self, *args, **kwargs) -> bool: + attr = self._tensor_attr_cls.cast(*args, **kwargs) + return self._global_id.pop(self.key(attr), None) is not None + + def _set_global_id_to_index(self, *args, **kwargs): + attr = self._tensor_attr_cls.cast(*args, **kwargs) + global_id = self.get_global_id(attr) + + if global_id is None: + return + + # TODO Compute this mapping without materializing a full-sized tensor: + global_id_to_index = global_id.new_full((int(global_id.max()) + 1, ), + fill_value=-1) + global_id_to_index[global_id] = torch.arange(global_id.numel()) + self._global_id_to_index[self.key(attr)] = global_id_to_index + + def _put_tensor(self, tensor: Tensor, attr: TensorAttr) -> bool: + assert attr.index is None + self._feat[self.key(attr)] = tensor + return True + + def _get_tensor(self, attr: TensorAttr) -> Optional[Tensor]: + tensor = self._feat.get(self.key(attr)) + + if tensor is None: + return None + + if attr.index is None: # Empty indices return the full tensor: + return tensor + + return tensor[attr.index] + + def _remove_tensor(self, attr: TensorAttr) -> bool: + assert attr.index is None + return self._feat.pop(self.key(attr), None) is not None + + def get_tensor_from_global_id(self, *args, **kwargs) -> Optional[Tensor]: + attr = self._tensor_attr_cls.cast(*args, **kwargs) + assert attr.index is not None + + attr = copy.copy(attr) + attr.index = self._global_id_to_index[self.key(attr)][attr.index] + + return self.get_tensor(attr) + + def _get_tensor_size(self, attr: TensorAttr) -> Tuple[int, ...]: + return self._get_tensor(attr).size() + + def get_all_tensor_attrs(self) -> List[LocalTensorAttr]: + return [self._tensor_attr_cls.cast(*key) for key in self._feat.keys()] diff --git a/torch_geometric/distributed/local_graph_store.py b/torch_geometric/distributed/local_graph_store.py index aa5281bd38fe..d83e301a15dd 100644 --- a/torch_geometric/distributed/local_graph_store.py +++ b/torch_geometric/distributed/local_graph_store.py @@ -8,8 +8,7 @@ class LocalGraphStore(GraphStore): r"""This class implements the :class:`torch_geometric.data.GraphStore` - interface to act as a local graph store for distributed training. 
- """ + interface to act as a local graph store for distributed training.""" def __init__(self): super().__init__() self._edge_index: Dict[Tuple, EdgeTensorType] = {} @@ -26,7 +25,7 @@ def put_edge_id(self, edge_id: Tensor, *args, **kwargs) -> bool: def get_edge_id(self, *args, **kwargs) -> Optional[EdgeTensorType]: edge_attr = self._edge_attr_cls.cast(*args, **kwargs) - return self._edge_id[self.key(edge_attr)] + return self._edge_id.get(self.key(edge_attr)) def remove_edge_id(self, *args, **kwargs) -> bool: edge_attr = self._edge_attr_cls.cast(*args, **kwargs) diff --git a/torch_geometric/testing/feature_store.py b/torch_geometric/testing/feature_store.py index 55fa633e11ab..fb74ba9826ec 100644 --- a/torch_geometric/testing/feature_store.py +++ b/torch_geometric/testing/feature_store.py @@ -24,12 +24,12 @@ def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: index = torch.arange(0, tensor.shape[0]) # Store the index: - self.store[MyFeatureStore.key(attr)] = (index, tensor) + self.store[self.key(attr)] = (index, tensor) return True def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: - index, tensor = self.store.get(MyFeatureStore.key(attr), (None, None)) + index, tensor = self.store.get(self.key(attr), (None, None)) if tensor is None: return None @@ -47,14 +47,13 @@ def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: return tensor[idx] def _remove_tensor(self, attr: TensorAttr) -> bool: - del self.store[MyFeatureStore.key(attr)] - return True + return self.store.pop(self.key(attr), None) is not None - def _get_tensor_size(self, attr: TensorAttr) -> Tuple: + def _get_tensor_size(self, attr: TensorAttr) -> Tuple[int, ...]: return self._get_tensor(attr).size() - def get_all_tensor_attrs(self) -> List[str]: - return [TensorAttr(*key) for key in self.store.keys()] + def get_all_tensor_attrs(self) -> List[TensorAttr]: + return [self._tensor_attr_cls.cast(*key) for key in self.store.keys()] def __len__(self): raise NotImplementedError From db460561919ca384ef6b10f0ed3b7a8a6dab95e2 Mon Sep 17 00:00:00 2001 From: toenshoff Date: Wed, 31 May 2023 10:35:53 +0200 Subject: [PATCH 1234/2432] Fix `HeteroLinear` usage for mixed precision (#7473) This PR fixes an issue with the `HeteroLinear` module, which crashes in the forward pass if it us used within a `torch.cuda.amp.autocast` environment for mixed precision. Consider the following example: ```python import torch from torch_geometric.nn.dense.linear import HeteroLinear x = torch.randn(3, 16, device='cuda:0') type_vec = torch.tensor([0, 1, 2], device='cuda:0') lin = HeteroLinear(16, 32, num_types=3).to('cuda:0') lin.use_segmm = 0 with torch.cuda.amp.autocast(): out_no_segmm = lin(x, type_vec) ``` We obtain the following trace (Python 3.10, current nightly, CUDA 11.7): ``` Traceback (most recent call last): File "/home/jan/git/gerd/pyg_test.py", line 9, in out_no_segmm = lin(x, type_vec) File "/home/jan/miniconda3/envs/gerd/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "/home/jan/miniconda3/envs/gerd/lib/python3.10/site-packages/torch_geometric/nn/dense/linear.py", line 275, in forward out[mask] = F.linear(x[mask], self.weight[i].T) RuntimeError: Index put requires the source and destination dtypes match, got Float for the destination and Half for the source. ``` The issue is caused by `F.linear` changing the dtype on the fly. An additional cast is needed in this case. 
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/dense/test_linear.py | 13 +++++++++++++ torch_geometric/nn/dense/linear.py | 10 ++++++---- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ae81a384d3d..2cdfca1c001d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed `HeteroLinear` bug when used via mixed precision ([#7473](https://github.com/pyg-team/pytorch_geometric/pull/7473)) - All transforms are now immutable, i.e., they perform a shallow-copy of the data and therefore do not longer modify data in-place ([#7429](https://github.com/pyg-team/pytorch_geometric/pull/7429)) - Set `output_size` in the `repeat_interleave` operation in `QuantileAggregation` ([#7426](https://github.com/pyg-team/pytorch_geometric/pull/7426)) - Fixed gradient computation of edge weights in `utils.spmm` ([#7428](https://github.com/pyg-team/pytorch_geometric/pull/7428)) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index 9e7fd118220f..cba07e0db952 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -127,6 +127,19 @@ def test_hetero_linear(device): assert torch.allclose(jit(x, type_vec), out, atol=1e-3) +@withCUDA +@pytest.mark.parametrize('use_segmm', [True, False]) +def test_hetero_linear_amp(device, use_segmm): + x = torch.randn(3, 16, device=device) + type_vec = torch.tensor([0, 1, 2], device=device) + + lin = HeteroLinear(16, 32, num_types=3).to(device) + lin.use_segmm = use_segmm + + with torch.cuda.amp.autocast(): + assert lin(x, type_vec).size() == (3, 32) + + @withCUDA def test_lazy_hetero_linear(device): x = torch.randn(3, 16, device=device) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 761accdffc4f..e7f8da99e41b 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -243,9 +243,8 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: x (torch.Tensor): The input features. type_vec (torch.Tensor): A vector that maps each entry to a type. """ - - if torch_geometric.typing.WITH_PYG_LIB and (self.use_segmm == -1 - or bool(self.use_segmm)): + if (torch_geometric.typing.WITH_PYG_LIB + and (self.use_segmm == -1 or bool(self.use_segmm))): assert self.weight is not None perm: Optional[Tensor] = None @@ -272,7 +271,10 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: mask = type_vec == i if mask.numel() == 0: continue - out[mask] = F.linear(x[mask], self.weight[i].T) + subset_out = F.linear(x[mask], self.weight[i].T) + # The data type may have changed with mixed precision: + out[mask] = subset_out.to(out.dtype) + if self.bias is not None: out += self.bias[type_vec] return out From 6dc116630968681df8ee410e80c27f5b63feacf6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 31 May 2023 19:34:31 +0200 Subject: [PATCH 1235/2432] Replace slow `index_add` call (#7471) `scatter_add` is preferable to use. 
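For reference, the two formulations below compute the same per-index sum; the index and shapes are made up purely for illustration:

```python
import torch
from torch_geometric.utils import scatter

grad_out = torch.randn(())          # 0-dim upstream gradient
index = torch.tensor([0, 0, 2, 3])  # stand-in for edge_label_index[0]
num_rows = 5

# Old formulation: zero-init buffer followed by an in-place index_add_:
out1 = torch.zeros(num_rows).index_add_(
    0, index, grad_out.expand(index.numel()))

# New formulation: a single scatter-sum with an explicit output size:
out2 = scatter(grad_out.expand(index.numel()), index, dim=0,
               dim_size=num_rows, reduce='sum')

assert torch.allclose(out1, out2)
```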
--- torch_geometric/utils/cross_entropy.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/torch_geometric/utils/cross_entropy.py b/torch_geometric/utils/cross_entropy.py index 68ed50ea1315..e3ddd59ed6d7 100644 --- a/torch_geometric/utils/cross_entropy.py +++ b/torch_geometric/utils/cross_entropy.py @@ -3,6 +3,8 @@ import torch from torch import Tensor +from torch_geometric.utils import scatter + class SparseCrossEntropy(torch.autograd.Function): # We implement our own custom autograd function for this to avoid the @@ -26,8 +28,9 @@ def backward(ctx, grad_out: Tensor) -> Tuple[Tensor, None]: grad_out = grad_out / inputs.size(0) - grad_logsumexp = inputs.new_zeros(inputs.size(0)).index_add_( - 0, edge_label_index[0], grad_out.expand(edge_label_index.size(1))) + grad_logsumexp = scatter(grad_out.expand(edge_label_index.size(1)), + edge_label_index[0], dim=0, + dim_size=inputs.size(0), reduce='sum') # Gradient computation of `logsumexp`: `grad * (self - result).exp()` grad_input = (inputs - logsumexp.view(-1, 1)) From 91b4d1f7059d6354067313682fbae66c91cb3c89 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 31 May 2023 20:50:48 +0200 Subject: [PATCH 1236/2432] Test batch-wise `ChebConv` correctness (#7480) --- test/nn/conv/test_cheb_conv.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/test/nn/conv/test_cheb_conv.py b/test/nn/conv/test_cheb_conv.py index b2e38e223b0b..ff264a11e22c 100644 --- a/test/nn/conv/test_cheb_conv.py +++ b/test/nn/conv/test_cheb_conv.py @@ -1,5 +1,6 @@ import torch +from torch_geometric.data import Batch, Data from torch_geometric.nn import ChebConv from torch_geometric.testing import is_full_test @@ -44,3 +45,27 @@ def test_cheb_conv(): assert torch.allclose(jit(x, edge_index, edge_weight, batch), out4) assert torch.allclose( jit(x, edge_index, edge_weight, batch, lambda_max), out5) + + +def test_cheb_conv_batch(): + x1 = torch.randn(4, 8) + edge_index1 = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) + edge_weight1 = torch.rand(edge_index1.size(1)) + data1 = Data(x=x1, edge_index=edge_index1, edge_weight=edge_weight1) + + x2 = torch.randn(3, 8) + edge_index2 = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + edge_weight2 = torch.rand(edge_index2.size(1)) + data2 = Data(x=x2, edge_index=edge_index2, edge_weight=edge_weight2) + + conv = ChebConv(8, 16, K=2) + + out1 = conv(x1, edge_index1, edge_weight1) + out2 = conv(x2, edge_index2, edge_weight2) + + batch = Batch.from_data_list([data1, data2]) + out = conv(batch.x, batch.edge_index, batch.edge_weight, batch.batch) + + assert out.size() == (7, 16) + assert torch.allclose(out1, out[:4]) + assert torch.allclose(out2, out[4:]) From 7016d04497d5e0d097300ce8a4a94381cf78eb03 Mon Sep 17 00:00:00 2001 From: Chris Goreczny <116633453+chrisgo-gc@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:51:48 +0200 Subject: [PATCH 1237/2432] Move the `scaler` tensor in `GeneralConv` to the correct device (#7484) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/conv/general_conv.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cdfca1c001d..271f27d04b8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Move the `scaler` tensor in `GeneralConv` to the correct device ([#7484](https://github.com/pyg-team/pytorch_geometric/pull/7484)) - Fixed `HeteroLinear` bug when used via mixed precision ([#7473](https://github.com/pyg-team/pytorch_geometric/pull/7473)) - All transforms are now immutable, i.e., they perform a shallow-copy of the data and therefore do not longer modify data in-place ([#7429](https://github.com/pyg-team/pytorch_geometric/pull/7429)) - Set `output_size` in the `repeat_interleave` operation in `QuantileAggregation` ([#7426](https://github.com/pyg-team/pytorch_geometric/pull/7426)) - Fixed gradient computation of edge weights in `utils.spmm` ([#7428](https://github.com/pyg-team/pytorch_geometric/pull/7428)) diff --git a/torch_geometric/nn/conv/general_conv.py b/torch_geometric/nn/conv/general_conv.py index 395ff22deb89..135e3c379d16 100644 --- a/torch_geometric/nn/conv/general_conv.py +++ b/torch_geometric/nn/conv/general_conv.py @@ -122,8 +122,8 @@ def __init__( self.att_msg = Parameter( torch.Tensor(1, self.heads, self.out_channels)) elif self.attention_type == 'dot_product': - self.scaler = torch.sqrt( - torch.tensor(out_channels, dtype=torch.float)) + scaler = torch.tensor(out_channels, dtype=torch.float).sqrt() + self.register_buffer('scaler', scaler) else: raise ValueError( f"Attention type '{self.attention_type}' not supported") From d17bf07c8a0095a05bd8e2805c91fc773a093d15 Mon Sep 17 00:00:00 2001 From: YanbingJiang Date: Thu, 1 Jun 2023 22:09:37 +0800 Subject: [PATCH 1238/2432] [Benchmark] Add compile option in benchmark kernel, citation and points (#7470) Citation uses `Data` as input, which can be extended in `profile/benchmark.py`. Kernel and points use `DataLoader`. They only call `compile(model)` and do not use the `profile/benchmark.py` structure. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- benchmark/citation/appnp.py | 3 ++- benchmark/citation/arma.py | 3 ++- benchmark/citation/cheb.py | 3 ++- benchmark/citation/gat.py | 3 ++- benchmark/citation/gcn.py | 3 ++- benchmark/citation/sgc.py | 3 ++- benchmark/citation/train_eval.py | 22 +++++++++++++++------- benchmark/kernel/main_performance.py | 8 ++++++-- benchmark/points/edge_cnn.py | 3 ++- benchmark/points/mpnn.py | 3 ++- benchmark/points/point_cnn.py | 3 ++- benchmark/points/point_net.py | 3 ++- benchmark/points/spline_cnn.py | 3 ++- benchmark/points/train_eval.py | 23 ++++++++++++++++------- 14 files changed, 59 insertions(+), 27 deletions(-) diff --git a/benchmark/citation/appnp.py b/benchmark/citation/appnp.py index 8bb9addbe9ec..53c56beb7601 100644 --- a/benchmark/citation/appnp.py +++ b/benchmark/citation/appnp.py @@ -24,6 +24,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -51,7 +52,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/benchmark/citation/arma.py b/benchmark/citation/arma.py index 923cc0cf81dd..b39b5845dddb 100644 ---
a/benchmark/citation/arma.py +++ b/benchmark/citation/arma.py @@ -25,6 +25,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -53,7 +54,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/benchmark/citation/cheb.py b/benchmark/citation/cheb.py index 46f7d530303d..04b98685ced0 100644 --- a/benchmark/citation/cheb.py +++ b/benchmark/citation/cheb.py @@ -22,6 +22,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -46,7 +47,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/benchmark/citation/gat.py b/benchmark/citation/gat.py index 9462083660e2..f9ed5d6071af 100644 --- a/benchmark/citation/gat.py +++ b/benchmark/citation/gat.py @@ -23,6 +23,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -51,7 +52,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/benchmark/citation/gcn.py b/benchmark/citation/gcn.py index d91a0be90379..589d8361eb8a 100644 --- a/benchmark/citation/gcn.py +++ b/benchmark/citation/gcn.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -45,7 +46,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits 
if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/benchmark/citation/sgc.py b/benchmark/citation/sgc.py index 13633239a855..a11e177fdb1f 100644 --- a/benchmark/citation/sgc.py +++ b/benchmark/citation/sgc.py @@ -20,6 +20,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -41,7 +42,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/benchmark/citation/train_eval.py b/benchmark/citation/train_eval.py index 5427053c80a4..40bb8e03101d 100644 --- a/benchmark/citation/train_eval.py +++ b/benchmark/citation/train_eval.py @@ -5,6 +5,7 @@ from torch import tensor from torch.optim import Adam +import torch_geometric from torch_geometric.profile import timeit, torch_profile from torch_geometric.utils import index_to_mask @@ -36,8 +37,11 @@ def random_planetoid_splits(data, num_classes): def run_train(dataset, model, runs, epochs, lr, weight_decay, early_stopping, - profiling, permute_masks=None, logger=None): + profiling, use_compile, permute_masks=None, logger=None): val_losses, accs, durations = [], [], [] + if use_compile: + model = torch_geometric.compile(model) + for run in range(runs): data = dataset[0] if permute_masks is not None: @@ -98,14 +102,16 @@ def run_train(dataset, model, runs, epochs, lr, weight_decay, early_stopping, @torch.no_grad() -def run_inference(dataset, model, epochs, profiling, bf16, permute_masks=None, - logger=None): +def run_inference(dataset, model, epochs, profiling, bf16, use_compile, + permute_masks=None, logger=None): data = dataset[0] if permute_masks is not None: data = permute_masks(data, dataset.num_classes) data = data.to(device) model.to(device).reset_parameters() + if use_compile: + model = torch_geometric.compile(model) if torch.cuda.is_available(): amp = torch.cuda.amp.autocast(enabled=False) @@ -128,13 +134,15 @@ def run_inference(dataset, model, epochs, profiling, bf16, permute_masks=None, def run(dataset, model, runs, epochs, lr, weight_decay, early_stopping, - inference, profiling, bf16, permute_masks=None, logger=None): + inference, profiling, bf16, use_compile, permute_masks=None, + logger=None): if not inference: run_train(dataset, model, runs, epochs, lr, weight_decay, - early_stopping, profiling, permute_masks, logger) + early_stopping, profiling, use_compile, permute_masks, + logger) else: - run_inference(dataset, model, epochs, profiling, bf16, permute_masks, - logger) + run_inference(dataset, model, epochs, profiling, bf16, use_compile, + permute_masks, logger) def train(model, optimizer, data): diff --git a/benchmark/kernel/main_performance.py b/benchmark/kernel/main_performance.py index 
0871f81a43b4..777c0435ab1f 100644 --- a/benchmark/kernel/main_performance.py +++ b/benchmark/kernel/main_performance.py @@ -8,6 +8,7 @@ from graph_sage import GraphSAGE from train_eval import eval_acc, inference_run, train +import torch_geometric from torch_geometric import seed_everything from torch_geometric.loader import DataLoader from torch_geometric.profile import rename_profile_file, timeit, torch_profile @@ -32,6 +33,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') @@ -77,7 +79,8 @@ def run_train(): model = Model(dataset, num_layers, hidden).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) - + if args.compile: + model = torch_geometric.compile(model) loss_list = [] acc_list = [] for epoch in range(1, args.epochs + 1): @@ -116,7 +119,8 @@ def run_inference(): print(f'{dataset_name} - {model_name}- {num_layers} - {hidden}') model = Model(dataset, num_layers, hidden).to(device) - + if args.compile: + model = torch_geometric.compile(model) with amp: for epoch in range(1, args.epochs + 1): if epoch == args.epochs: diff --git a/benchmark/points/edge_cnn.py b/benchmark/points/edge_cnn.py index 0a0bcbfeac6f..d2017cae45d8 100644 --- a/benchmark/points/edge_cnn.py +++ b/benchmark/points/edge_cnn.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -60,7 +61,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', DynamicEdgeConv.__name__) diff --git a/benchmark/points/mpnn.py b/benchmark/points/mpnn.py index 68c66c683e60..3588433b4596 100644 --- a/benchmark/points/mpnn.py +++ b/benchmark/points/mpnn.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -77,7 +78,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', NNConv.__name__) diff --git a/benchmark/points/point_cnn.py b/benchmark/points/point_cnn.py index 2b804e4effea..e8b4a01d43e4 100644 --- a/benchmark/points/point_cnn.py +++ b/benchmark/points/point_cnn.py @@ -19,6 +19,7 @@ 
parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -65,7 +66,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', XConv.__name__) diff --git a/benchmark/points/point_net.py b/benchmark/points/point_net.py index 56a19b578f66..fe1edf06121c 100644 --- a/benchmark/points/point_net.py +++ b/benchmark/points/point_net.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -73,7 +74,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', PointNetConv.__name__) diff --git a/benchmark/points/spline_cnn.py b/benchmark/points/spline_cnn.py index 5ab5b010ae53..b2586396a3e8 100644 --- a/benchmark/points/spline_cnn.py +++ b/benchmark/points/spline_cnn.py @@ -19,6 +19,7 @@ parser.add_argument('--inference', action='http://github.com/store_true') parser.add_argument('--profile', action='http://github.com/store_true') parser.add_argument('--bf16', action='http://github.com/store_true') +parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() @@ -74,7 +75,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', SplineConv.__name__) diff --git a/benchmark/points/train_eval.py b/benchmark/points/train_eval.py index 9640debd3346..a5c364a6db2e 100644 --- a/benchmark/points/train_eval.py +++ b/benchmark/points/train_eval.py @@ -4,15 +4,19 @@ import torch.nn.functional as F from torch.optim import Adam +import torch_geometric from torch_geometric.loader import DataLoader from torch_geometric.profile import timeit, torch_profile device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -def run_train(train_dataset, test_dataset, model, epochs, batch_size, lr, - lr_decay_factor, lr_decay_step_size, weight_decay): +def run_train(train_dataset, test_dataset, model, epochs, batch_size, + use_compile, lr, lr_decay_factor, lr_decay_step_size, + weight_decay): model = model.to(device) + if use_compile: + model = torch_geometric.compile(model) optimizer = Adam(model.parameters(), lr=lr, 
weight_decay=weight_decay) train_loader = DataLoader(train_dataset, batch_size, shuffle=True) @@ -41,8 +45,11 @@ def run_train(train_dataset, test_dataset, model, epochs, batch_size, lr, @torch.no_grad() -def run_inference(test_dataset, model, epochs, batch_size, profiling, bf16): +def run_inference(test_dataset, model, epochs, batch_size, profiling, bf16, + use_compile): model = model.to(device) + if use_compile: + model = torch_geometric.compile(model) test_loader = DataLoader(test_dataset, batch_size, shuffle=False) if torch.cuda.is_available(): @@ -66,12 +73,14 @@ def run_inference(test_dataset, model, epochs, batch_size, profiling, bf16): def run(train_dataset, test_dataset, model, epochs, batch_size, lr, lr_decay_factor, lr_decay_step_size, weight_decay, inference, - profiling, bf16): + profiling, bf16, use_compile): if not inference: - run_train(train_dataset, test_dataset, model, epochs, batch_size, lr, - lr_decay_factor, lr_decay_step_size, weight_decay) + run_train(train_dataset, test_dataset, model, epochs, batch_size, + use_compile, lr, lr_decay_factor, lr_decay_step_size, + weight_decay) else: - run_inference(test_dataset, model, epochs, batch_size, profiling, bf16) + run_inference(test_dataset, model, epochs, batch_size, profiling, bf16, + use_compile) def train(model, optimizer, train_loader, device): From fe78a7e4053326ec5c4b190c606c889cbec71cea Mon Sep 17 00:00:00 2001 From: happykygo <62350285+happykygo@users.noreply.github.com> Date: Thu, 1 Jun 2023 10:49:20 -0400 Subject: [PATCH 1239/2432] Amazon book dataset (#7483) Add Amazon book heterogeneous dataset. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/amazon_book.py | 78 +++++++++++++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 torch_geometric/datasets/amazon_book.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 271f27d04b8e..f2c1e3f169da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 6dfa20cd4155..7bf92d16c0d9 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -83,6 +83,7 @@ from .hgb_dataset import HGBDataset from .taobao import Taobao from .igmc_dataset import IGMCDataset +from .amazon_book import AmazonBook from .fake import FakeDataset, FakeHeteroDataset from .sbm_dataset import StochasticBlockModelDataset @@ -184,6 +185,7 @@ 'HGBDataset', 'Taobao', 'IGMCDataset', + 'AmazonBook', ] synthetic_datasets = [ 'FakeDataset', diff --git a/torch_geometric/datasets/amazon_book.py b/torch_geometric/datasets/amazon_book.py new file mode 100644 index 000000000000..b3c26730e2c9 --- /dev/null +++ b/torch_geometric/datasets/amazon_book.py @@ -0,0 +1,78 @@ +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import HeteroData, InMemoryDataset, download_url + + +class AmazonBook(InMemoryDataset): + r"""A subset of the AmazonBook rating dataset from the + `"LightGCN: Simplifying and Powering Graph Convolution Network for + Recommendation" `_ paper. + This is a heterogeneous dataset consisting of 52,643 users and 91,599 books + with approximately 2.9 million ratings between them. + No labels or features are provided. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + being saved to disk. 
(default: :obj:`None`)
+    """
+    url = ('https://raw.githubusercontent.com/gusye1234/LightGCN-PyTorch/'
+           'master/data/amazon-book')
+
+    def __init__(self, root: str, transform: Optional[Callable] = None,
+                 pre_transform: Optional[Callable] = None):
+        super().__init__(root, transform, pre_transform)
+        self.load(self.processed_paths[0], data_cls=HeteroData)
+
+    @property
+    def raw_file_names(self) -> List[str]:
+        return ['user_list.txt', 'item_list.txt', 'train.txt', 'test.txt']
+
+    @property
+    def processed_file_names(self) -> str:
+        return 'data.pt'
+
+    def download(self):
+        for name in self.raw_file_names:
+            download_url(f'{self.url}/{name}', self.raw_dir)
+
+    def process(self):
+        import pandas as pd
+
+        data = HeteroData()
+
+        # Process number of nodes for each node type:
+        node_types = ['user', 'book']
+        for path, node_type in zip(self.raw_paths, node_types):
+            df = pd.read_csv(path, sep=' ', header=0)
+            data[node_type].num_nodes = len(df)
+
+        # Process edge information for training and testing:
+        attr_names = ['edge_index', 'edge_label_index']
+        for path, attr_name in zip(self.raw_paths[2:], attr_names):
+            rows, cols = [], []
+            with open(path, 'r') as f:
+                lines = f.readlines()
+            for line in lines:
+                line = line.strip().split(' ')
+                for dst in line[1:]:
+                    rows.append(int(line[0]))
+                    cols.append(int(dst))
+            index = torch.tensor([rows, cols])
+
+            data['user', 'rates', 'book'][attr_name] = index
+            if attr_name == 'edge_index':
+                data['book', 'rated_by', 'user'][attr_name] = index.flip([0])
+
+        if self.pre_transform is not None:
+            data = self.pre_transform(data)
+
+        self.save([data], self.processed_paths[0])
From ef9019eec66731664e66f74f9f2bce60ef0d671b Mon Sep 17 00:00:00 2001
From: James Myatt
Date: Thu, 1 Jun 2023 17:05:28 +0100
Subject: [PATCH 1240/2432] Enable Python 3.11 conda packages (#7485)

Related to #6704
---
 .github/workflows/building_pyg_conda.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml
index 4464ddf144f1..94db4b342cd4 100644
--- a/.github/workflows/building_pyg_conda.yml
+++ b/.github/workflows/building_pyg_conda.yml
@@ -11,8 +11,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-18.04, macos-10.15, windows-2019]
-        # We have troube building for Python 3.11 due to version conflicts.
-        python-version: ['3.7', '3.8', '3.9', '3.10'] # '3.11'
+        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
         torch-version: [1.12.0, 1.13.0, 2.0.0]
         cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118']
         exclude:
From 7c96036c613799fc1f79371273041633df37cdcb Mon Sep 17 00:00:00 2001
From: James Myatt
Date: Fri, 2 Jun 2023 12:18:03 +0100
Subject: [PATCH 1241/2432] Build Python 3.11 conda package for rusty1s channel too (#7490)

---
 .github/workflows/building_rusty1s_conda.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml
index 296a402cbf60..d0b88eda2878 100644
--- a/.github/workflows/building_rusty1s_conda.yml
+++ b/.github/workflows/building_rusty1s_conda.yml
@@ -11,8 +11,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-18.04, macos-10.15, windows-2019]
-        # We have troube building for Python 3.11 due to version conflicts.
- python-version: ['3.7', '3.8', '3.9', '3.10'] # '3.11' + python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] torch-version: [1.12.0, 1.13.0, 2.0.0] cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] exclude: From 8ddbd6b8da26cbf39bda76e888d4ee0531dbffeb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 2 Jun 2023 13:21:44 +0200 Subject: [PATCH 1242/2432] `cudf` support in `map_index` (#7493) --- CHANGELOG.md | 1 + test/utils/test_map.py | 74 +++++++++++++++--- torch_geometric/utils/map.py | 140 +++++++++++++++++++++++++++++------ 3 files changed, 184 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2c1e3f169da..c1aecb3e97a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)) diff --git a/test/utils/test_map.py b/test/utils/test_map.py index 923bb5b811fb..df6eb6e5466e 100644 --- a/test/utils/test_map.py +++ b/test/utils/test_map.py @@ -1,21 +1,77 @@ +import pytest import torch +from torch_geometric.profile import benchmark +from torch_geometric.testing import withCUDA from torch_geometric.utils.map import map_index -def test_map_index(): - src = torch.tensor([2, 0, 1, 0, 3]) - index = torch.tensor([3, 2, 0, 1]) +@withCUDA +@pytest.mark.parametrize('max_index', [3, 100_000_000]) +def test_map_index(device, max_index): + src = torch.tensor([2, 0, 1, 0, max_index], device=device) + index = torch.tensor([max_index, 2, 0, 1], device=device) - out, mask = map_index(src, index) + out, mask = map_index(src, index, inclusive=True) + assert out.device == device + assert mask is None assert out.tolist() == [1, 2, 3, 2, 0] - assert mask.tolist() == [True, True, True, True, True] -def test_map_index_na(): - src = torch.tensor([2, 0, 1, 0, 3]) - index = torch.tensor([3, 2, 0]) +@withCUDA +@pytest.mark.parametrize('max_index', [3, 100_000_000]) +def test_map_index_na(device, max_index): + src = torch.tensor([2, 0, 1, 0, max_index], device=device) + index = torch.tensor([max_index, 2, 0], device=device) - out, mask = map_index(src, index) + out, mask = map_index(src, index, inclusive=False) + assert out.device == device + assert mask.device == device assert out.tolist() == [1, 2, 2, 0] assert mask.tolist() == [True, True, False, True, True] + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cuda') + args = parser.parse_args() + + src = torch.randint(0, 100_000_000, (100_000, ), device=args.device) + index = src.unique() + + def trivial_map(src, index, max_index, inclusive): + if max_index is None: + max_index = max(src.max(), index.max()) + + if inclusive: + assoc = src.new_empty(max_index + 1) + else: + assoc = src.new_full((max_index + 1, ), -1) + assoc[index] = torch.arange(index.numel(), device=index.device) + out = assoc[src] + + if inclusive: + return out, None + else: + mask = out != -1 + return out[mask], mask + 
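+    # The two runs below benchmark `map_index` against the dense
+    # `trivial_map` baseline defined above: once with every entry of `src`
+    # present in `index` (inclusive=True), and once with a truncated
+    # `index` (inclusive=False), which exercises the mask-filtering path.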
+ print('Inclusive:') + benchmark( + funcs=[trivial_map, map_index], + func_names=['trivial', 'map_index'], + args=(src, index, None, True), + num_steps=100, + num_warmups=50, + ) + + print('Exclusive:') + benchmark( + funcs=[trivial_map, map_index], + func_names=['trivial', 'map_index'], + args=(src, index[:50_000], None, False), + num_steps=100, + num_warmups=50, + ) diff --git a/torch_geometric/utils/map.py b/torch_geometric/utils/map.py index cea56b0edd42..39db6520d7cf 100644 --- a/torch_geometric/utils/map.py +++ b/torch_geometric/utils/map.py @@ -1,16 +1,29 @@ -from typing import Tuple +import warnings +from typing import Optional, Tuple import torch from torch import Tensor +from torch.utils.dlpack import from_dlpack -def map_index(src: Tensor, index: Tensor) -> Tuple[Tensor, Tensor]: +def map_index( + src: Tensor, + index: Tensor, + max_index: Optional[int] = None, + inclusive: bool = False, +) -> Tuple[Tensor, Optional[Tensor]]: r"""Maps indices in :obj:`src` to the positional value of their corresponding occurence in :obj:`index`. + Indices must be strictly positive. Args: src (torch.Tensor): The source tensor to map. index (torch.Tensor): The index tensor that denotes the new mapping. + max_index (int, optional): The maximum index value. + (default :obj:`None`) + inclusive (bool, optional): If set to :obj:`True`, it is assumed that + every entry in :obj:`src` has a valid entry in :obj:`index`. + Can speed-up computation. (default: :obj:`False`) :rtype: (:class:`torch.Tensor`, :class:`torch.BoolTensor`) @@ -28,23 +41,106 @@ def map_index(src: Tensor, index: Tensor) -> Tuple[Tensor, Tensor]: >>> map_index(src, index) (tensor([1, 2, 2, 0]), tensor([True, True, False, True, True])) """ - import pandas as pd - - assert src.dim() == 1 and index.dim() == 1 - assert not src.is_floating_point() - assert not index.is_floating_point() - - arange = pd.RangeIndex(0, index.size(0)) - df = pd.DataFrame(index=index.detach().cpu().numpy(), data={'out': arange}) - ser = pd.Series(src.detach().cpu(), name='key') - result = df.merge(ser, how='right', left_index=True, right_on='key') - out = torch.from_numpy(result['out'].values).to(index.device) - - if out.is_floating_point(): - mask = torch.isnan(out).logical_not_() - out = out[mask].to(index.dtype) - return out, mask - - out = out.to(index.dtype) - mask = torch.ones_like(out, dtype=torch.bool) - return out, mask + if src.is_floating_point(): + raise ValueError(f"Expected 'src' to be an index (got '{src.dtype}')") + if index.is_floating_point(): + raise ValueError(f"Expected 'index' to be an index (got " + f"'{index.dtype}')") + if src.device != index.device: + raise ValueError(f"Both 'src' and 'index' must be on the same device " + f"(got '{src.device}' and '{index.device}')") + + if max_index is None: + max_index = max(src.max(), index.max()) + + # If the `max_index` is in a reasonable range, we can accelerate this + # operation by creating a helper vector to perform the mapping. 
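+    # The helper vector `assoc` is indexed by raw node index: writing
+    # `assoc[index] = arange(len(index))` lets `assoc[src]` translate every
+    # entry of `src` into its position within `index` via a single gather,
+    # while entries missing from `index` stay at -1 for the exclusive case.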
+ # NOTE This will potentially consumes a large chunk of memory + # (max_index=10 million => ~75MB), so we cap it at a reasonable size: + THRESHOLD = 40_000_000 if src.is_cuda else 10_000_000 + if max_index <= THRESHOLD: + if inclusive: + assoc = src.new_empty(max_index + 1) + else: + assoc = src.new_full((max_index + 1, ), -1) + assoc[index] = torch.arange(index.numel(), dtype=src.dtype, + device=src.device) + out = assoc[src] + + if inclusive: + return out, None + else: + mask = out != -1 + return out[mask], mask + + WITH_CUDF = False + if src.is_cuda: + try: + import cudf + WITH_CUDF = True + except ImportError: + import pandas as pd + warnings.warn("Using CPU-based processing within 'map_index' " + "which may cause slowdowns and device " + "synchronization. Consider installing 'cudf' to " + "accelerate computation") + else: + import pandas as pd + + if not WITH_CUDF: + left_ser = pd.Series(src.cpu().numpy(), name='left_ser') + right_ser = pd.Series( + index=index.cpu().numpy(), + data=pd.RangeIndex(0, index.size(0)), + name='right_ser', + ) + + result = pd.merge(left_ser, right_ser, how='left', left_on='left_ser', + right_index=True) + + out = torch.from_numpy(result['right_ser'].values).to(index.device) + + if out.is_floating_point() and inclusive: + raise ValueError("Found invalid entries in 'src' that do not have " + "a corresponding entry in 'index'. Set " + "`inclusive=False` to ignore these entries.") + + if out.is_floating_point(): + mask = torch.isnan(out).logical_not_() + out = out[mask].to(index.dtype) + return out, mask + + if inclusive: + return out, None + else: + mask = out != -1 + return out[mask], mask + + else: + left_ser = cudf.Series(src, name='left_ser') + right_ser = cudf.Series( + index=index, + data=cudf.RangeIndex(0, index.size(0)), + name='right_ser', + ) + + result = cudf.merge(left_ser, right_ser, how='left', + left_on='left_ser', right_index=True, sort=True) + + if inclusive: + try: + out = from_dlpack(result['right_ser'].to_dlpack()) + except ValueError: + raise ValueError("Found invalid entries in 'src' that do not " + "have a corresponding entry in 'index'. Set " + "`inclusive=False` to ignore these entries.") + else: + out = from_dlpack(result['right_ser'].fillna(-1).to_dlpack()) + + out = out[src.argsort().argsort()] # Restore original order. + + if inclusive: + return out, None + else: + mask = out != -1 + return out[mask], mask From 10f640f6243e9d6e69bbb41405a1792b54f741e6 Mon Sep 17 00:00:00 2001 From: Thomas Kwok Date: Fri, 2 Jun 2023 12:56:37 +0100 Subject: [PATCH 1243/2432] Add `MovieLens-1M` heterogeneous dataset (#7479) Co-authored-by: Matthias Fey --- CHANGELOG.md | 3 +- torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/movie_lens_100k.py | 3 +- torch_geometric/datasets/movie_lens_1m.py | 165 ++++++++++++++++++++ 4 files changed, 170 insertions(+), 3 deletions(-) create mode 100644 torch_geometric/datasets/movie_lens_1m.py diff --git a/CHANGELOG.md b/CHANGELOG.md index c1aecb3e97a3..c7f2f300b702 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) +- Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) +- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 7bf92d16c0d9..1891f59af4ba 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -78,6 +78,7 @@ from .dblp import DBLP from .movie_lens import MovieLens from .movie_lens_100k import MovieLens100K +from .movie_lens_1m import MovieLens1M from .imdb import IMDB from .last_fm import LastFM from .hgb_dataset import HGBDataset @@ -180,6 +181,7 @@ 'DBLP', 'MovieLens', 'MovieLens100K', + 'MovieLens1M', 'IMDB', 'LastFM', 'HGBDataset', diff --git a/torch_geometric/datasets/movie_lens_100k.py b/torch_geometric/datasets/movie_lens_100k.py index 7b29e840a5c9..b7b8c600188a 100644 --- a/torch_geometric/datasets/movie_lens_100k.py +++ b/torch_geometric/datasets/movie_lens_100k.py @@ -66,7 +66,6 @@ class MovieLens100K(InMemoryDataset): - 1 - 1 """ - url = 'https://files.grouplens.org/datasets/movielens/ml-100k.zip' def __init__( @@ -148,7 +147,7 @@ def process(self): edge_index = torch.tensor([src, dst]) data['user', 'rates', 'movie'].edge_index = edge_index - rating = torch.from_numpy(df['rating'].values).to(torch.float) + rating = torch.from_numpy(df['rating'].values).to(torch.long) data['user', 'rates', 'movie'].rating = rating time = torch.from_numpy(df['timestamp'].values) diff --git a/torch_geometric/datasets/movie_lens_1m.py b/torch_geometric/datasets/movie_lens_1m.py new file mode 100644 index 000000000000..092c0ff39b98 --- /dev/null +++ b/torch_geometric/datasets/movie_lens_1m.py @@ -0,0 +1,165 @@ +import os +import os.path as osp +import shutil +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + HeteroData, + InMemoryDataset, + download_url, + extract_zip, +) + +MOVIE_HEADERS = ["movieId", "title", "genres"] +USER_HEADERS = ["userId", "gender", "age", "occupation", "zipCode"] +RATING_HEADERS = ['userId', 'movieId', 'rating', 'timestamp'] + + +class MovieLens1M(InMemoryDataset): + r"""The MovieLens 1M heterogeneous rating dataset, assembled by GroupLens + Research from the `MovieLens web site `__, + consisting of movies (3,883 nodes) and users (6,040 nodes) with + approximately 1 million ratings between them. + User ratings for movies are available as ground truth labels. + Features of users and movies are encoded according to the `"Inductive + Matrix Completion Based on Graph Neural Networks" + `__ paper. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. 
(default: :obj:`None`)
+        pre_transform (callable, optional): A function/transform that takes in
+            an :obj:`torch_geometric.data.HeteroData` object and returns a
+            transformed version. The data object will be transformed before
+            being saved to disk. (default: :obj:`None`)
+
+    **STATS:**
+
+    .. list-table::
+        :widths: 20 10 10 10
+        :header-rows: 1
+
+        * - Node/Edge Type
+          - #nodes/#edges
+          - #features
+          - #tasks
+        * - Movie
+          - 3,883
+          - 18
+          -
+        * - User
+          - 6,040
+          - 30
+          -
+        * - User-Movie
+          - 1,000,209
+          - 1
+          - 1
+    """
+    url = 'https://files.grouplens.org/datasets/movielens/ml-1m.zip'
+
+    def __init__(
+        self,
+        root: str,
+        transform: Optional[Callable] = None,
+        pre_transform: Optional[Callable] = None,
+    ):
+        super().__init__(root, transform, pre_transform)
+        self.load(self.processed_paths[0], data_cls=HeteroData)
+
+    @property
+    def raw_file_names(self) -> List[str]:
+        return ['movies.dat', 'users.dat', 'ratings.dat']
+
+    @property
+    def processed_file_names(self) -> str:
+        return 'data.pt'
+
+    def download(self):
+        path = download_url(self.url, self.root)
+        extract_zip(path, self.root)
+        os.remove(path)
+        folder = osp.join(self.root, 'ml-1m')
+        shutil.rmtree(self.raw_dir)
+        os.rename(folder, self.raw_dir)
+
+    def process(self):
+        import pandas as pd
+
+        data = HeteroData()
+
+        # Process movie data:
+        df = pd.read_csv(
+            self.raw_paths[0],
+            sep='::',
+            header=None,
+            index_col='movieId',
+            names=MOVIE_HEADERS,
+            encoding='ISO-8859-1',
+            engine='python',
+        )
+        movie_mapping = {idx: i for i, idx in enumerate(df.index)}
+
+        genres = df['genres'].str.get_dummies('|').values
+        genres = torch.from_numpy(genres).to(torch.float)
+
+        data['movie'].x = genres
+
+        # Process user data:
+        df = pd.read_csv(
+            self.raw_paths[1],
+            sep='::',
+            header=None,
+            index_col='userId',
+            names=USER_HEADERS,
+            dtype='str',
+            encoding='ISO-8859-1',
+            engine='python',
+        )
+        user_mapping = {idx: i for i, idx in enumerate(df.index)}
+
+        age = df['age'].str.get_dummies().values
+        age = torch.from_numpy(age).to(torch.float)
+
+        gender = df['gender'].str.get_dummies().values
+        gender = torch.from_numpy(gender).to(torch.float)
+
+        occupation = df['occupation'].str.get_dummies().values
+        occupation = torch.from_numpy(occupation).to(torch.float)
+
+        data['user'].x = torch.cat([age, gender, occupation], dim=-1)
+
+        # Process rating data:
+        df = pd.read_csv(
+            self.raw_paths[2],
+            sep='::',
+            header=None,
+            names=RATING_HEADERS,
+            encoding='ISO-8859-1',
+            engine='python',
+        )
+
+        src = [user_mapping[idx] for idx in df['userId']]
+        dst = [movie_mapping[idx] for idx in df['movieId']]
+        edge_index = torch.tensor([src, dst])
+        data['user', 'rates', 'movie'].edge_index = edge_index
+
+        rating = torch.from_numpy(df['rating'].values).to(torch.long)
+        data['user', 'rates', 'movie'].rating = rating
+
+        time = torch.from_numpy(df['timestamp'].values)
+        data['user', 'rates', 'movie'].time = time
+
+        data['movie', 'rated_by', 'user'].edge_index = edge_index.flip([0])
+        data['movie', 'rated_by', 'user'].rating = rating
+        data['movie', 'rated_by', 'user'].time = time
+
+        if self.pre_transform is not None:
+            data = self.pre_transform(data)
+
+        self.save([data], self.processed_paths[0])
From 51f99c3bd60a799a45522b2eda075be86f4c02b1 Mon Sep 17 00:00:00 2001
From: Rishi Puri
Date: Fri, 2 Jun 2023 05:53:48 -0700
Subject: [PATCH 1244/2432] Make `TGN` use `torch` native when `torch-scatter` not available (#7481)

In my tests, torch native and torch scatter have similar perf for the tgn training
example (which includes both forward and back) ``` mine: root@212f80de0614:/workspace# python3 examples/tgn.py Epoch: 01, Loss: 1.1555 Val AP: 0.8242, Val AUC: 0.8313 Test AP: 0.8127, Test AUC: 0.8208 epochtime= 13.63751745223999 Epoch: 02, Loss: 0.9218 Val AP: 0.8302, Val AUC: 0.8499 Test AP: 0.8428, Test AUC: 0.8493 epochtime= 13.39035940170288 Epoch: 03, Loss: 0.8480 Val AP: 0.8654, Val AUC: 0.8696 Test AP: 0.8656, Test AUC: 0.8644 epochtime= 12.575356721878052 Epoch: 04, Loss: 0.8008 Val AP: 0.8784, Val AUC: 0.8783 Test AP: 0.8670, Test AUC: 0.8672 epochtime= 13.801454782485962 original: Epoch: 01, Loss: 1.0855 Val AP: 0.8646, Val AUC: 0.8787 Test AP: 0.8323, Test AUC: 0.8527 epochtime= 13.7134690284729 Epoch: 02, Loss: 0.8287 Val AP: 0.9079, Val AUC: 0.9113 Test AP: 0.9035, Test AUC: 0.9053 epochtime= 13.247861862182617 Epoch: 03, Loss: 0.7423 Val AP: 0.9243, Val AUC: 0.9269 Test AP: 0.9217, Test AUC: 0.9235 epochtime= 12.532079458236694 Epoch: 04, Loss: 0.6858 Val AP: 0.9380, Val AUC: 0.9351 Test AP: 0.9280, Test AUC: 0.9248 epochtime= 12.426196813583374 ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/models/test_tgn.py | 2 -- test/utils/test_scatter.py | 13 ++++++++++++- torch_geometric/nn/models/tgn.py | 4 ++-- torch_geometric/utils/scatter.py | 25 +++++++++++++++++++++++++ 4 files changed, 39 insertions(+), 5 deletions(-) diff --git a/test/nn/models/test_tgn.py b/test/nn/models/test_tgn.py index c9437fa74bbf..cb1f5c6ddd14 100644 --- a/test/nn/models/test_tgn.py +++ b/test/nn/models/test_tgn.py @@ -8,10 +8,8 @@ LastAggregator, LastNeighborLoader, ) -from torch_geometric.testing import withPackage -@withPackage('torch_scatter') # TODO Requires `scatter_argmax` for now. 
def test_tgn(): memory_dim = 16 time_dim = 16 diff --git a/test/utils/test_scatter.py b/test/utils/test_scatter.py index 735a5cee1692..d93f170008b5 100644 --- a/test/utils/test_scatter.py +++ b/test/utils/test_scatter.py @@ -2,8 +2,9 @@ import torch from torch_geometric.profile import benchmark -from torch_geometric.testing import withCUDA, withPackage +from torch_geometric.testing import disableExtensions, withCUDA, withPackage from torch_geometric.utils import scatter +from torch_geometric.utils.scatter import scatter_argmax def test_scatter_validate(): @@ -70,6 +71,16 @@ def test_scatter_any(device): assert float(out[i, j]) in src[2 * i:2 * i + 2, j].tolist() +@withCUDA +@disableExtensions +def test_scatter_argmax(device): + src = torch.arange(5, device=device) + index = torch.tensor([2, 2, 0, 0, 3], device=device) + + argmax = scatter_argmax(src, index, dim_size=6) + assert argmax.tolist() == [3, 5, 1, 4, 5, 5] + + if __name__ == '__main__': # Insights on GPU: # ================ diff --git a/torch_geometric/nn/models/tgn.py b/torch_geometric/nn/models/tgn.py index d224f9b06209..24c391fba029 100644 --- a/torch_geometric/nn/models/tgn.py +++ b/torch_geometric/nn/models/tgn.py @@ -7,6 +7,7 @@ from torch_geometric.nn.inits import zeros from torch_geometric.utils import scatter +from torch_geometric.utils.scatter import scatter_argmax TGNMessageStoreType = Dict[int, Tuple[Tensor, Tensor, Tensor, Tensor]] @@ -194,8 +195,7 @@ def forward(self, z_src: Tensor, z_dst: Tensor, raw_msg: Tensor, class LastAggregator(torch.nn.Module): def forward(self, msg: Tensor, index: Tensor, t: Tensor, dim_size: int): - from torch_scatter import scatter_max - _, argmax = scatter_max(t, index, dim=0, dim_size=dim_size) + argmax = scatter_argmax(t, index, dim=0, dim_size=dim_size) out = msg.new_zeros((dim_size, msg.size(-1))) mask = argmax < msg.size(0) # Filter items with at least one entry. 
out[mask] = msg[argmax[mask]] diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index 472d19599489..5d14ee87163b 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -164,3 +164,28 @@ def scatter(src: Tensor, index: Tensor, dim: int = 0, def broadcast(src: Tensor, ref: Tensor, dim: int) -> Tensor: size = ((1, ) * dim) + (-1, ) + ((1, ) * (ref.dim() - dim - 1)) return src.view(size).expand_as(ref) + + +def scatter_argmax(src: Tensor, index: Tensor, dim: int = 0, + dim_size: Optional[int] = None) -> Tensor: + + if torch_geometric.typing.WITH_TORCH_SCATTER: + out = torch_scatter.scatter_max(src, index, dim=dim, dim_size=dim_size) + return out[1] + + # Only implemented under certain conditions for now :( + assert dim == 0 + assert src.dim() == 1 and index.dim() == 1 + + if dim_size is None: + dim_size = index.max() + 1 if index.numel() > 0 else 0 + + res = src.new_empty(dim_size) + res.scatter_reduce_(0, index, src.detach(), reduce='max', + include_self=False) + + out = index.new_full((dim_size, ), fill_value=dim_size - 1) + nonzero = (src == res[index]).nonzero().view(-1) + out[index[nonzero]] = nonzero + + return out From 93d5066257e7b366f297fa2aa8bdd1e781a6189a Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Fri, 2 Jun 2023 05:54:40 -0700 Subject: [PATCH 1245/2432] Making `RevGNN` example work w/o `torch-sparse` (#7477) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- examples/rev_gnn.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/examples/rev_gnn.py b/examples/rev_gnn.py index aef5fcdbeb86..56fd5fa3aa86 100644 --- a/examples/rev_gnn.py +++ b/examples/rev_gnn.py @@ -14,7 +14,6 @@ import torch_geometric.transforms as T from torch_geometric.loader import RandomNodeLoader from torch_geometric.nn import GroupAddRev, SAGEConv -from torch_geometric.typing import SparseTensor from torch_geometric.utils import index_to_mask @@ -81,9 +80,11 @@ def forward(self, x, edge_index): from ogb.nodeproppred import Evaluator, PygNodePropPredDataset # noqa -transform = T.AddSelfLoops() +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +transform = T.Compose([T.ToDevice(device), T.ToSparseTensor()]) root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products') -dataset = PygNodePropPredDataset('ogbn-products', root, transform=transform) +dataset = PygNodePropPredDataset('ogbn-products', root, + transform=T.AddSelfLoops()) evaluator = Evaluator(name='ogbn-products') data = dataset[0] @@ -97,7 +98,6 @@ def forward(self, x, edge_index): # the full batch graph into your GPU: test_loader = RandomNodeLoader(data, num_parts=1, num_workers=5) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = RevGNN( in_channels=dataset.num_features, hidden_channels=160, @@ -117,12 +117,11 @@ def train(epoch): total_loss = total_examples = 0 for data in train_loader: - data = data.to(device) optimizer.zero_grad() # Memory-efficient aggregations: - adj_t = SparseTensor.from_edge_index(data.edge_index).t() - out = model(data.x, adj_t)[data.train_mask] + data = transform(data) + out = model(data.x, data.adj_t)[data.train_mask] loss = F.cross_entropy(out, data.y[data.train_mask].view(-1)) loss.backward() optimizer.step() @@ -147,11 +146,9 @@ def test(epoch): pbar.set_description(f'Evaluating epoch: {epoch:03d}') for data in test_loader: - data = data.to(device) - # Memory-efficient aggregations - 
adj_t = SparseTensor.from_edge_index(data.edge_index).t() - out = model(data.x, adj_t).argmax(dim=-1, keepdim=True) + data = transform(data) + out = model(data.x, data.adj_t).argmax(dim=-1, keepdim=True) for split in ['train', 'valid', 'test']: mask = data[f'{split}_mask'] From 788fc10ae1351ada46ce4b17a096a55d5521c4db Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Sun, 4 Jun 2023 09:11:00 +0100 Subject: [PATCH 1246/2432] Fix `torch_geometric.utils.scatter_argmax` (#7495) Fixes a test failure introduced in #7481 to unblock my PR :) - https://pytorch.org/docs/stable/generated/torch.Tensor.scatter_reduce_.html - https://github.com/pyg-team/pytorch_geometric/actions/runs/5156873804/jobs/9288489563 ``` _________________________ test_scatter_argmax[device0] _________________________ device = device(type='cpu') @withCUDA @disableExtensions def test_scatter_argmax(device): src = torch.arange(5, device=device) index = torch.tensor([2, 2, 0, 0, 3], device=device) > argmax = scatter_argmax(src, index, dim_size=6) test/utils/test_scatter.py:80: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ src = tensor([0, 1, 2, 3, 4]), index = tensor([2, 2, 0, 0, 3]), dim = 0 dim_size = 6 def scatter_argmax(src: Tensor, index: Tensor, dim: int = 0, dim_size: Optional[int] = None) -> Tensor: if torch_geometric.typing.WITH_TORCH_SCATTER: out = torch_scatter.scatter_max(src, index, dim=dim, dim_size=dim_size) return out[1] # Only implemented under certain conditions for now :( assert dim == 0 assert src.dim() == 1 and index.dim() == 1 if dim_size is None: dim_size = index.max() + 1 if index.numel() > 0 else 0 res = src.new_empty(dim_size) > res.scatter_reduce_(0, index, src.detach(), reduce='max', include_self=False) E RuntimeError: reduce argument must be either sum, prod, mean, amax or amin. torch_geometric/utils/scatter.py:184: RuntimeError ``` --- torch_geometric/utils/scatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index 5d14ee87163b..0366f4868805 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -181,7 +181,7 @@ def scatter_argmax(src: Tensor, index: Tensor, dim: int = 0, dim_size = index.max() + 1 if index.numel() > 0 else 0 res = src.new_empty(dim_size) - res.scatter_reduce_(0, index, src.detach(), reduce='max', + res.scatter_reduce_(0, index, src.detach(), reduce='amax', include_self=False) out = index.new_full((dim_size, ), fill_value=dim_size - 1) From e90aa0c842b4bf77441c10e08342faa75476da90 Mon Sep 17 00:00:00 2001 From: James Myatt Date: Sun, 4 Jun 2023 09:12:12 +0100 Subject: [PATCH 1247/2432] Add conda build number 1 for republishing with Python 3.11 (#7491) I think this is required before rerunning https://github.com/pyg-team/pytorch_geometric/actions/workflows/building_pyg_conda.yml to push the new conda packages with the python 3.11 versions from https://github.com/pyg-team/pytorch_geometric/pull/7485. 
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- conda/pyg/meta.yaml | 1 + conda/pytorch-geometric/meta.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/conda/pyg/meta.yaml b/conda/pyg/meta.yaml index ad1f90408e17..a6375bf68662 100644 --- a/conda/pyg/meta.yaml +++ b/conda/pyg/meta.yaml @@ -25,6 +25,7 @@ requirements: build: string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} + number: 1 script: pip install . test: diff --git a/conda/pytorch-geometric/meta.yaml b/conda/pytorch-geometric/meta.yaml index a93e0b638948..6092c328acaf 100644 --- a/conda/pytorch-geometric/meta.yaml +++ b/conda/pytorch-geometric/meta.yaml @@ -25,6 +25,7 @@ requirements: build: string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} + number: 1 script: pip install . test: From 67d48931dee0e849a2d5e1063a68314c1401d919 Mon Sep 17 00:00:00 2001 From: James Myatt Date: Sun, 4 Jun 2023 09:12:38 +0100 Subject: [PATCH 1248/2432] Publish Pyg 2.3.1 conda packages (#7486) Current version of pyg on conda is 2.3.0: https://anaconda.org/pyg/pyg Co-authored-by: Matthias Fey --- conda/pyg/meta.yaml | 4 ++-- conda/pytorch-geometric/meta.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/conda/pyg/meta.yaml b/conda/pyg/meta.yaml index a6375bf68662..0934df2d73d0 100644 --- a/conda/pyg/meta.yaml +++ b/conda/pyg/meta.yaml @@ -1,9 +1,9 @@ package: name: pyg - version: 2.3.0 + version: 2.3.1 source: - url: https://files.pythonhosted.org/packages/43/b5/be9795db7756e6c1fa2606c8145ec637552487e72c6428ed0b231f8bcbd3/torch_geometric-2.3.0.tar.gz + url: https://files.pythonhosted.org/packages/06/a5/9f5af849c4185da5ea55f70ef17e23f93355cd4e989d82cfc8ba2d8747af/torch_geometric-2.3.1.tar.gz requirements: host: diff --git a/conda/pytorch-geometric/meta.yaml b/conda/pytorch-geometric/meta.yaml index 6092c328acaf..e10547ec71ed 100644 --- a/conda/pytorch-geometric/meta.yaml +++ b/conda/pytorch-geometric/meta.yaml @@ -1,9 +1,9 @@ package: name: pytorch-geometric - version: 2.3.0 + version: 2.3.1 source: - url: https://files.pythonhosted.org/packages/43/b5/be9795db7756e6c1fa2606c8145ec637552487e72c6428ed0b231f8bcbd3/torch_geometric-2.3.0.tar.gz + url: https://files.pythonhosted.org/packages/06/a5/9f5af849c4185da5ea55f70ef17e23f93355cd4e989d82cfc8ba2d8747af/torch_geometric-2.3.1.tar.gz requirements: host: From d974f519be26d0356d538237ffb7c36fd9a73e3d Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Sun, 4 Jun 2023 01:23:33 -0700 Subject: [PATCH 1249/2432] Use `NeighborLoader` instead of `NeighborSampler` in Cluster-GCN example (#7498) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- examples/cluster_gcn_reddit.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/cluster_gcn_reddit.py b/examples/cluster_gcn_reddit.py index 8b293a384183..eddcfa7a2056 100644 --- a/examples/cluster_gcn_reddit.py +++ b/examples/cluster_gcn_reddit.py @@ -4,7 +4,7 @@ from tqdm import tqdm from torch_geometric.datasets import Reddit -from torch_geometric.loader import ClusterData, ClusterLoader, NeighborSampler +from torch_geometric.loader import ClusterData, ClusterLoader, NeighborLoader from torch_geometric.nn import SAGEConv dataset = Reddit('../data/Reddit') @@ -15,8 +15,8 @@ train_loader = ClusterLoader(cluster_data, 
batch_size=20, shuffle=True, num_workers=12) -subgraph_loader = NeighborSampler(data.edge_index, sizes=[-1], batch_size=1024, - shuffle=False, num_workers=12) +subgraph_loader = NeighborLoader(data, num_neighbors=[-1], batch_size=1024, + shuffle=False, num_workers=12) class Net(torch.nn.Module): @@ -43,16 +43,16 @@ def inference(self, x_all): # immediately computing the final representations of each batch. for i, conv in enumerate(self.convs): xs = [] - for batch_size, n_id, adj in subgraph_loader: - edge_index, _, size = adj.to(device) - x = x_all[n_id].to(device) - x_target = x[:size[1]] + for batch in subgraph_loader: + edge_index = batch.edge_index.to(device) + x = x_all[batch.n_id].to(device) + x_target = x[:batch.batch_size] x = conv((x, x_target), edge_index) if i != len(self.convs) - 1: x = F.relu(x) xs.append(x.cpu()) - pbar.update(batch_size) + pbar.update(batch.batch_size) x_all = torch.cat(xs, dim=0) From 1e57a6c69cf15868bccc10b79479fdc4e7468211 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 4 Jun 2023 11:27:47 +0200 Subject: [PATCH 1250/2432] Update the documentation of `GraphMaskExplainer` (#7504) --- test/utils/test_sparse.py | 7 +- .../contrib/explain/graphmask_explainer.py | 127 ++++++++++++------ 2 files changed, 92 insertions(+), 42 deletions(-) diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index aa4efcae4fba..10ef78c32966 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -4,7 +4,7 @@ import torch_geometric.typing from torch_geometric.profile import benchmark -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withPackage from torch_geometric.typing import SparseTensor from torch_geometric.utils import ( dense_to_sparse, @@ -181,6 +181,7 @@ def test_to_torch_csc_tensor(): edge_attr) +@withPackage('torch>=2.1.0') def test_to_torch_coo_tensor_save_load(tmp_path): edge_index = torch.tensor([ [0, 1, 1, 2, 2, 3], @@ -192,9 +193,7 @@ def test_to_torch_coo_tensor_save_load(tmp_path): path = osp.join(tmp_path, 'adj.t') torch.save(adj, path) adj = torch.load(path) - - # This is obviously a bug in PyTorch. Wait for a fix... 
- assert not adj.is_coalesced() + assert adj.is_coalesced() def test_to_edge_index(): diff --git a/torch_geometric/contrib/explain/graphmask_explainer.py b/torch_geometric/contrib/explain/graphmask_explainer.py index d0fa742b9cf2..b7d137c2a7bb 100644 --- a/torch_geometric/contrib/explain/graphmask_explainer.py +++ b/torch_geometric/contrib/explain/graphmask_explainer.py @@ -1,11 +1,11 @@ import math -from typing import Optional, Union +from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F -from torch import Tensor, sigmoid -from torch.nn import LayerNorm, Linear, Parameter, ReLU, Sequential, init +from torch import Tensor +from torch.nn import LayerNorm, Linear, Parameter, ReLU, Sequential from tqdm import tqdm from torch_geometric.explain import Explanation @@ -19,7 +19,7 @@ from torch_geometric.nn import MessagePassing -def explain_message(self, out, x_i, x_j): +def explain_message(self, out: Tensor, x_i: Tensor, x_j: Tensor) -> Tensor: norm = Sequential(LayerNorm(out.size(-1)).to(out.device), ReLU()) basis_messages = norm(out) @@ -128,11 +128,11 @@ def forward( if self.model_config.task_level == ModelTaskLevel.node: hard_node_mask, hard_edge_mask = self._get_hard_masks( model, index, edge_index, num_nodes=x.size(0)) - self.train_explainer(model, x, edge_index, target=target, index=index, - **kwargs) + self._train_explainer(model, x, edge_index, target=target, index=index, + **kwargs) node_mask = self._post_process_mask(self.node_feat_mask, hard_node_mask, apply_sigmoid=True) - edge_mask = self.explain(model, index=index) + edge_mask = self._explain(model, index=index) edge_mask = edge_mask[:edge_index.size(1)] return Explanation(node_mask=node_mask, edge_mask=edge_mask) @@ -140,21 +140,32 @@ def forward( def supports(self) -> bool: return True - def hard_concrete(self, input_element, summarize_penalty=True, beta=1 / 3, - gamma=-0.2, zeta=1.2, loc_bias=2, min_val=0, max_val=1, - training=True) -> Union[Tensor, Tensor]: + def _hard_concrete( + self, + input_element: Tensor, + summarize_penalty: bool = True, + beta: float = 1 / 3, + gamma: float = -0.2, + zeta: float = 1.2, + loc_bias: int = 2, + min_val: int = 0, + max_val: int = 1, + training: bool = True, + ) -> Tuple[Tensor, Tensor]: + r"""Helps to set the edge mask while sampling its values from the + hard-concrete distribution.""" input_element = input_element + loc_bias if training: u = torch.empty_like(input_element).uniform_(1e-6, 1.0 - 1e-6) - s = sigmoid( + s = torch.sigmoid( (torch.log(u) - torch.log(1 - u) + input_element) / beta) - penalty = sigmoid(input_element - - beta * np.math.log(-gamma / zeta)) + penalty = torch.sigmoid(input_element - + beta * np.math.log(-gamma / zeta)) else: - s = sigmoid(input_element) + s = torch.sigmoid(input_element) penalty = torch.zeros_like(input_element) if summarize_penalty: @@ -170,8 +181,15 @@ def hard_concrete(self, input_element, summarize_penalty=True, beta=1 / 3, return clipped_s, penalty - def set_masks(self, i_dim, j_dim, h_dim, x, device): - (num_nodes, num_feat), std = x.size(), 0.1 + def _set_masks( + self, + i_dim: List[int], + j_dim: List[int], + h_dim: List[int], + x: Tensor, + ): + r"""Sets the node masks and edge masks.""" + (num_nodes, num_feat), std, device = x.size(), 0.1, x.device self.feat_mask_type = self.explainer_config.node_mask_type if self.feat_mask_type == MaskType.attributes: @@ -227,23 +245,25 @@ def set_masks(self, i_dim, j_dim, h_dim, x, device): for parameter in self.parameters(): 
parameter.requires_grad = False - def enable_layer(self, layer): + def _enable_layer(self, layer: int): + r"""Enables the input layer's edge mask.""" for d in range(layer * 4, (layer * 4) + 4): for parameter in self.gates[d].parameters(): parameter.requires_grad = True self.full_biases[layer].requires_grad = True self.baselines[layer].requires_grad = True - def reset_parameters(self, input_dims, h_dim): + def reset_parameters(self, input_dims: List[int], h_dim: List[int]): + r"""Resets all learnable parameters of the module.""" fan_in = sum(input_dims) std = math.sqrt(2.0 / float(fan_in + h_dim)) a = math.sqrt(3.0) * std for transform in self.transforms: - init._no_grad_uniform_(transform.weight, -a, a) + torch.nn.init._no_grad_uniform_(transform.weight, -a, a) - init.zeros_(self.full_bias) + torch.nn.init.zeros_(self.full_bias) for layer_norm in self.layer_norms: layer_norm.reset_parameters() @@ -279,7 +299,7 @@ def _loss_multiclass_classification( return loss_fn(y_hat, y) - def _loss(self, y_hat: Tensor, y: Tensor, penalty) -> Tensor: + def _loss(self, y_hat: Tensor, y: Tensor, penalty: float) -> Tensor: if self.model_config.mode == ModelMode.binary_classification: loss = self._loss_binary_classification(y_hat, y) elif self.model_config.mode == ModelMode.multiclass_classification: @@ -303,19 +323,30 @@ def _loss(self, y_hat: Tensor, y: Tensor, penalty) -> Tensor: return loss - def freeze_model(self, module): + def _freeze_model(self, module: torch.nn.Module): + r"""Freezes the parameters of the original GNN model by disabling + their gradients.""" for param in module.parameters(): param.requires_grad = False - def _set_flags(self, model): + def _set_flags(self, model: torch.nn.Module): + r"""Initializes the underlying explainer model's parameters for each + layer of the original GNN model.""" for module in model.modules(): if isinstance(module, MessagePassing): module.explain_message = explain_message.__get__( module, MessagePassing) module.explain = True - def _inject_messages(self, model: torch.nn.Module, message_scale, - message_replacement, set=False): + def _inject_messages( + self, + model: torch.nn.Module, + message_scale: List[Tensor], + message_replacement: torch.nn.ParameterList, + set: bool = False, + ): + r"""Injects the computed messages into each layer of the original GNN + model.""" i = 0 for module in model.modules(): if isinstance(module, MessagePassing): @@ -327,7 +358,7 @@ def _inject_messages(self, model: torch.nn.Module, message_scale, module.message_scale = None module.message_replacement = None - def train_explainer( + def _train_explainer( self, model: torch.nn.Module, x: Tensor, @@ -337,12 +368,25 @@ def train_explainer( index: Optional[Union[int, Tensor]] = None, **kwargs, ): + r"""Trains the underlying explainer model. + + Args: + model (torch.nn.Module): The model to explain. + x (torch.Tensor): The input node features. + edge_index (torch.Tensor): The input edge indices. + target (torch.Tensor): The target of the model. + index (int or torch.Tensor, optional): The index of the model + output to explain. Needs to be a single index. + (default: :obj:`None`) + **kwargs (optional): Additional keyword arguments passed to + :obj:`model`. 
+ """ if (not isinstance(index, Tensor) and not isinstance(index, int) and index is not None): raise ValueError("'index' parameter can only be a 'Tensor', " "'integer' or set to 'None' instead.") - self.freeze_model(model) + self._freeze_model(model) self._set_flags(model) input_dims, output_dims = [], [] @@ -351,7 +395,7 @@ def train_explainer( input_dims.append(module.in_channels) output_dims.append(module.out_channels) - self.set_masks(input_dims, output_dims, output_dims, x, x.device) + self._set_masks(input_dims, output_dims, output_dims, x) optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) @@ -370,7 +414,7 @@ def train_explainer( pbar.set_description( f'Train explainer for graph {index} with layer ' f'{layer}') - self.enable_layer(layer) + self._enable_layer(layer) for epoch in range(self.epochs): with torch.no_grad(): model(x, edge_index, **kwargs) @@ -395,13 +439,13 @@ def train_explainer( partial = self.gates[i * 4][j](gate_input[j][i]) except Exception: try: - self.set_masks(output_dims, output_dims, - output_dims, x, x.device) + self._set_masks(output_dims, output_dims, + output_dims, x) partial = self.gates[i * 4][j]( gate_input[j][i]) except Exception: - self.set_masks(input_dims, input_dims, - output_dims, x, x.device) + self._set_masks(input_dims, input_dims, + output_dims, x) partial = self.gates[i * 4][j]( gate_input[j][i]) result = self.gates[(i * 4) + 1][j](partial) @@ -411,7 +455,7 @@ def train_explainer( sampling_weights = self.gates[(i * 4) + 3](relu_output).squeeze( dim=-1) - sampling_weights, penalty = self.hard_concrete( + sampling_weights, penalty = self._hard_concrete( sampling_weights) gates.append(sampling_weights) total_penalty += penalty @@ -457,19 +501,26 @@ def train_explainer( if self.log: pbar.close() - def explain( + def _explain( self, model: torch.nn.Module, *, index: Optional[Union[int, Tensor]] = None, ) -> Tensor: - + r"""Generates explanations for the original GNN model. + + Args: + model (torch.nn.Module): The model to explain. + index (int or torch.Tensor, optional): The index of the model + output to explain. Needs to be a single index. + (default: :obj:`None`). + """ if (not isinstance(index, Tensor) and not isinstance(index, int) and index is not None): raise ValueError("'index' parameter can only be a 'Tensor', " "'integer' or set to 'None' instead.") - self.freeze_model(model) + self._freeze_model(model) self._set_flags(model) with torch.no_grad(): @@ -499,7 +550,7 @@ def explain( relu_output = self.gates[(i * 4) + 2](output / len(gate_input)) sampling_weights = self.gates[(i * 4) + 3](relu_output).squeeze(dim=-1) - sampling_weights, _ = self.hard_concrete( + sampling_weights, _ = self._hard_concrete( sampling_weights, training=False) if i == 0: edge_weight = sampling_weights From c1bffa145a90c563409d937932e887fe45fa9fa0 Mon Sep 17 00:00:00 2001 From: toenshoff Date: Sun, 4 Jun 2023 12:13:23 +0200 Subject: [PATCH 1251/2432] Add option to override the `segmm` heuristic decision in `HeteroLinear` (#7474) We added an option to override the heuristic that decides whether segmm should be used in HeteroLinear ([#7258](https://github.com/pyg-team/pytorch_geometric/pull/7258)). We also added the option to the constructor of HGTConv which is passed to the projections for keys and values, respectively. We found that on one use case we are working on the heuristic incorrectly selects the for-loop implementation even though it runs around 3 times slower than the fused segmm kernel. 
This happens with a standard 4-layer HGT model with dimension 128 and 4 heads when training on a heterogeneous graph with 16 node types, 26 edge types and around 1 Million vertices. We can not share the dataset, but the user should be able to force a certain selection if the heuristic makes a sub-optimal choice on a given configuration. Interestingly, we use an A40 GPU which should be very similar to the A100 that was used to fit the heuristic. It is not clear to us why a wrong selection is made. We were wondering if the training of the heuristic should also incorporate the time needed for a backwards pass through the layer as well as imbalanced node types? --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/dense/linear.py | 37 ++++++++++++++++++++++++++---- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7f2f300b702..bc77cd0708c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index e7f8da99e41b..90c0d8e9e5c9 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -198,6 +198,13 @@ class HeteroLinear(torch.nn.Module): :obj:`type_vec` is sorted. This avoids internal re-sorting of the data and can improve runtime and memory efficiency. (default: :obj:`False`) + use_segmm (bool, optional): If set to :obj:`True` and :obj:`pyg-lib` is + installed, this module will use the fused :obj:`segment_matmul` + kernel to parallelize the linear transformation across types. If + set to :obj:`False`, :obj:`segment_matmul` will not be used. If + left as :obj:`None` and :obj:`pyg-lib` is installed, the module + will determine heuristically whether to use :obj:`segment_matmul`. + (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.Linear`. @@ -207,16 +214,24 @@ class HeteroLinear(torch.nn.Module): type vector :math:`(*)` - **output:** features :math:`(*, F_{out})` """ - def __init__(self, in_channels: int, out_channels: int, num_types: int, - is_sorted: bool = False, **kwargs): + def __init__( + self, + in_channels: int, + out_channels: int, + num_types: int, + is_sorted: bool = False, + use_segmm: Optional[bool] = None, + **kwargs, + ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.num_types = num_types self.is_sorted = is_sorted + self.use_segmm: int = -1 if use_segmm is None else int(use_segmm) self.kwargs = kwargs - self.use_segmm: int = -1 + if self.in_channels == -1: self.weight = nn.parameter.UninitializedParameter() self._hook = self.register_forward_pre_hook( @@ -313,6 +328,13 @@ class HeteroDictLinear(torch.nn.Module): out_channels (int): Size of each output sample. types (List[Any], optional): The keys of the input dictionary. 
(default: :obj:`None`) + use_segmm (bool, optional): If set to :obj:`True` and :obj:`pyg-lib` is + installed, this module will use the fused :obj:`segment_matmul` + kernel to parallelize the linear transformation across types. If + set to :obj:`False`, :obj:`segment_matmul` will not be used. If + left as :obj:`None` and :obj:`pyg-lib` is installed, the module + will determine heuristically whether to use :obj:`segment_matmul`. + (default: :obj:`None`) **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.Linear`. """ @@ -321,6 +343,7 @@ def __init__( in_channels: Union[int, Dict[Any, int]], out_channels: int, types: Optional[Any] = None, + use_segmm: Optional[bool] = None, **kwargs, ): super().__init__() @@ -350,6 +373,7 @@ def __init__( self.in_channels = in_channels self.out_channels = out_channels + self.use_segmm = use_segmm self.kwargs = kwargs self.lins = torch.nn.ModuleDict({ @@ -377,8 +401,13 @@ def forward( # Only apply fused kernel for more than 10 types, otherwise default # back to sequential computation (which is faster for these cases). + if self.use_segmm is None: + use_segmm = len(x_dict) >= 10 + else: + use_segmm = self.use_segmm + if (torch_geometric.typing.WITH_GMM and not torch.jit.is_scripting() - and len(x_dict) >= 10): + and use_segmm): xs, weights, biases = [], [], [] for key, lin in self.lins.items(): if key in x_dict: From d90842d6f38e111d14353449fef8eeac54d52994 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Sun, 4 Jun 2023 13:00:25 +0200 Subject: [PATCH 1252/2432] Introduce `disable_dynamic_shapes` experimental flag; adding its use into `to_dense_batch function` (#7246) There are devices that do not support dynamic shapes - (compiling and optimizing only static graphs). The ability to set and read the "disable_dynamic_shapes" flag allows implementors to provide static shape-friendly implementations and report user-friendly messages if it is impossible to avoid using dynamic shapes. --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + test/test_experimental.py | 3 +- test/utils/test_to_dense_batch.py | 22 +++++++++++ torch_geometric/experimental.py | 51 ++++++++++++++++++++++++- torch_geometric/utils/to_dense_batch.py | 10 ++++- 5 files changed, 82 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc77cd0708c7..f1c4f940d456 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) diff --git a/test/test_experimental.py b/test/test_experimental.py index 39bfd71f359f..6d1cd4e513dc 100644 --- a/test/test_experimental.py +++ b/test/test_experimental.py @@ -7,8 +7,7 @@ ) -@pytest.mark.skip(reason='No experimental options available right now.') -@pytest.mark.parametrize('options', [None]) +@pytest.mark.parametrize('options', ['disable_dynamic_shapes']) def test_experimental_mode(options): assert is_experimental_mode_enabled(options) is False with experimental_mode(options): diff --git a/test/utils/test_to_dense_batch.py b/test/utils/test_to_dense_batch.py index 0c84aef88641..1611cb39fb69 100644 --- a/test/utils/test_to_dense_batch.py +++ b/test/utils/test_to_dense_batch.py @@ -4,6 +4,7 @@ import torch from torch import Tensor +from torch_geometric.experimental import set_experimental_mode from torch_geometric.testing import onlyFullTest from torch_geometric.utils import to_dense_batch @@ -54,6 +55,27 @@ def test_to_dense_batch(fill): assert out.size() == (4, 3, 2) +def test_to_dense_batch_disable_dynamic_shapes(): + x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + batch = torch.tensor([0, 0, 1, 2, 2, 2]) + + with set_experimental_mode(True, 'disable_dynamic_shapes'): + with pytest.raises(ValueError, match="'batch_size' needs to be set"): + out, mask = to_dense_batch(x, batch, max_num_nodes=6) + with pytest.raises(ValueError, match="'max_num_nodes' needs to be"): + out, mask = to_dense_batch(x, batch, batch_size=4) + with pytest.raises(ValueError, match="'batch_size' needs to be set"): + out, mask = to_dense_batch(x) + + out, mask = to_dense_batch(x, batch_size=1, max_num_nodes=6) + assert out.size() == (1, 6, 2) + assert mask.size() == (1, 6) + + out, mask = to_dense_batch(x, batch, batch_size=3, max_num_nodes=10) + assert out.size() == (3, 10, 2) + assert mask.size() == (3, 10) + + @onlyFullTest def test_to_dense_batch_jit(): @torch.jit.script diff --git a/torch_geometric/experimental.py b/torch_geometric/experimental.py index c4134c5aa6ae..9177f2cb80b1 100644 --- a/torch_geometric/experimental.py +++ b/torch_geometric/experimental.py @@ -1,6 +1,8 @@ -from typing import List, Optional, Union +import functools +import inspect +from typing import Any, Callable, Dict, List, Optional, Union -__experimental_flag__ = {} +__experimental_flag__ = {'disable_dynamic_shapes': False} Options = Optional[Union[str, List[str]]] @@ -77,3 +79,48 @@ def __enter__(self): def __exit__(self, *args): for option, value in self.previous_state.items(): __experimental_flag__[option] = value + + +def disable_dynamic_shapes(required_args: List[str]) -> Callable: + r"""A decorator that disables the usage of dynamic shapes for the given + arguments, i.e., it will raise an error in case :obj:`required_args` are + not passed and needs to be automatically inferred.""" + def decorator(func: Callable) -> Callable: + spec = inspect.getfullargspec(func) + + required_args_pos: Dict[str, int] = {} + for arg_name in required_args: + if arg_name not in spec.args: + raise 
ValueError(f"The function '{func}' does not have a " + f"'{arg_name}' argument") + required_args_pos[arg_name] = spec.args.index(arg_name) + + num_args = len(spec.args) + num_default_args = 0 if spec.defaults is None else len(spec.defaults) + num_positional_args = num_args - num_default_args + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not is_experimental_mode_enabled('disable_dynamic_shapes'): + return func(*args, **kwargs) + + for required_arg in required_args: + index = required_args_pos[required_arg] + + value: Optional[Any] = None + if index < len(args): + value = args[index] + elif required_arg in kwargs: + value = kwargs[required_arg] + elif num_default_args > 0: + value = spec.defaults[index - num_positional_args] + + if value is None: + raise ValueError(f"Dynamic shapes disabled. Argument " + f"'{required_arg}' needs to be set") + + return func(*args, **kwargs) + + return wrapper + + return decorator diff --git a/torch_geometric/utils/to_dense_batch.py b/torch_geometric/utils/to_dense_batch.py index 1e5538fe3c0e..c6857caf278b 100644 --- a/torch_geometric/utils/to_dense_batch.py +++ b/torch_geometric/utils/to_dense_batch.py @@ -3,9 +3,14 @@ import torch from torch import Tensor +from torch_geometric.experimental import ( + disable_dynamic_shapes, + is_experimental_mode_enabled, +) from torch_geometric.utils import scatter +@disable_dynamic_shapes(required_args=['batch_size', 'max_num_nodes']) def to_dense_batch( x: Tensor, batch: Optional[Tensor] = None, @@ -106,9 +111,12 @@ def to_dense_batch( cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)]) filter_nodes = False + dynamic_shapes_disabled = is_experimental_mode_enabled( + 'disable_dynamic_shapes') + if max_num_nodes is None: max_num_nodes = int(num_nodes.max()) - elif num_nodes.max() > max_num_nodes: + elif not dynamic_shapes_disabled and num_nodes.max() > max_num_nodes: filter_nodes = True tmp = torch.arange(batch.size(0), device=x.device) - cum_nodes[batch] From e2f1164ba3dc17cf2d2af2222171f01f27d12aa4 Mon Sep 17 00:00:00 2001 From: Furkan Akkurt <71407287+furkanakkurt1335@users.noreply.github.com> Date: Sun, 4 Jun 2023 15:09:39 +0300 Subject: [PATCH 1253/2432] Fix typo in documentation (#7507) --- docs/source/get_started/introduction.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/get_started/introduction.rst b/docs/source/get_started/introduction.rst index af0e1ebe81e1..5616c32f57d4 100644 --- a/docs/source/get_started/introduction.rst +++ b/docs/source/get_started/introduction.rst @@ -3,7 +3,7 @@ Introduction by Example We shortly introduce the fundamental concepts of :pyg:`PyG` through self-contained examples. -For an introduction to Graph Machine Learning, we refer the interested reader to the :stanford:`null` `Stanford CS22W: Machine Learning with Graphs `__ lectures. +For an introduction to Graph Machine Learning, we refer the interested reader to the :stanford:`null` `Stanford CS224W: Machine Learning with Graphs `__ lectures. For an interactive introduction to :pyg:`PyG`, we recommend our carefully curated :colab:`null` `Google Colab `__ notebooks. 
At its core, :pyg:`PyG` provides the following main features: From 464d177eb716a035cd9501afe4fa9c80f07b071e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 4 Jun 2023 15:55:02 +0200 Subject: [PATCH 1254/2432] Fix full tests on master (#7506) --- test/loader/test_cluster.py | 17 ++++++---------- torch_geometric/loader/cluster.py | 32 +++++++++++++++++++------------ 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index c934eb52e5b8..6a58cf42fd1d 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -7,16 +7,9 @@ from torch_geometric.testing import onlyFullTest from torch_geometric.utils import sort_edge_index -try: - rowptr = torch.tensor([0, 1]) - col = torch.tensor([0]) - torch.ops.torch_sparse.partition(rowptr, col, None, 1, True) - WITH_METIS = True -except (AttributeError, RuntimeError): - WITH_METIS = False or torch_geometric.typing.WITH_METIS - -@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') +@pytest.mark.skipif(not torch_geometric.typing.WITH_METIS, + reason='Not compiled with METIS support') def test_cluster_gcn(): adj = torch.tensor([ [1, 1, 1, 0, 1, 0], @@ -109,7 +102,8 @@ def test_cluster_gcn(): assert torch.equal(out.edge_attr, tmp[1]) -@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') +@pytest.mark.skipif(not torch_geometric.typing.WITH_METIS, + reason='Not compiled with METIS support') def test_keep_inter_cluster_edges(): adj = torch.tensor([ [1, 1, 1, 0, 1, 0], @@ -145,7 +139,8 @@ def test_keep_inter_cluster_edges(): @onlyFullTest -@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') +@pytest.mark.skipif(not torch_geometric.typing.WITH_METIS, + reason='Not compiled with METIS support') def test_cluster_gcn_correctness(get_dataset): dataset = get_dataset('Cora') data = dataset[0].clone() diff --git a/torch_geometric/loader/cluster.py b/torch_geometric/loader/cluster.py index 4b3cbb51a072..3574b6869e50 100644 --- a/torch_geometric/loader/cluster.py +++ b/torch_geometric/loader/cluster.py @@ -90,25 +90,33 @@ def _metis(self, edge_index: Tensor, num_nodes: int) -> Tensor: rowptr = index2ptr(row, size=num_nodes) # Compute METIS partitioning: - if torch_geometric.typing.WITH_METIS: - return pyg_lib.partition.metis( - rowptr.cpu(), - col.cpu(), - self.num_parts, - recursive=self.recursive, - ).to(edge_index.device) + cluster: Optional[Tensor] = None if torch_geometric.typing.WITH_TORCH_SPARSE: - return torch.ops.torch_sparse.partition( + try: + cluster = torch.ops.torch_sparse.partition( + rowptr.cpu(), + col.cpu(), + None, + self.num_parts, + self.recursive, + ).to(edge_index.device) + except (AttributeError, RuntimeError): + pass + + if cluster is None and torch_geometric.typing.WITH_METIS: + cluster = pyg_lib.partition.metis( rowptr.cpu(), col.cpu(), - None, self.num_parts, - self.recursive, + recursive=self.recursive, ).to(edge_index.device) - raise ImportError(f"'{self.__class__.__name__}' requires either " - f"'pyg-lib' or 'torch-sparse'") + if cluster is None: + raise ImportError(f"'{self.__class__.__name__}' requires either " + f"'pyg-lib' or 'torch-sparse'") + + return cluster def _partition(self, edge_index: Tensor, cluster: Tensor) -> Partition: # Computes node-level and edge-level permutations and permutes the edge From 4b9cb65857d29e0f8a68676e25b8b5455c3e7e81 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 4 Jun 2023 16:34:11 +0200 Subject: [PATCH 1255/2432] Fix 
`disable_dynamic_shapes` decorator in combination with TorchScript (#7508) --- torch_geometric/experimental.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/torch_geometric/experimental.py b/torch_geometric/experimental.py index 9177f2cb80b1..08812e0eedd9 100644 --- a/torch_geometric/experimental.py +++ b/torch_geometric/experimental.py @@ -2,7 +2,15 @@ import inspect from typing import Any, Callable, Dict, List, Optional, Union -__experimental_flag__ = {'disable_dynamic_shapes': False} +import torch + +# TODO (matthias) This file currently requires manual imports to let +# TorchScript work on decorated functions. Not totally sure why :( +from torch_geometric.utils import * # noqa + +__experimental_flag__: Dict[str, bool] = { + 'disable_dynamic_shapes': False, +} Options = Optional[Union[str, List[str]]] @@ -19,6 +27,8 @@ def is_experimental_mode_enabled(options: Options = None) -> bool: r"""Returns :obj:`True` if the experimental mode is enabled. See :class:`torch_geometric.experimental_mode` for a list of (optional) options.""" + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return False options = get_options(options) return all([__experimental_flag__[option] for option in options]) From c27d14a3650eb59d0e8ef8066422398e37f06b0c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 4 Jun 2023 18:04:00 +0200 Subject: [PATCH 1256/2432] Remove some warnings (#7509) --- test/nn/dense/test_linear.py | 2 ++ torch_geometric/data/hetero_data.py | 3 +++ torch_geometric/nn/aggr/multi.py | 4 +++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index cba07e0db952..d8ddbd8e764f 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -130,6 +130,8 @@ def test_hetero_linear(device): @withCUDA @pytest.mark.parametrize('use_segmm', [True, False]) def test_hetero_linear_amp(device, use_segmm): + warnings.filterwarnings('ignore', '.*but CUDA is not available.*') + x = torch.randn(3, 16, device=device) type_vec = torch.tensor([0, 1, 2], device=device) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index da19896d040a..8c352a8c7919 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -985,6 +985,9 @@ def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: r"""Gets an edge index from edge storage, in the specified layout.""" store = self[edge_attr.edge_type] + edge_attrs = getattr(self, '_edge_attrs', {}) + if (edge_attr.edge_type, edge_attr.layout) in edge_attrs: + edge_attr = edge_attrs[(edge_attr.edge_type, edge_attr.layout)] if edge_attr.size is None: edge_attr.size = store.size() # Modify in-place. diff --git a/torch_geometric/nn/aggr/multi.py b/torch_geometric/nn/aggr/multi.py index 72b941ceda2c..4a73d6ae9075 100644 --- a/torch_geometric/nn/aggr/multi.py +++ b/torch_geometric/nn/aggr/multi.py @@ -38,6 +38,9 @@ class MultiAggregation(Aggregation): (int) is needed to be specified for the number of parallel attention heads. 
(default: :obj:`None`) """ + fused_out_index: List[int] + is_fused_aggr: List[bool] + def __init__( self, aggrs: List[Union[Aggregation, str]], @@ -73,7 +76,6 @@ def __init__( # Divide the set into fusable and non-fusable aggregations: fused_aggrs: List[Aggregation] = [] self.fused_out_index: List[int] = [] - # self.non_fused_aggrs: List[Aggregation] = [] self.is_fused_aggr: List[bool] = [] for i, aggr in enumerate(self.aggrs): if aggr.__class__ in FusedAggregation.FUSABLE_AGGRS: From 46bfe7aca71f6520532b1329e87a9a490af7e431 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 5 Jun 2023 08:15:36 +0200 Subject: [PATCH 1257/2432] Fix `GCN2` example for `torch.sparse_csr` layout (#7511) --- examples/gcn2_cora.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/gcn2_cora.py b/examples/gcn2_cora.py index 11f84a3f1bdd..eadc358fd03e 100644 --- a/examples/gcn2_cora.py +++ b/examples/gcn2_cora.py @@ -7,14 +7,12 @@ import torch_geometric.transforms as T from torch_geometric.datasets import Planetoid from torch_geometric.nn import GCN2Conv -from torch_geometric.nn.conv.gcn_conv import gcn_norm dataset = 'Cora' path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) -transform = T.Compose([T.NormalizeFeatures(), T.ToSparseTensor()]) +transform = T.Compose([T.NormalizeFeatures(), T.GCNNorm(), T.ToSparseTensor()]) dataset = Planetoid(path, dataset, transform=transform) data = dataset[0] -data.adj_t = gcn_norm(data.adj_t) # Pre-process GCN normalization. class Net(torch.nn.Module): From 0fb30b8fbc3cafd65b19d3d3122255dc7184398b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 5 Jun 2023 08:31:14 +0200 Subject: [PATCH 1258/2432] Fail gracefully in case `GNNExplainer` will not compute gradients for node features or edges (#7512) --- torch_geometric/explain/algorithm/gnn_explainer.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/torch_geometric/explain/algorithm/gnn_explainer.py b/torch_geometric/explain/algorithm/gnn_explainer.py index 0ecb1e492aac..cf6449a35f18 100644 --- a/torch_geometric/explain/algorithm/gnn_explainer.py +++ b/torch_geometric/explain/algorithm/gnn_explainer.py @@ -131,8 +131,18 @@ def _train( # involved into making the prediction. These are all the nodes and # edges with gradient != 0 (without regularization applied). if i == 0 and self.node_mask is not None: + if self.node_mask.grad is None: + raise ValueError("Could not compute gradients for node " + "features. Please make sure that node " + "features are used inside the model or " + "disable it via `node_mask_type=None`.") self.hard_node_mask = self.node_mask.grad != 0.0 if i == 0 and self.edge_mask is not None: + if self.edge_mask.grad is None: + raise ValueError("Could not compute gradients for edges. 
" + "Please make sure that edges are used " + "via message passing inside the model or " + "disable it via `edge_mask_type=None`.") self.hard_edge_mask = self.edge_mask.grad != 0.0 def _initialize_masks(self, x: Tensor, edge_index: Tensor): @@ -310,7 +320,7 @@ def _convert_output(self, explanation, edge_index, index=None, x=None): self.model, index, edge_index, num_nodes=x.size(0)) edge_mask = edge_mask.to(x.dtype) else: - edge_mask = torch.ones(edge_index.shape[1], + edge_mask = torch.ones(edge_index.size(1), device=edge_index.device) return node_mask, edge_mask From d63627ccdc80e99cf4ed4c9ca41bf67e405a0fb6 Mon Sep 17 00:00:00 2001 From: Wendy Mak <6398157+wwymak@users.noreply.github.com> Date: Mon, 5 Jun 2023 11:13:59 +0200 Subject: [PATCH 1259/2432] Add `NodeEncoder` from the GraphMixer paper (#7501) Not very sure it's the correct way to implement what they did in GraphMixer, but hopefully not too far off.. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/models/test_graph_mixer.py | 26 +++++++++++++ torch_geometric/nn/models/graph_mixer.py | 49 ++++++++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100644 test/nn/models/test_graph_mixer.py create mode 100644 torch_geometric/nn/models/graph_mixer.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f1c4f940d456..7f22d25f8576 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) - Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) diff --git a/test/nn/models/test_graph_mixer.py b/test/nn/models/test_graph_mixer.py new file mode 100644 index 000000000000..072ad5061f37 --- /dev/null +++ b/test/nn/models/test_graph_mixer.py @@ -0,0 +1,26 @@ +import torch + +from torch_geometric.nn.models.graph_mixer import NodeEncoder + + +def test_node_encoder(): + x = torch.arange(4, dtype=torch.float).view(-1, 1) + edge_index = torch.tensor([[1, 2, 0, 0, 1, 3], [0, 0, 1, 2, 2, 2]]) + edge_time = torch.tensor([0, 1, 1, 1, 2, 3]) + seed_time = torch.tensor([2, 2, 2, 2]) + + encoder = NodeEncoder(time_window=2) + assert str(encoder) == 'NodeEncoder(time_window=2)' + + out = encoder(x, edge_index, edge_time, seed_time) + # Node 0 aggregates information from node 2 (excluding node 1). + # Node 1 aggregates information from node 0. + # Node 2 aggregates information from node 0 and node 1 (exluding node 3). + # Node 3 aggregates no information. + expected = torch.tensor([ + [0 + 2], + [1 + 0], + [2 + 0.5 * (0 + 1)], + [3], + ]) + assert torch.allclose(out, expected) diff --git a/torch_geometric/nn/models/graph_mixer.py b/torch_geometric/nn/models/graph_mixer.py new file mode 100644 index 000000000000..dd702383aa01 --- /dev/null +++ b/torch_geometric/nn/models/graph_mixer.py @@ -0,0 +1,49 @@ +import torch +from torch import Tensor + +from torch_geometric.utils import scatter + + +class NodeEncoder(torch.nn.Module): + r"""The node encoder module from the `"Do We Really Need Complicated + Model Architectures for Temporal Networks?" + `_ paper. 
+ :class:`NodeEncoder` captures the 1-hop temporal neighborhood information + via mean pooling. + + .. math:: + \mathbf{x}_v^{\prime}(t_0) = \mathbf{x}_v + \textrm{mean} \left\{ + \mathbf{x}_w : w \in \mathcal{N}(v, t_0 - T, t_0) \right\} + + Args: + time_window (int): The temporal window size :math:`T` to define the + 1-hop temporal neighborhood. + """ + def __init__(self, time_window: int): + super().__init__() + self.time_window = time_window + + def forward( + self, + x: Tensor, + edge_index: Tensor, + edge_time: Tensor, + seed_time: Tensor, + ) -> Tensor: + r""" + Args: + x (torch.Tensor): The input node features. + edge_index (torch.Tensor): The edge indices. + edge_time (torch.Tensor): The timestamp attached to every edge. + seed_time (torch.Tensor): The seed time :math:`t_0` for every + destination node. + """ + mask = ((edge_time <= seed_time[edge_index[1]]) & + (edge_time > seed_time[edge_index[1]] - self.time_window)) + + src, dst = edge_index[:, mask] + mean = scatter(x[src], dst, dim=0, dim_size=x.size(0), reduce='mean') + return x + mean + + def __repr__(self) -> str: + return f'{self.__class__.__name__}(time_window={self.time_window})' From ccc77c7108430b1943e3a41b300a9abe7bbfee80 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 5 Jun 2023 14:12:22 +0200 Subject: [PATCH 1260/2432] Added the `HM` personalized fashion recommendation dataset (#7515) --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/hm.py | 162 +++++++++++++++++++++++++ torch_geometric/datasets/movie_lens.py | 11 +- 4 files changed, 172 insertions(+), 4 deletions(-) create mode 100644 torch_geometric/datasets/hm.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f22d25f8576..729d526812a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515)) - Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) - Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 1891f59af4ba..8734f6285b11 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -85,6 +85,7 @@ from .taobao import Taobao from .igmc_dataset import IGMCDataset from .amazon_book import AmazonBook +from .hm import HM from .fake import FakeDataset, FakeHeteroDataset from .sbm_dataset import StochasticBlockModelDataset @@ -188,6 +189,7 @@ 'Taobao', 'IGMCDataset', 'AmazonBook', + 'HM', ] synthetic_datasets = [ 'FakeDataset', diff --git a/torch_geometric/datasets/hm.py b/torch_geometric/datasets/hm.py new file mode 100644 index 000000000000..daf6f4bde6a1 --- /dev/null +++ b/torch_geometric/datasets/hm.py @@ -0,0 +1,162 @@ +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import HeteroData, InMemoryDataset + + +class HM(InMemoryDataset): + r"""The heterogeneous H&M dataset from the `Kaggle H&M Personalized Fashion + Recommendations + `_ + challenge. + The task is to develop product recommendations based on data from previous + transactions, as well as from customer and product meta data. 
+ + Args: + root (str): Root directory where the dataset should be saved. + use_all_tables_as_node_types (bool, optional): If set to :obj:`True`, + will use the transaction table as a distinct node type. + (default: :obj:`False`) + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + """ + url = ('https://www.kaggle.com/competitions/' + 'h-and-m-personalized-fashion-recommendations/data') + + def __init__( + self, + root: str, + use_all_tables_as_node_types: bool = False, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + self.use_all_tables_as_node_types = use_all_tables_as_node_types + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0], data_cls=HeteroData) + + @property + def raw_file_names(self) -> List[str]: + return [ + 'customers.csv.zip', 'articles.csv.zip', + 'transactions_train.csv.zip' + ] + + @property + def processed_file_names(self) -> str: + if self.use_all_tables_as_node_types: + return 'data.pt' + else: + return 'data_merged.pt' + + def download(self): + raise RuntimeError( + f"Dataset not found. Please download {self.raw_file_names} from " + f"'{self.url}' and move it to '{self.raw_dir}'") + + def process(self): + import pandas as pd + + data = HeteroData() + + # Process customer data ############################################### + df = pd.read_csv(self.raw_paths[0], index_col='customer_id') + customer_map = {idx: i for i, idx in enumerate(df.index)} + + xs = [] + for name in [ + 'Active', 'FN', 'club_member_status', 'fashion_news_frequency' + ]: + x = pd.get_dummies(df[name]).values + xs.append(torch.from_numpy(x).to(torch.float)) + + x = torch.from_numpy(df['age'].values).to(torch.float).view(-1, 1) + x = x.nan_to_num(nan=x.nanmean()) + xs.append(x / x.max()) + + data['customer'].x = torch.cat(xs, dim=-1) + + # Process article data ################################################ + df = pd.read_csv(self.raw_paths[1], index_col='article_id') + article_map = {idx: i for i, idx in enumerate(df.index)} + + xs = [] + for name in [ # We drop a few columns here that are high cardinality. + # 'product_code', # Drop. + # 'prod_name', # Drop. + 'product_type_no', + 'product_type_name', + 'product_group_name', + 'graphical_appearance_no', + 'graphical_appearance_name', + 'colour_group_code', + 'colour_group_name', + 'perceived_colour_value_id', + 'perceived_colour_value_name', + 'perceived_colour_master_id', + 'perceived_colour_master_name', + # 'department_no', # Drop. + # 'department_name', # Drop. + 'index_code', + 'index_name', + 'index_group_no', + 'index_group_name', + 'section_no', + 'section_name', + 'garment_group_no', + 'garment_group_name', + # 'detail_desc', # Drop. 
+ ]: + x = pd.get_dummies(df[name]).values + xs.append(torch.from_numpy(x).to(torch.float)) + + data['article'].x = torch.cat(xs, dim=-1) + + # Process transaction data ############################################ + df = pd.read_csv(self.raw_paths[2], parse_dates=['t_dat']) + + x1 = pd.get_dummies(df['sales_channel_id']).values + x1 = torch.from_numpy(x1).to(torch.float) + x2 = torch.from_numpy(df['price'].values).to(torch.float).view(-1, 1) + x = torch.cat([x1, x2], dim=-1) + + time = torch.from_numpy(df['t_dat'].values.astype(int)) + time = time // (60 * 60 * 24 * 10**9) # Convert nanoseconds to days. + + src = torch.tensor([customer_map[idx] for idx in df['customer_id']]) + dst = torch.tensor([article_map[idx] for idx in df['article_id']]) + + if self.use_all_tables_as_node_types: + data['transaction'].x = x + data['transaction'].time = time + + edge_index = torch.stack([src, torch.arange(len(df))], dim=0) + data['customer', 'to', 'transaction'].edge_index = edge_index + edge_index = edge_index.flip([0]) + data['transaction', 'rev_to', 'customer'].edge_index = edge_index + + edge_index = torch.stack([dst, torch.arange(len(df))], dim=0) + data['article', 'to', 'transaction'].edge_index = edge_index + edge_index = edge_index.flip([0]) + data['transaction', 'rev_to', 'article'].edge_index = edge_index + else: + edge_index = torch.stack([src, dst], dim=0) + data['customer', 'to', 'article'].edge_index = edge_index + data['customer', 'to', 'article'].time = time + data['customer', 'to', 'article'].edge_attr = x + + edge_index = edge_index.flip([0]) + data['article', 'rev_to', 'customer'].edge_index = edge_index + data['article', 'rev_to', 'customer'].time = time + data['article', 'rev_to', 'customer'].edge_attr = x + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) diff --git a/torch_geometric/datasets/movie_lens.py b/torch_geometric/datasets/movie_lens.py index 2be4a414a293..bc872625bbe1 100644 --- a/torch_geometric/datasets/movie_lens.py +++ b/torch_geometric/datasets/movie_lens.py @@ -33,12 +33,15 @@ class MovieLens(InMemoryDataset): features. The model comes from the`Huggingface SentenceTransformer `_. """ - url = 'https://files.grouplens.org/datasets/movielens/ml-latest-small.zip' - def __init__(self, root, transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None, - model_name: Optional[str] = "all-MiniLM-L6-v2"): + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + model_name: Optional[str] = 'all-MiniLM-L6-v2', + ): self.model_name = model_name super().__init__(root, transform, pre_transform) self.data, self.slices = torch.load(self.processed_paths[0]) From 45067db76284a8dea3a3e2427d12d4ba19f1d950 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 5 Jun 2023 15:03:00 +0200 Subject: [PATCH 1261/2432] Add number of sampled nodes/edges information to `LinkLoader` (#7516) --- CHANGELOG.md | 1 + test/loader/test_link_neighbor_loader.py | 7 ------- torch_geometric/loader/link_loader.py | 5 +++++ 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 729d526812a2..3f49a3d1c850 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516)) - Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515)) - Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) - Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 0fc8724d7781..a4afe6749991 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -58,7 +58,6 @@ def test_homo_link_neighbor_loader_basic(subgraph_type, neg_sampling_ratio, for batch in loader: assert isinstance(batch, Data) - assert len(batch) == 8 assert batch.n_id.size() == (batch.num_nodes, ) assert batch.e_id.size() == (batch.num_edges, ) assert batch.x.size(0) <= 100 @@ -320,7 +319,6 @@ def test_homo_link_neighbor_loader_no_edges(): for batch in loader: assert isinstance(batch, Data) - assert len(batch) == 5 assert batch.input_id.numel() == 20 assert batch.edge_label_index.size(1) == 20 assert batch.num_nodes == batch.edge_label_index.unique().numel() @@ -340,7 +338,6 @@ def test_hetero_link_neighbor_loader_no_edges(): for batch in loader: assert isinstance(batch, HeteroData) - assert len(batch) == 4 assert batch['paper', 'paper'].input_id.numel() == 20 assert batch['paper', 'paper'].edge_label_index.size(1) == 20 assert batch['paper'].num_nodes == batch[ @@ -388,8 +385,6 @@ def test_homo_link_neighbor_loader_triplet(disjoint, temporal, amount): for batch in loader: assert isinstance(batch, Data) - num_elems = 9 + (1 if disjoint else 0) + (2 if temporal else 0) - assert len(batch) == num_elems # Check that `src_index` and `dst_pos_index` point to valid edges: assert torch.equal(batch.x[batch.src_index], @@ -482,8 +477,6 @@ def test_hetero_link_neighbor_loader_triplet(disjoint, temporal, amount): for batch in loader: assert isinstance(batch, HeteroData) - num_elems = 8 + (1 if disjoint else 0) + (2 if temporal else 0) - assert len(batch) == num_elems node_store = batch['paper'] edge_store = batch['paper', 'paper'] diff --git a/torch_geometric/loader/link_loader.py b/torch_geometric/loader/link_loader.py index 461aad3de4bf..d960c4e3ed96 100644 --- a/torch_geometric/loader/link_loader.py +++ b/torch_geometric/loader/link_loader.py @@ -225,6 +225,9 @@ def filter_fn( data.e_id = out.edge data.batch = out.batch + data.num_sampled_nodes = out.num_sampled_nodes + data.num_sampled_edges = out.num_sampled_edges + data.input_id = out.metadata[0] if self.neg_sampling is None or self.neg_sampling.is_binary(): @@ -259,6 +262,8 @@ def filter_fn( data[key].e_id = edge data.set_value_dict('batch', out.batch) + data.set_value_dict('num_sampled_nodes', out.num_sampled_nodes) + data.set_value_dict('num_sampled_edges', out.num_sampled_edges) input_type = self.input_data.input_type data[input_type].input_id = out.metadata[0] From 89e781d4d5ebd8eb73ae88dd5fa2aab16d5e30f7 Mon Sep 17 00:00:00 2001 From: mszarma Date: Mon, 5 Jun 2023 15:30:45 +0200 Subject: [PATCH 1262/2432] Enable `trim_to_layer` with hetero CSR flow (#7514) Enable https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/hierarchical_sage.py using SparseTensor flow (https://github.com/pyg-team/pytorch_geometric/pull/7425#pullrequestreview-1447574875) --------- Co-authored-by: 
rusty1s --- test/utils/test_trim_to_layer.py | 2 +- torch_geometric/utils/trim_to_layer.py | 41 ++++++++++++++++++-------- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/test/utils/test_trim_to_layer.py b/test/utils/test_trim_to_layer.py index 3260a133c6e1..5ee789feb7c8 100644 --- a/test/utils/test_trim_to_layer.py +++ b/test/utils/test_trim_to_layer.py @@ -18,7 +18,7 @@ def test_trim_sparse_tensor(): edge_index = torch.tensor([[0, 0, 1, 2], [1, 2, 3, 4]]) adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[5, 5]) - adj = trim_sparse_tensor(adj, num_nodes=3, num_seed_nodes=1) + adj = trim_sparse_tensor(adj, size=(3, 3), num_seed_nodes=1) row, col, _ = adj.coo() assert row.tolist() == [0, 0] diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index 025041d2da38..524b92fce3ec 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -54,8 +54,13 @@ def trim_to_layer( for k, v in x.items() } edge_index = { - k: trim_adj(v, layer, num_sampled_nodes_per_hop[k[-1]], - num_sampled_edges_per_hop[k]) + k: trim_adj( + v, + layer, + num_sampled_nodes_per_hop[k[0]], + num_sampled_nodes_per_hop[k[-1]], + num_sampled_edges_per_hop[k], + ) for k, v in edge_index.items() } if edge_attr is not None: @@ -66,8 +71,13 @@ def trim_to_layer( return x, edge_index, edge_attr x = trim_feat(x, layer, num_sampled_nodes_per_hop) - edge_index = trim_adj(edge_index, layer, num_sampled_nodes_per_hop, - num_sampled_edges_per_hop) + edge_index = trim_adj( + edge_index, + layer, + num_sampled_nodes_per_hop, + num_sampled_nodes_per_hop, + num_sampled_edges_per_hop, + ) if edge_attr is not None: edge_attr = trim_feat(edge_attr, layer, num_sampled_edges_per_hop) @@ -125,7 +135,8 @@ def trim_feat(x: Tensor, layer: int, num_samples_per_hop: List[int]) -> Tensor: def trim_adj( edge_index: Adj, layer: int, - num_sampled_nodes_per_hop: List[int], + num_sampled_src_nodes_per_hop: List[int], + num_sampled_dst_nodes_per_hop: List[int], num_sampled_edges_per_hop: List[int], ) -> Adj: @@ -140,14 +151,19 @@ def trim_adj( ) elif isinstance(edge_index, SparseTensor): - num_nodes = edge_index.size(0) - num_sampled_nodes_per_hop[-layer] - num_seed_nodes = num_nodes - num_sampled_nodes_per_hop[-(layer + 1)] - return trim_sparse_tensor(edge_index, num_nodes, num_seed_nodes) + size = ( + edge_index.size(0) - num_sampled_dst_nodes_per_hop[-layer], + edge_index.size(1) - num_sampled_src_nodes_per_hop[-layer], + ) + + num_seed_nodes = size[0] - num_sampled_dst_nodes_per_hop[-(layer + 1)] + + return trim_sparse_tensor(edge_index, size, num_seed_nodes) raise ValueError(f"Unsupported 'edge_index' type '{type(edge_index)}'") -def trim_sparse_tensor(src: SparseTensor, num_nodes: int, +def trim_sparse_tensor(src: SparseTensor, size: Tuple[int, int], num_seed_nodes: None) -> SparseTensor: r"""Trims a :class:`SparseTensor` along both dimensions to only contain the upper :obj:`num_nodes` in both dimensions. @@ -157,13 +173,14 @@ def trim_sparse_tensor(src: SparseTensor, num_nodes: int, Args: src (SparseTensor): The sparse tensor. - num_nodes (int): The number of first nodes to keep. + size (Tuple[int, int]): The number of source and destination nodes to + keep. num_seed_nodes (int): The number of seed nodes to compute representations. 
""" rowptr, col, value = src.csr() - rowptr = torch.narrow(rowptr, 0, 0, num_nodes + 1).clone() + rowptr = torch.narrow(rowptr, 0, 0, size[0] + 1).clone() rowptr[num_seed_nodes + 1:] = rowptr[num_seed_nodes] col = torch.narrow(col, 0, 0, rowptr[-1]) @@ -180,7 +197,7 @@ def trim_sparse_tensor(src: SparseTensor, num_nodes: int, rowptr=rowptr, col=col, value=value, - sparse_sizes=(num_nodes, num_nodes), + sparse_sizes=size, rowcount=None, colptr=None, colcount=None, From 8eb0813cdbafe623fec0bc5301377fda8e5c3dcd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 5 Jun 2023 16:01:30 +0200 Subject: [PATCH 1263/2432] Disable graph break test (#7517) --- test/nn/models/test_basic_gnn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index d9517f24b8ca..4ca973419bbb 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -300,6 +300,7 @@ def test_trim_to_layer(): @disableExtensions @withPackage('torch>=2.0.0') @pytest.mark.parametrize('Model', [GCN, GraphSAGE, GIN, GAT, EdgeCNN, PNA]) +@pytest.mark.skip(reason="Does not work yet in the full test suite") def test_compile_graph_breaks(Model): x = torch.randn(3, 8) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) From c600f09701d2b89328b1195758e0f21578bfec06 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 6 Jun 2023 07:37:08 +0200 Subject: [PATCH 1264/2432] Fix empty edge indices handling with `SparseTensor` (#7519) --- CHANGELOG.md | 1 + torch_geometric/typing.py | 3 +++ torch_geometric/utils/spmm.py | 3 +++ torch_geometric/utils/trim_to_layer.py | 2 +- 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f49a3d1c850..720656d252fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fixed empty edge indices handling in `SparseTensor` ([#7519](https://github.com/pyg-team/pytorch_geometric/pull/7519)) - Move the `scaler` tensor in `GeneralConv` to the correct device ([#7484](https://github.com/pyg-team/pytorch_geometric/pull/7484)) - Fixed `HeteroLinear` bug when used via mixed precision ([#7473](https://github.com/pyg-team/pytorch_geometric/pull/7473)) - All transforms are now immutable, i.e., they perform a shallow-copy of the data and therefore do not longer modify data in-place ([#7429](https://github.com/pyg-team/pytorch_geometric/pull/7429)) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index a4b102f27073..434d26747e56 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -115,6 +115,9 @@ def from_dense(self, mat: Tensor, def size(self, dim: int) -> int: raise ImportError("'SparseTensor' requires 'torch-sparse'") + def nnz(self) -> int: + raise ImportError("'SparseTensor' requires 'torch-sparse'") + def is_cuda(self) -> bool: raise ImportError("'SparseTensor' requires 'torch-sparse'") diff --git a/torch_geometric/utils/spmm.py b/torch_geometric/utils/spmm.py index 290182dce7c0..3d523c5d2761 100644 --- a/torch_geometric/utils/spmm.py +++ b/torch_geometric/utils/spmm.py @@ -40,6 +40,9 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: raise ValueError(f"`reduce` argument '{reduce}' not supported") if isinstance(src, SparseTensor): + if src.nnz() == 0: + return other.new_zeros(src.size(0), other.size(1)) + if (torch_geometric.typing.WITH_PT2 and other.dim() == 2 and not src.is_cuda() and not src.requires_grad()): # Use optimized PyTorch `torch.sparse.mm` path: diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index 524b92fce3ec..da294fbf5acc 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -164,7 +164,7 @@ def trim_adj( def trim_sparse_tensor(src: SparseTensor, size: Tuple[int, int], - num_seed_nodes: None) -> SparseTensor: + num_seed_nodes: int) -> SparseTensor: r"""Trims a :class:`SparseTensor` along both dimensions to only contain the upper :obj:`num_nodes` in both dimensions. 
From b5155d7c2809a3eb56c4dce501edf7dccd19f969 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 6 Jun 2023 10:09:44 +0200 Subject: [PATCH 1265/2432] Fix `NeighborLoader` tests in case `torch-sparse` is not installed (#7526) --- test/loader/test_neighbor_loader.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 7c4b286e5d06..76238219b6c5 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -18,7 +18,7 @@ onlyNeighborSampler, withPackage, ) -from torch_geometric.typing import WITH_PYG_LIB +from torch_geometric.typing import WITH_PYG_LIB, WITH_TORCH_SPARSE from torch_geometric.utils import ( is_undirected, sort_edge_index, @@ -41,6 +41,8 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) @pytest.mark.parametrize('filter_per_worker', [None, True, False]) def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): + if subgraph_type == SubgraphType.induced and not WITH_TORCH_SPARSE: + return if (dtype != torch.int64 and (not WITH_PYG_LIB or subgraph_type == SubgraphType.induced)): return @@ -99,6 +101,8 @@ def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): @pytest.mark.parametrize('subgraph_type', list(SubgraphType)) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) def test_hetero_neighbor_loader_basic(subgraph_type, dtype): + if subgraph_type == SubgraphType.induced and not WITH_TORCH_SPARSE: + return if (dtype != torch.int64 and (not WITH_PYG_LIB or subgraph_type == SubgraphType.induced)): return @@ -248,6 +252,8 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', list(SubgraphType)) def test_homo_neighbor_loader_on_cora(get_dataset, subgraph_type): + if subgraph_type == SubgraphType.induced and not WITH_TORCH_SPARSE: + return dataset = get_dataset(name='Cora') data = dataset[0] @@ -291,6 +297,8 @@ def forward(self, x, edge_index, edge_weight): @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', list(SubgraphType)) def test_hetero_neighbor_loader_on_cora(get_dataset, subgraph_type): + if subgraph_type == SubgraphType.induced and not WITH_TORCH_SPARSE: + return dataset = get_dataset(name='Cora') data = dataset[0] From 417933789aad725392f3ce53ac2aa4d92af1187b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 6 Jun 2023 10:21:57 +0200 Subject: [PATCH 1266/2432] Fix type hints in `InMemoryDataset` (#7527) --- torch_geometric/data/dataset.py | 25 +++++++++------ torch_geometric/data/in_memory_dataset.py | 38 +++++++++++++---------- 2 files changed, 37 insertions(+), 26 deletions(-) diff --git a/torch_geometric/data/dataset.py b/torch_geometric/data/dataset.py index 06df9343a791..18bb65a5525e 100644 --- a/torch_geometric/data/dataset.py +++ b/torch_geometric/data/dataset.py @@ -25,18 +25,23 @@ class Dataset(torch.utils.data.Dataset, ABC): Args: root (str, optional): Root directory where the dataset should be saved. (optional: :obj:`None`) - transform (callable, optional): A function/transform that takes in an - :obj:`torch_geometric.data.Data` object and returns a transformed - version. The data object will be transformed before every access. 
+ transform (callable, optional): A function/transform that takes in a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + transformed version. + The data object will be transformed before every access. (default: :obj:`None`) pre_transform (callable, optional): A function/transform that takes in - an :obj:`torch_geometric.data.Data` object and returns a - transformed version. The data object will be transformed before - being saved to disk. (default: :obj:`None`) - pre_filter (callable, optional): A function that takes in an - :obj:`torch_geometric.data.Data` object and returns a boolean - value, indicating whether the data object should be included in the - final dataset. (default: :obj:`None`) + a :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + transformed version. + The data object will be transformed before being saved to disk. + (default: :obj:`None`) + pre_filter (callable, optional): A function that takes in a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + boolean value, indicating whether the data object should be + included in the final dataset. (default: :obj:`None`) log (bool, optional): Whether to print any console output while downloading and processing the dataset. (default: :obj:`True`) """ diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index 90333354f6fe..f8b5b1733c1b 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -35,18 +35,23 @@ class InMemoryDataset(Dataset, ABC): Args: root (str, optional): Root directory where the dataset should be saved. (optional: :obj:`None`) - transform (callable, optional): A function/transform that takes in an - :obj:`torch_geometric.data.Data` object and returns a transformed - version. The data object will be transformed before every access. + transform (callable, optional): A function/transform that takes in a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + transformed version. + The data object will be transformed before every access. (default: :obj:`None`) pre_transform (callable, optional): A function/transform that takes in - an :obj:`torch_geometric.data.Data` object and returns a - transformed version. The data object will be transformed before - being saved to disk. (default: :obj:`None`) - pre_filter (callable, optional): A function that takes in an - :obj:`torch_geometric.data.Data` object and returns a boolean - value, indicating whether the data object should be included in the - final dataset. (default: :obj:`None`) + a :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + transformed version. + The data object will be transformed before being saved to disk. + (default: :obj:`None`) + pre_filter (callable, optional): A function that takes in a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + boolean value, indicating whether the data object should be + included in the final dataset. (default: :obj:`None`) log (bool, optional): Whether to print any console output while downloading and processing the dataset. 
(default: :obj:`True`) """ @@ -69,7 +74,7 @@ def __init__( super().__init__(root, transform, pre_transform, pre_filter, log) self._data = None self.slices = None - self._data_list: Optional[List[Data]] = None + self._data_list: Optional[List[BaseData]] = None @property def num_classes(self) -> int: @@ -84,7 +89,7 @@ def len(self) -> int: return len(value) - 1 return 0 - def get(self, idx: int) -> Data: + def get(self, idx: int) -> BaseData: # TODO (matthias) Avoid unnecessary copy here. if self.len() == 1: return copy.copy(self._data) @@ -121,10 +126,11 @@ def load(self, path: str, data_cls: Type[BaseData] = Data): @staticmethod def collate( - data_list: List[Data]) -> Tuple[Data, Optional[Dict[str, Tensor]]]: - r"""Collates a Python list of :obj:`torch_geometric.data.Data` objects - to the internal storage format of - :class:`~torch_geometric.data.InMemoryDataset`.""" + data_list: List[BaseData], + ) -> Tuple[BaseData, Optional[Dict[str, Tensor]]]: + r"""Collates a Python list of :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` objects to the internal + storage format of :class:`~torch_geometric.data.InMemoryDataset`.""" if len(data_list) == 1: return data_list[0], None From 0f0e0da31d2f2848d7f0bb77e9ffcf1911d5482f Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Tue, 6 Jun 2023 15:48:55 +0200 Subject: [PATCH 1267/2432] Add `max_num_elements` parameter to the forward method of aggregation layers (#7529) `GraphMultisetTransformer`, `GRUAggregation`, `LSTMAggregation`, `SetTransformerAggregation` layers. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/aggr/base.py | 14 +++++++++++--- torch_geometric/nn/aggr/gmt.py | 17 ++++++++++++----- torch_geometric/nn/aggr/gru.py | 17 +++++++++++++---- torch_geometric/nn/aggr/lstm.py | 17 +++++++++++++---- torch_geometric/nn/aggr/set_transformer.py | 17 ++++++++++++----- 6 files changed, 62 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 720656d252fb..9ba7d97a37c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
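A brief sketch of the `max_num_elements` argument introduced by this patch, using `GRUAggregation` as an example (the concrete sizes are only illustrative):

    import torch

    from torch_geometric.nn.aggr import GRUAggregation

    x = torch.randn(6, 16)
    index = torch.tensor([0, 0, 1, 1, 1, 2])

    aggr = GRUAggregation(in_channels=16, out_channels=32)

    # `max_num_elements` bounds the padded group size used by `to_dense_batch`
    # internally, keeping the intermediate shape static across batches:
    out = aggr(x, index, dim_size=3, max_num_elements=4)
    assert out.shape == (3, 32)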
### Changed +- Added a `max_num_elements` parameter to the forward method of `GraphMultisetTransformer`, `GRUAggregation`, `LSTMAggregation` and `SetTransformerAggregation` ([#7529](https://github.com/pyg-team/pytorch_geometric/pull/7529)) - Fixed empty edge indices handling in `SparseTensor` ([#7519](https://github.com/pyg-team/pytorch_geometric/pull/7519)) - Move the `scaler` tensor in `GeneralConv` to the correct device ([#7484](https://github.com/pyg-team/pytorch_geometric/pull/7484)) - Fixed `HeteroLinear` bug when used via mixed precision ([#7473](https://github.com/pyg-team/pytorch_geometric/pull/7473)) diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 3de22b8e7c80..ebfb2e42fa9a 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -58,9 +58,15 @@ class Aggregation(torch.nn.Module): - **output:** graph features :math:`(*, |\mathcal{G}|, F_{out})` or node features :math:`(*, |\mathcal{V}|, F_{out})` """ - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. @@ -76,6 +82,8 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, dimension :obj:`dim` after aggregation. (default: :obj:`None`) dim (int, optional): The dimension in which to aggregate. (default: :obj:`-2`) + max_num_elements: (int, optional): The maximum number of elements + within a single aggregation group. (default: :obj:`None`) """ pass diff --git a/torch_geometric/nn/aggr/gmt.py b/torch_geometric/nn/aggr/gmt.py index 3dc249264220..42e367c4b602 100644 --- a/torch_geometric/nn/aggr/gmt.py +++ b/torch_geometric/nn/aggr/gmt.py @@ -65,11 +65,18 @@ def reset_parameters(self): encoder.reset_parameters() self.pma2.reset_parameters() - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - - x, mask = self.to_dense_batch(x, index, ptr, dim_size, dim) + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: + + x, mask = self.to_dense_batch(x, index, ptr, dim_size, dim, + max_num_elements=max_num_elements) x = self.pma1(x, mask) diff --git a/torch_geometric/nn/aggr/gru.py b/torch_geometric/nn/aggr/gru.py index 59d8917c4060..72f6f09f5216 100644 --- a/torch_geometric/nn/aggr/gru.py +++ b/torch_geometric/nn/aggr/gru.py @@ -29,10 +29,19 @@ def __init__(self, in_channels: int, out_channels: int, **kwargs): def reset_parameters(self): self.gru.reset_parameters() - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim) + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: + + x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, + max_num_elements=max_num_elements) + return self.gru(x)[0][:, -1] def __repr__(self) -> str: diff --git 
a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py index f784b95b026e..a4f98c517b3d 100644 --- a/torch_geometric/nn/aggr/lstm.py +++ b/torch_geometric/nn/aggr/lstm.py @@ -29,10 +29,19 @@ def __init__(self, in_channels: int, out_channels: int, **kwargs): def reset_parameters(self): self.lstm.reset_parameters() - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim) + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: + + x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, + max_num_elements=max_num_elements) + return self.lstm(x)[0][:, -1] def __repr__(self) -> str: diff --git a/torch_geometric/nn/aggr/set_transformer.py b/torch_geometric/nn/aggr/set_transformer.py index da9737f4cd56..4458fe1037a8 100644 --- a/torch_geometric/nn/aggr/set_transformer.py +++ b/torch_geometric/nn/aggr/set_transformer.py @@ -73,11 +73,18 @@ def reset_parameters(self): for decoder in self.decoders: decoder.reset_parameters() - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - - x, mask = self.to_dense_batch(x, index, ptr, dim_size, dim) + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: + + x, mask = self.to_dense_batch(x, index, ptr, dim_size, dim, + max_num_elements=max_num_elements) for encoder in self.encoders: x = encoder(x, mask) From 2c63016b262b99e0df95da466cfef4610900e7aa Mon Sep 17 00:00:00 2001 From: Mohamad Zamini <32536264+mzamini92@users.noreply.github.com> Date: Wed, 7 Jun 2023 08:14:35 -0600 Subject: [PATCH 1268/2432] Ensure data is on the same device in `knn_graph` (#7497) we can directly move the tensors to the desired device using the `to()` method provided by PyTorch. This ensures that all tensors within the data object are on the same device before calling `knn_graph`. This seems to be a solution for #7475 issue. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- torch_geometric/nn/pool/__init__.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py index 9a8cdad56b2b..00c0d2756e7e 100644 --- a/torch_geometric/nn/pool/__init__.py +++ b/torch_geometric/nn/pool/__init__.py @@ -1,3 +1,4 @@ +import warnings from typing import Optional from torch import Tensor @@ -161,6 +162,11 @@ def knn_graph( :rtype: :class:`torch.Tensor` """ + if batch is not None and x.device != batch.device: + warnings.warn("Input tensor 'x' and 'batch' are on different devices " + "in 'knn_graph'. Performing blocking device transfer") + batch = batch.to(x.device) + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: return torch_cluster.knn_graph(x, k, batch, loop, flow, cosine, num_workers) @@ -264,6 +270,11 @@ def radius_graph( :rtype: :class:`torch.Tensor` """ + if batch is not None and x.device != batch.device: + warnings.warn("Input tensor 'x' and 'batch' are on different devices " + "in 'radius_graph'. 
Performing blocking device transfer") + batch = batch.to(x.device) + if not torch_geometric.typing.WITH_TORCH_CLUSTER_BATCH_SIZE: return torch_cluster.radius_graph(x, r, batch, loop, max_num_neighbors, flow, num_workers) From 3e207dc953f3742aacd121e2c3ca4628230276f8 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 7 Jun 2023 16:17:53 +0200 Subject: [PATCH 1269/2432] Ensure same dtypes in `Planetoid` processing (#7536) Fixes #7532 --- torch_geometric/io/planetoid.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_geometric/io/planetoid.py b/torch_geometric/io/planetoid.py index ba767668f21d..3026b11f5534 100644 --- a/torch_geometric/io/planetoid.py +++ b/torch_geometric/io/planetoid.py @@ -30,9 +30,9 @@ def read_planetoid_data(folder, prefix): # as zero vectors to `tx` and `ty`. len_test_indices = (test_index.max() - test_index.min()).item() + 1 - tx_ext = torch.zeros(len_test_indices, tx.size(1)) + tx_ext = torch.zeros(len_test_indices, tx.size(1), dtype=tx.dtype) tx_ext[sorted_test_index - test_index.min(), :] = tx - ty_ext = torch.zeros(len_test_indices, ty.size(1)) + ty_ext = torch.zeros(len_test_indices, ty.size(1), dtype=ty.dtype) ty_ext[sorted_test_index - test_index.min(), :] = ty tx, ty = tx_ext, ty_ext From e37d0ec8a56d1f503675ce738e7021014de33af7 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 7 Jun 2023 16:39:47 +0200 Subject: [PATCH 1270/2432] Add usage of `disable_dynamic_shape` decorator in aggregation layers (#7534) Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- torch_geometric/nn/aggr/base.py | 2 ++ torch_geometric/nn/aggr/gmt.py | 2 ++ torch_geometric/nn/aggr/gru.py | 2 ++ torch_geometric/nn/aggr/lstm.py | 2 ++ torch_geometric/nn/aggr/set_transformer.py | 2 ++ torch_geometric/nn/aggr/sort.py | 2 ++ 7 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ba7d97a37c7..3c854f35c9ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516)) - Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515)) - Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) -- Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246)) +- Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246), [#7534](https://github.com/pyg-team/pytorch_geometric/pull/7534)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index ebfb2e42fa9a..593cce8a5ff8 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -3,6 +3,7 @@ import torch from torch import Tensor +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.utils import scatter, segment, to_dense_batch @@ -91,6 +92,7 @@ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" pass + @disable_dynamic_shapes(required_args=['dim_size']) def __call__(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2, **kwargs) -> Tensor: diff --git a/torch_geometric/nn/aggr/gmt.py b/torch_geometric/nn/aggr/gmt.py index 42e367c4b602..3f81785b141c 100644 --- a/torch_geometric/nn/aggr/gmt.py +++ b/torch_geometric/nn/aggr/gmt.py @@ -3,6 +3,7 @@ import torch from torch import Tensor +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.nn.aggr import Aggregation from torch_geometric.nn.aggr.utils import ( PoolingByMultiheadAttention, @@ -65,6 +66,7 @@ def reset_parameters(self): encoder.reset_parameters() self.pma2.reset_parameters() + @disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) def forward( self, x: Tensor, diff --git a/torch_geometric/nn/aggr/gru.py b/torch_geometric/nn/aggr/gru.py index 72f6f09f5216..c731d60252b4 100644 --- a/torch_geometric/nn/aggr/gru.py +++ b/torch_geometric/nn/aggr/gru.py @@ -3,6 +3,7 @@ from torch import Tensor from torch.nn import GRU +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.nn.aggr import Aggregation @@ -29,6 +30,7 @@ def __init__(self, in_channels: int, out_channels: int, **kwargs): def reset_parameters(self): self.gru.reset_parameters() + @disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) def forward( self, x: Tensor, diff --git a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py index a4f98c517b3d..25da0b0309a7 100644 --- a/torch_geometric/nn/aggr/lstm.py +++ b/torch_geometric/nn/aggr/lstm.py @@ -3,6 +3,7 @@ from torch import Tensor from torch.nn import LSTM +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.nn.aggr import Aggregation @@ -29,6 +30,7 @@ def __init__(self, in_channels: int, out_channels: int, **kwargs): def reset_parameters(self): self.lstm.reset_parameters() + 
@disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) def forward( self, x: Tensor, diff --git a/torch_geometric/nn/aggr/set_transformer.py b/torch_geometric/nn/aggr/set_transformer.py index 4458fe1037a8..01b8379662f9 100644 --- a/torch_geometric/nn/aggr/set_transformer.py +++ b/torch_geometric/nn/aggr/set_transformer.py @@ -3,6 +3,7 @@ import torch from torch import Tensor +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.nn.aggr import Aggregation from torch_geometric.nn.aggr.utils import ( PoolingByMultiheadAttention, @@ -73,6 +74,7 @@ def reset_parameters(self): for decoder in self.decoders: decoder.reset_parameters() + @disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) def forward( self, x: Tensor, diff --git a/torch_geometric/nn/aggr/sort.py b/torch_geometric/nn/aggr/sort.py index 3ddce63ec5bf..ba7c216fff71 100644 --- a/torch_geometric/nn/aggr/sort.py +++ b/torch_geometric/nn/aggr/sort.py @@ -3,6 +3,7 @@ import torch from torch import Tensor +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.nn.aggr import Aggregation @@ -20,6 +21,7 @@ def __init__(self, k: int): super().__init__() self.k = k + @disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) def forward( self, x: Tensor, From 44ebf0db365294ce47201d7bb3c9b359a2ae8326 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 7 Jun 2023 16:48:00 +0200 Subject: [PATCH 1271/2432] Add `max_num_nodes` parameter to the constructor of `EquilibriumAggregation` layer (#7530) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/aggr/test_equilibrium.py | 6 ------ torch_geometric/nn/aggr/equilibrium.py | 20 +++++--------------- 3 files changed, 6 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c854f35c9ac..b35a86fbf4b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
 ### Changed
 
+- Use `dim_size` to initialize output size of the `EquilibriumAggregation` layer ([#7530](https://github.com/pyg-team/pytorch_geometric/pull/7530))
 - Added a `max_num_elements` parameter to the forward method of `GraphMultisetTransformer`, `GRUAggregation`, `LSTMAggregation` and `SetTransformerAggregation` ([#7529](https://github.com/pyg-team/pytorch_geometric/pull/7529))
 - Fixed empty edge indices handling in `SparseTensor` ([#7519](https://github.com/pyg-team/pytorch_geometric/pull/7519))
 - Move the `scaler` tensor in `GeneralConv` to the correct device ([#7484](https://github.com/pyg-team/pytorch_geometric/pull/7484))
diff --git a/test/nn/aggr/test_equilibrium.py b/test/nn/aggr/test_equilibrium.py
index fcb976140d5e..86107d8f70a2 100644
--- a/test/nn/aggr/test_equilibrium.py
+++ b/test/nn/aggr/test_equilibrium.py
@@ -18,9 +18,6 @@ def test_equilibrium(iter, alpha):
     out = model(x)
     assert out.size() == (1, 2)
 
-    with pytest.raises(ValueError):
-        model(x, dim_size=0)
-
     out = model(x, dim_size=3)
     assert out.size() == (3, 2)
     assert torch.all(out[1:, :] == 0)
@@ -43,9 +40,6 @@ def test_equilibrium_batch(iter, alpha):
     out = model(x, batch)
     assert out.size() == (2, 2)
 
-    with pytest.raises(ValueError):
-        model(x, dim_size=0)
-
     out = model(x, dim_size=3)
     assert out.size() == (3, 2)
     assert torch.all(out[1:, :] == 0)
diff --git a/torch_geometric/nn/aggr/equilibrium.py b/torch_geometric/nn/aggr/equilibrium.py
index a17de73c4b9e..87b46f0cc5bc 100644
--- a/torch_geometric/nn/aggr/equilibrium.py
+++ b/torch_geometric/nn/aggr/equilibrium.py
@@ -146,9 +146,8 @@ def reset_parameters(self):
         reset(self.optimizer)
         reset(self.potential)
 
-    def init_output(self, index: Optional[Tensor] = None) -> Tensor:
-        index_size = 1 if index is None else int(index.max().item() + 1)
-        return torch.zeros(index_size, self.output_dim, requires_grad=True,
+    def init_output(self, dim_size: int) -> Tensor:
+        return torch.zeros(dim_size, self.output_dim, requires_grad=True,
                            device=self.lamb.device).float()
 
     def reg(self, y: Tensor) -> Tensor:
@@ -163,20 +162,11 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None,
 
         self.assert_index_present(index)
 
-        index_size = 1 if index is None else index.max() + 1
-        dim_size = index_size if dim_size is None else dim_size
-
-        if dim_size < index_size:
-            raise ValueError("`dim_size` is less than `index` "
-                             "implied size")
+        dim_size = int(index.max()) + 1 if dim_size is None else dim_size
 
         with torch.enable_grad():
-            y = self.optimizer(x, self.init_output(index), index, self.energy,
-                               iterations=self.grad_iter)
-
-        if dim_size > index_size:
-            zero = y.new_zeros(dim_size - index_size, *y.size()[1:])
-            y = torch.cat([y, zero])
+            y = self.optimizer(x, self.init_output(dim_size), index,
+                               self.energy, iterations=self.grad_iter)
 
         return y

From 3b545ee793cdd5eafea834a30b7a7e67bf800d56 Mon Sep 17 00:00:00 2001
From: Piotr Chmiel
Date: Wed, 7 Jun 2023 17:00:31 +0200
Subject: [PATCH 1272/2432] Use `repeat` instead of `new_full` to avoid graph
 breaks in `to_dense_batch` (#7537)

`new_full` calls `item()` on the fill value under the hood whenever the
fill value is a tensor, which causes a graph break in `torch.compile`.

---------

Co-authored-by: Matthias Fey
---
 torch_geometric/utils/to_dense_batch.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/torch_geometric/utils/to_dense_batch.py b/torch_geometric/utils/to_dense_batch.py
index c6857caf278b..c6d9f2ba60a7 100644
--- a/torch_geometric/utils/to_dense_batch.py
+++ b/torch_geometric/utils/to_dense_batch.py
@@ -94,8 +94,6 @@ def to_dense_batch(
            [
True, False, False, False], [ True, True, True, False]]) """ - fill_value = 0.0 if fill_value is None else fill_value - if batch is None and max_num_nodes is None: mask = torch.ones(1, x.size(0), dtype=torch.bool, device=x.device) return x.unsqueeze(0), mask @@ -126,7 +124,8 @@ def to_dense_batch( x, idx = x[mask], idx[mask] size = [batch_size * max_num_nodes] + list(x.size())[1:] - out = x.new_full(size, fill_value) + out = torch.as_tensor(fill_value, dtype=x.dtype, + device=x.device).repeat(size) out[idx] = x out = out.view([batch_size, max_num_nodes] + list(x.size())[1:]) From a6a1f46d7ff8479dd4cb9057f6f682d98c9a933d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 7 Jun 2023 19:44:49 +0200 Subject: [PATCH 1273/2432] Testing PyG with minimal dependencies (only `pyg-lib`) (#7538) --- .github/actions/setup/action.yml | 12 ++++++------ .../workflows/{install.yml => minimal_testing.yml} | 12 +++++------- test/utils/test_map.py | 4 +++- 3 files changed, 14 insertions(+), 14 deletions(-) rename .github/workflows/{install.yml => minimal_testing.yml} (80%) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 185a2bc62736..ff73c90022b6 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -46,15 +46,15 @@ runs: python -c "import torch; print('CUDA:', torch.version.cuda)" shell: bash + - name: Install pyg-lib # pyg-lib is currently only available on Linux. + if: ${{ inputs.torch-version != 'nightly' && runner.os == 'Linux' }} + run: | + pip install pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html + shell: bash + - name: Install extension packages if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' }} run: | pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} pip install torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash - - - name: Install pyg-lib # pyg-lib is currently only available on Linux. - if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' && runner.os == 'Linux' }} - run: | - pip install pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html - shell: bash diff --git a/.github/workflows/install.yml b/.github/workflows/minimal_testing.yml similarity index 80% rename from .github/workflows/install.yml rename to .github/workflows/minimal_testing.yml index 95e8d96d6913..7cb65921a154 100644 --- a/.github/workflows/install.yml +++ b/.github/workflows/minimal_testing.yml @@ -1,4 +1,4 @@ -name: Minimal Installation +name: Testing minimal PyTorch 2.0 on: # yamllint disable-line rule:truthy push: @@ -8,7 +8,7 @@ on: # yamllint disable-line rule:truthy jobs: - import: + minimal_pytest: runs-on: ubuntu-latest steps: @@ -41,11 +41,9 @@ jobs: - name: Install main package if: steps.changed-files-specific.outputs.only_changed != 'true' run: | - pip install -e . 
+ pip install -e .[test] - - name: Test imports + - name: Run tests if: steps.changed-files-specific.outputs.only_changed != 'true' run: | - python -c "import torch_geometric" - python -c "import torch_geometric.contrib" - python -c "import torch_geometric.graphgym" + pytest diff --git a/test/utils/test_map.py b/test/utils/test_map.py index df6eb6e5466e..d625713a7196 100644 --- a/test/utils/test_map.py +++ b/test/utils/test_map.py @@ -2,11 +2,12 @@ import torch from torch_geometric.profile import benchmark -from torch_geometric.testing import withCUDA +from torch_geometric.testing import withCUDA, withPackage from torch_geometric.utils.map import map_index @withCUDA +@withPackage('pandas') @pytest.mark.parametrize('max_index', [3, 100_000_000]) def test_map_index(device, max_index): src = torch.tensor([2, 0, 1, 0, max_index], device=device) @@ -19,6 +20,7 @@ def test_map_index(device, max_index): @withCUDA +@withPackage('pandas') @pytest.mark.parametrize('max_index', [3, 100_000_000]) def test_map_index_na(device, max_index): src = torch.tensor([2, 0, 1, 0, max_index], device=device) From ca5ba4ccbfa433ce7584e1beb60e6d58c7f9a284 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Wed, 7 Jun 2023 20:36:51 +0200 Subject: [PATCH 1274/2432] Remove python fallbacks in `voxel_grid` (#7528) Replace pure python ops, by torch ops in order to avoid graph break in torch.compile. --------- Co-authored-by: rusty1s --- torch_geometric/nn/pool/voxel_grid.py | 32 ++++++++++++++------------- torch_geometric/utils/repeat.py | 24 +++++++++++++++++++- 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/torch_geometric/nn/pool/voxel_grid.py b/torch_geometric/nn/pool/voxel_grid.py index 5df0539c7d3c..9874c267a9b7 100644 --- a/torch_geometric/nn/pool/voxel_grid.py +++ b/torch_geometric/nn/pool/voxel_grid.py @@ -44,26 +44,28 @@ def voxel_grid( raise ImportError('`voxel_grid` requires `torch-cluster`.') pos = pos.unsqueeze(-1) if pos.dim() == 1 else pos - num_nodes, dim = pos.size() - - size = size.tolist() if torch.is_tensor(size) else size - start = start.tolist() if torch.is_tensor(start) else start - end = end.tolist() if torch.is_tensor(end) else end - - size, start, end = repeat(size, dim), repeat(start, dim), repeat(end, dim) + dim = pos.size(1) if batch is None: - batch = torch.zeros(pos.shape[0], dtype=torch.long) + batch = pos.new_zeros(pos.size(0), dtype=torch.long) - pos = torch.cat([pos, batch.unsqueeze(-1).type_as(pos)], dim=-1) - size = size + [1] - start = None if start is None else start + [0] - end = None if end is None else end + [batch.max().item()] + pos = torch.cat([pos, batch.view(-1, 1).to(pos.dtype)], dim=-1) + + if not isinstance(size, Tensor): + size = torch.tensor(size, dtype=pos.dtype, device=pos.device) + size = repeat(size, dim) + size = torch.cat([size, size.new_ones(1)]) # Add additional batch dim. 
- size = torch.tensor(size, dtype=pos.dtype, device=pos.device) if start is not None: - start = torch.tensor(start, dtype=pos.dtype, device=pos.device) + if not isinstance(start, Tensor): + start = torch.tensor(start, dtype=pos.dtype, device=pos.device) + start = repeat(start, dim) + start = torch.cat([start, start.new_zeros(1)]) + if end is not None: - end = torch.tensor(end, dtype=pos.dtype, device=pos.device) + if not isinstance(end, Tensor): + end = torch.tensor(end, dtype=pos.dtype, device=pos.device) + end = repeat(end, dim) + end = torch.cat([end, batch.max().unsqueeze(0)]) return grid_cluster(pos, size, start, end) diff --git a/torch_geometric/utils/repeat.py b/torch_geometric/utils/repeat.py index 0a93f7438ed6..351f898beff4 100644 --- a/torch_geometric/utils/repeat.py +++ b/torch_geometric/utils/repeat.py @@ -1,14 +1,36 @@ import itertools import numbers +from typing import Any +import torch +from torch import Tensor -def repeat(src, length): + +def repeat(src: Any, length: int) -> Any: if src is None: return None + + if isinstance(src, Tensor): + if src.numel() == 1: + return src.repeat(length) + + if src.numel() > length: + return src[:length] + + if src.numel() < length: + last_elem = src[-1].unsqueeze(0) + padding = last_elem.repeat(length - src.numel()) + return torch.cat([src, padding]) + + return src + if isinstance(src, numbers.Number): return list(itertools.repeat(src, length)) + if (len(src) > length): return src[:length] + if (len(src) < length): return src + list(itertools.repeat(src[-1], length - len(src))) + return src From 0520bbb5a1d117a68504e00fb3b5b76613190de0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 8 Jun 2023 08:21:34 +0200 Subject: [PATCH 1275/2432] Fix `get_mesh_laplacian` with `normalization="sym"` (#7544) --- CHANGELOG.md | 1 + torch_geometric/utils/get_mesh_laplacian.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b35a86fbf4b7..61fb5eb545a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
 ### Changed
 
+- Fixed `get_mesh_laplacian` for `normalization="sym"` ([#7544](https://github.com/pyg-team/pytorch_geometric/pull/7544))
 - Use `dim_size` to initialize output size of the `EquilibriumAggregation` layer ([#7530](https://github.com/pyg-team/pytorch_geometric/pull/7530))
 - Added a `max_num_elements` parameter to the forward method of `GraphMultisetTransformer`, `GRUAggregation`, `LSTMAggregation` and `SetTransformerAggregation` ([#7529](https://github.com/pyg-team/pytorch_geometric/pull/7529))
 - Fixed empty edge indices handling in `SparseTensor` ([#7519](https://github.com/pyg-team/pytorch_geometric/pull/7519))
diff --git a/torch_geometric/utils/get_mesh_laplacian.py b/torch_geometric/utils/get_mesh_laplacian.py
index c739d715fd57..98b8cba135f7 100644
--- a/torch_geometric/utils/get_mesh_laplacian.py
+++ b/torch_geometric/utils/get_mesh_laplacian.py
@@ -98,7 +98,7 @@ def get_areas(left, centre, right):
 
     if normalization == 'sym':
         area_deg_inv_sqrt = area_deg.pow_(-0.5)
-        area_deg_inv_sqrt[area_deg_inv_sqrt == float('inf')] = 0.0,
+        area_deg_inv_sqrt[area_deg_inv_sqrt == float('inf')] = 0.0
         edge_weight = (area_deg_inv_sqrt[edge_index[0]] * edge_weight *
                        area_deg_inv_sqrt[edge_index[1]])
     elif normalization == 'rw':

From e4297b124306d8f8e8419874b751aa4450057272 Mon Sep 17 00:00:00 2001
From: Zecheng Zhang
Date: Sun, 11 Jun 2023 03:49:31 -0700
Subject: [PATCH 1276/2432] Add `Performer` to `GPSConv` (#7465)

### Description

* Most of the code is adapted and simplified from
  https://github.com/lucidrains/performer-pytorch/blob/main/performer_pytorch/performer_pytorch.py
  and https://github.com/rampasek/GraphGPS/blob/main/graphgps/layer/gps_layer.py.
  Should we add comments to cite them? I checked that both of them are
  released under the MIT License.
* There is a projection matrix redrawer that periodically redraws the
  projection matrix in the Performer. It does not seem to be strictly
  required, so I put it into `examples/graph_gps.py` for now. I am not
  sure where it should ultimately live.
* There are also other parameters and kernels that could be added to the
  Performer, such as the softmax kernel. For now, I added some TODOs.

### ZINC Example

```python
python3 examples/graph_gps.py --attn_type performer
```

I directly replaced the multi-head attention with Performer fast
attention using the `ReLU` kernel. The performance is similar (there is
some randomness across runs and in the graph classification task).
The following is the `test_mae` plot for each epoch.

![training](https://github.com/pyg-team/pytorch_geometric/assets/21955420/50aac085-460c-4293-b74c-966d4857a05e)

---------

Co-authored-by: Jinu Sunil
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 CHANGELOG.md                              |   1 +
 examples/graph_gps.py                     |  45 ++++-
 test/nn/attention/test_performer.py       |  13 ++
 test/nn/conv/test_gps_conv.py             |  20 ++-
 torch_geometric/nn/attention/__init__.py  |   3 +
 torch_geometric/nn/attention/performer.py | 199 ++++++++++++++++++++++
 torch_geometric/nn/conv/gps_conv.py       |  47 +++--
 7 files changed, 304 insertions(+), 24 deletions(-)
 create mode 100644 test/nn/attention/test_performer.py
 create mode 100644 torch_geometric/nn/attention/__init__.py
 create mode 100644 torch_geometric/nn/attention/performer.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 61fb5eb545a2..ae0f0b87c9c6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Added +- Added Performer to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465)) - Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516)) - Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515)) - Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) diff --git a/examples/graph_gps.py b/examples/graph_gps.py index d2a606ef8a9e..4f83934e575a 100644 --- a/examples/graph_gps.py +++ b/examples/graph_gps.py @@ -1,4 +1,6 @@ +import argparse import os.path as osp +from typing import Any, Dict, Optional import torch from torch.nn import ( @@ -15,6 +17,7 @@ from torch_geometric.datasets import ZINC from torch_geometric.loader import DataLoader from torch_geometric.nn import GINEConv, GPSConv, global_add_pool +from torch_geometric.nn.attention import PerformerAttention path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ZINC-PE') transform = T.AddRandomWalkPE(walk_length=20, attr_name='pe') @@ -26,9 +29,16 @@ val_loader = DataLoader(val_dataset, batch_size=64) test_loader = DataLoader(test_dataset, batch_size=64) +parser = argparse.ArgumentParser() +parser.add_argument( + '--attn_type', default='multihead', + help="Global attention type such as 'multihead' or 'performer'.") +args = parser.parse_args() + class GPS(torch.nn.Module): - def __init__(self, channels: int, pe_dim: int, num_layers: int): + def __init__(self, channels: int, pe_dim: int, num_layers: int, + attn_type: str, attn_kwargs: Dict[str, Any]): super().__init__() self.node_emb = Embedding(28, channels - pe_dim) @@ -43,7 +53,8 @@ def __init__(self, channels: int, pe_dim: int, num_layers: int): ReLU(), Linear(channels, channels), ) - conv = GPSConv(channels, GINEConv(nn), heads=4, attn_dropout=0.5) + conv = GPSConv(channels, GINEConv(nn), heads=4, + attn_type=attn_type, attn_kwargs=attn_kwargs) self.convs.append(conv) self.mlp = Sequential( @@ -53,6 +64,9 @@ def __init__(self, channels: int, pe_dim: int, num_layers: int): ReLU(), Linear(channels // 4, 1), ) + self.redraw_projection = RedrawProjection( + self.convs, + redraw_interval=1000 if attn_type == 'performer' else None) def forward(self, x, pe, edge_index, edge_attr, batch): x_pe = self.pe_norm(pe) @@ -65,8 +79,32 @@ def forward(self, x, pe, edge_index, edge_attr, batch): return self.mlp(x) +class RedrawProjection: + def __init__(self, model: torch.nn.Module, + redraw_interval: Optional[int] = None): + self.model = model + self.redraw_interval = redraw_interval + self.num_last_redraw = 0 + + def redraw_projections(self): + if not self.model.training or self.redraw_interval is None: + return + if self.num_last_redraw >= self.redraw_interval: + fast_attentions = [ + module for module in self.model.modules() + if isinstance(module, PerformerAttention) + ] + for fast_attention in fast_attentions: + fast_attention.redraw_projection_matrix() + self.num_last_redraw = 0 + return + self.num_last_redraw += 1 + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = GPS(channels=64, pe_dim=8, num_layers=10).to(device) +attn_kwargs = {'dropout': 0.5} +model = GPS(channels=64, pe_dim=8, num_layers=10, attn_type=args.attn_type, + attn_kwargs=attn_kwargs).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5) scheduler = 
ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, min_lr=0.00001) @@ -79,6 +117,7 @@ def train(): for data in train_loader: data = data.to(device) optimizer.zero_grad() + model.redraw_projection.redraw_projections() out = model(data.x, data.pe, data.edge_index, data.edge_attr, data.batch) loss = (out.squeeze() - data.y).abs().mean() diff --git a/test/nn/attention/test_performer.py b/test/nn/attention/test_performer.py new file mode 100644 index 000000000000..c0b45940e74a --- /dev/null +++ b/test/nn/attention/test_performer.py @@ -0,0 +1,13 @@ +import torch + +from torch_geometric.nn.attention import PerformerAttention + + +def test_performer_attention(): + x = torch.randn(1, 4, 16) + mask = torch.ones([1, 4], dtype=torch.bool) + attn = PerformerAttention(channels=16, heads=4) + out = attn(x, mask) + assert out.shape == (1, 4, 16) + assert str(attn) == ('PerformerAttention(heads=4, ' + 'head_channels=64 kernel=ReLU())') diff --git a/test/nn/conv/test_gps_conv.py b/test/nn/conv/test_gps_conv.py index eda1a4889ff7..1e063f2e4e8c 100644 --- a/test/nn/conv/test_gps_conv.py +++ b/test/nn/conv/test_gps_conv.py @@ -6,18 +6,20 @@ from torch_geometric.typing import SparseTensor from torch_geometric.utils import to_torch_csc_tensor +x = torch.randn(4, 16) +edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) +batch = torch.tensor([0, 0, 1, 1]) +adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) -@pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm']) -def test_gps_conv(norm): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) - batch = torch.tensor([0, 0, 1, 1]) - adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) - conv = GPSConv(16, conv=SAGEConv(16, 16), heads=4, norm=norm) +@pytest.mark.parametrize('attn_type', ['multihead', 'performer']) +@pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm']) +def test_gps_conv(norm, attn_type): + conv = GPSConv(16, conv=SAGEConv(16, 16), heads=4, norm=norm, + attn_type=attn_type) conv.reset_parameters() - assert str(conv) == ('GPSConv(16, conv=SAGEConv(16, 16, aggr=mean), ' - 'heads=4)') + assert str(conv) == (f'GPSConv(16, conv=SAGEConv(16, 16, aggr=mean), ' + f'heads=4, attn_type={attn_type})') out = conv(x, edge_index) assert out.size() == (4, 16) diff --git a/torch_geometric/nn/attention/__init__.py b/torch_geometric/nn/attention/__init__.py new file mode 100644 index 000000000000..947d5850173b --- /dev/null +++ b/torch_geometric/nn/attention/__init__.py @@ -0,0 +1,3 @@ +from .performer import PerformerAttention + +__all__ = ['PerformerAttention'] diff --git a/torch_geometric/nn/attention/performer.py b/torch_geometric/nn/attention/performer.py new file mode 100644 index 000000000000..b82b51f8e93e --- /dev/null +++ b/torch_geometric/nn/attention/performer.py @@ -0,0 +1,199 @@ +import math +from typing import Callable, Optional + +import torch +from torch import Tensor + + +def _orthogonal_matrix(dim: int) -> Tensor: + r"""Get an orthogonal matrix by applying QR decomposition.""" + # Random matrix from normal distribution + mat = torch.randn((dim, dim)) + # QR decomposition to two orthogonal matrices + q, _ = torch.linalg.qr(mat.cpu(), mode='reduced') + return q.t() + + +def orthogonal_matrix(num_rows: int, num_cols: int) -> Tensor: + r"""Generate an orthogonal matrix with `num_rows` rows + and `num_cols` columns. 
+ """ + num_full_blocks = int(num_rows / num_cols) + blocks = [] + for _ in range(num_full_blocks): + q = _orthogonal_matrix(num_cols) + blocks.append(q) + remain_rows = num_rows - num_full_blocks * num_cols + if remain_rows > 0: + q = _orthogonal_matrix(num_cols) + blocks.append(q[:remain_rows]) + mat = torch.cat(blocks) + # multiplier = torch.randn((num_rows, num_cols)).norm(dim=1) + # scaler = torch.diag(multiplier) + # mat = scaler @ mat + return mat + + +def linear_attention(q: Tensor, k: Tensor, v: Tensor) -> Tensor: + r"""Efficient attention mechanism from the + `"Rethinking Attention with Performers" + `_ paper. + + .. math:: + \mathbf{\hat{D}}^{-1}(\mathbf{Q}'((\mathbf{K}')^{\top} \mathbf{V})) + + """ + k_contract = k.sum(dim=-2) + D_inv = 1.0 / torch.einsum('...Lr,...r->...L', q, k_contract) + kv = torch.einsum('...Lr,...Ld->...rd', k, v) + qkv = torch.einsum('...Lr,...rd->...Ld', q, kv) + out = torch.einsum('...L,...Ld->...Ld', D_inv, qkv) + return out + + +def generalized_kernel(x: Tensor, mat: Tensor, + kernel: Callable = torch.nn.ReLU(), + epsilon: float = 0.001) -> Tensor: + r"""Apply generalized kernelizable attention with + kernel functions such as the ReLU. + """ + num_batches, num_heads, *_ = x.shape + # Expand projection matrix to number of batches and number of heads + projection = mat.expand(num_batches, num_heads, *mat.shape) + # "Inner" product x with projection matrix + x = torch.einsum('...id,...jd->...ij', x, projection) + out = kernel(x) + epsilon + return out + + +class PerformerProjection(torch.nn.Module): + r"""The fast attention that uses a projection matrix + from the `"Rethinking Attention with Performers" + `_ paper. This class + projects :math:`\mathbf{Q}` and :math:`\mathbf{K}` matrices + with specified kernel. + + Args: + num_cols (int): Projection matrix number of columns. + kernel (Callable, optional): Kernels for generalized attention. + If not specified, `ReLU` kernel will be used. + (default: :obj:`torch.nn.ReLU()`) + """ + def __init__(self, num_cols: int, kernel: Callable = torch.nn.ReLU()): + super().__init__() + num_rows = int(num_cols * math.log(num_cols)) + self.num_rows = num_rows + self.num_cols = num_cols + # Generate an orthogonal projection matrix + # with the shape (num_rows, num_cols) + projection_matrix = orthogonal_matrix(self.num_rows, self.num_cols) + self.register_buffer('projection_matrix', projection_matrix) + assert kernel is not None + self.kernel = kernel + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + q = generalized_kernel(q, self.projection_matrix, self.kernel) + k = generalized_kernel(k, self.projection_matrix, self.kernel) + out = linear_attention(q, k, v) + return out + + +class PerformerAttention(torch.nn.Module): + r"""The linear scaled attention mechanism from the + `"Rethinking Attention with Performers" + `_ paper. + + Args: + channels (int): Size of each input sample. + heads (int, optional): Number of parallel attention heads. + head_channels (int, optional): Size of each attention head. + (default: :obj:`64.`) + kernel (Callable, optional): Kernels for generalized attention. + If not specified, `ReLU` kernel will be used. + (default: :obj:`torch.nn.ReLU()`) + qkv_bias (bool, optional): If specified, add bias to query, key + and value in the self attention. (default: :obj:`False`) + attn_out_bias (bool, optional): If specified, add bias to the + attention output. (default: :obj:`True`) + dropout (float, optional): Dropout probability of the final + attention output. 
(default: :obj:`0.0`) + + """ + def __init__( + self, + channels: int, + heads: int, + head_channels: int = 64, + kernel: Callable = torch.nn.ReLU(), + qkv_bias: bool = False, + attn_out_bias: bool = True, + dropout: float = 0.0, + ): + super().__init__() + assert channels % heads == 0 + if head_channels is None: + head_channels = channels // heads + + self.heads = heads + self.head_channels = head_channels + self.kernel = kernel + self.fast_attn = PerformerProjection(head_channels, kernel) + + inner_channels = head_channels * heads + self.q = torch.nn.Linear(channels, inner_channels, bias=qkv_bias) + self.k = torch.nn.Linear(channels, inner_channels, bias=qkv_bias) + self.v = torch.nn.Linear(channels, inner_channels, bias=qkv_bias) + self.attn_out = torch.nn.Linear(inner_channels, channels, + bias=attn_out_bias) + self.dropout = torch.nn.Dropout(dropout) + + def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: + r""" + + Args: + x (torch.Tensor): Node feature tensor + :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`, with + batch-size :math:`B`, (maximum) number of nodes :math:`N` for + each graph, and feature dimension :math:`F`. + mask (torch.Tensor, optional): Mask matrix + :math:`\mathbf{M} \in {\{ 0, 1 \}}^{B \times N}` indicating + the valid nodes for each graph. (default: :obj:`None`) + """ + B, N, *_ = x.shape + q, k, v = self.q(x), self.k(x), self.v(x) + # Reshape and permute q, k and v to proper shape + # (B, N, num_heads * head_channels) to (b, num_heads, n, head_channels) + q, k, v = map( + lambda t: t.reshape(B, N, self.heads, self.head_channels).permute( + 0, 2, 1, 3), (q, k, v)) + if mask is not None: + mask = mask[:, None, :, None] + v.masked_fill_(~mask, 0.) + out = self.fast_attn(q, k, v) + out = out.permute(0, 2, 1, 3).reshape(B, N, -1) + out = self.attn_out(out) + out = self.dropout(out) + return out + + @torch.no_grad() + def redraw_projection_matrix(self): + r"""As described in the paper, periodically redraw + examples to improve overall approximation of attention.""" + num_rows = self.fast_attn.num_rows + num_cols = self.fast_attn.num_cols + projection_matrix = orthogonal_matrix(num_rows, num_cols) + self.fast_attn.projection_matrix.copy_(projection_matrix) + del projection_matrix + + def _reset_parameters(self): + self.q.reset_parameters() + self.k.reset_parameters() + self.v.reset_parameters() + self.attn_out.reset_parameters() + self.redraw_projection_matrix() + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}(' + f'heads={self.heads}, ' + f'head_channels={self.head_channels} ' + f'kernel={self.kernel})') diff --git a/torch_geometric/nn/conv/gps_conv.py b/torch_geometric/nn/conv/gps_conv.py index 1b5787d44117..c967e10a85d7 100644 --- a/torch_geometric/nn/conv/gps_conv.py +++ b/torch_geometric/nn/conv/gps_conv.py @@ -6,6 +6,7 @@ from torch import Tensor from torch.nn import Dropout, Linear, Sequential +from torch_geometric.nn.attention import PerformerAttention from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.inits import reset from torch_geometric.nn.resolver import ( @@ -43,8 +44,6 @@ class GPSConv(torch.nn.Module): (default: :obj:`1`) dropout (float, optional): Dropout probability of intermediate embeddings. (default: :obj:`0.`) - attn_dropout (float, optional): Dropout probability of the normalized - attention coefficients. (default: :obj:`0`) act (str or Callable, optional): The non-linear activation function to use. 
(default: :obj:`"relu"`)
         act_kwargs (Dict[str, Any], optional): Arguments passed to the
@@ -55,6 +54,10 @@ class GPSConv(torch.nn.Module):
         norm_kwargs (Dict[str, Any], optional): Arguments passed to the
             respective normalization function defined by :obj:`norm`.
             (default: :obj:`None`)
+        attn_type (str): Global attention type, :obj:`multihead` or
+            :obj:`performer`. (default: :obj:`multihead`)
+        attn_kwargs (Dict[str, Any], optional): Arguments passed to the
+            attention layer. (default: :obj:`None`)
     """
     def __init__(
         self,
@@ -62,11 +65,12 @@ def __init__(
         conv: Optional[MessagePassing],
         heads: int = 1,
         dropout: float = 0.0,
-        attn_dropout: float = 0.0,
         act: str = 'relu',
         act_kwargs: Optional[Dict[str, Any]] = None,
         norm: Optional[str] = 'batch_norm',
         norm_kwargs: Optional[Dict[str, Any]] = None,
+        attn_type: str = 'multihead',
+        attn_kwargs: Optional[Dict[str, Any]] = None,
     ):
         super().__init__()
 
@@ -74,13 +78,25 @@ def __init__(
         self.conv = conv
         self.heads = heads
         self.dropout = dropout
-
-        self.attn = torch.nn.MultiheadAttention(
-            channels,
-            heads,
-            dropout=attn_dropout,
-            batch_first=True,
-        )
+        self.attn_type = attn_type
+
+        attn_kwargs = attn_kwargs or {}
+        if attn_type == 'multihead':
+            self.attn = torch.nn.MultiheadAttention(
+                channels,
+                heads,
+                batch_first=True,
+                **attn_kwargs,
+            )
+        elif attn_type == 'performer':
+            self.attn = PerformerAttention(
+                channels=channels,
+                heads=heads,
+                **attn_kwargs,
+            )
+        else:
+            # TODO: Support BigBird
+            raise ValueError(f'{attn_type} is not supported')
 
         self.mlp = Sequential(
             Linear(channels, channels * 2),
@@ -135,7 +151,13 @@ def forward(
         # Global attention transformer-style model.
         h, mask = to_dense_batch(x, batch)
-        h, _ = self.attn(h, h, h, key_padding_mask=~mask, need_weights=False)
+
+        if isinstance(self.attn, torch.nn.MultiheadAttention):
+            h, _ = self.attn(h, h, h, key_padding_mask=~mask,
+                             need_weights=False)
+        elif isinstance(self.attn, PerformerAttention):
+            h = self.attn(h, mask=mask)
+
         h = h[mask]
         h = F.dropout(h, p=self.dropout, training=self.training)
         h = h + x  # Residual connection.
@@ -159,4 +181,5 @@ def forward(
 
     def __repr__(self) -> str:
         return (f'{self.__class__.__name__}({self.channels}, '
-                f'conv={self.conv}, heads={self.heads})')
+                f'conv={self.conv}, heads={self.heads}, '
+                f'attn_type={self.attn_type})')

From 2dcfdf1f9fa3d4809a9871fa48070e38636cd161 Mon Sep 17 00:00:00 2001
From: ZhengHongming888
Date: Mon, 12 Jun 2023 06:03:46 -0700
Subject: [PATCH 1277/2432] Add `LocalFeatureStore` and `LocalGraphStore`
 helper initializations (#7482)

This code is part of the distributed training support for PyG. These
helpers initialize the graph and node/edge feature data on top of the
`LocalGraphStore`/`LocalFeatureStore` data structures. You can use these
initialization APIs to define the graph and the node/edge features for
your homogeneous/heterogeneous datasets. We also include unit tests under
the `test/distributed` folder that show how an OGB-MAG dataset is
initialized into a graph store and a feature store via these APIs.
Any comments, please let us know.
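For illustration, a minimal usage sketch of the new `from_data()` helpers on a homogeneous graph; the tensor shapes and values below are arbitrary placeholders, and the direct module imports are just one convenient way to reach the classes:

```python
import torch

from torch_geometric.distributed.local_feature_store import LocalFeatureStore
from torch_geometric.distributed.local_graph_store import LocalGraphStore

num_nodes, num_edges = 100, 300

# Global IDs of the nodes/edges held by this partition (placeholders here):
node_id = torch.randperm(num_nodes)
edge_id = torch.randperm(num_edges)
edge_index = torch.randint(num_nodes, (2, num_edges))

# Local graph topology:
graph_store = LocalGraphStore.from_data(edge_id, edge_index,
                                        num_nodes=num_nodes)

# Local node features and labels:
feat_store = LocalFeatureStore.from_data(node_id, x=torch.randn(num_nodes, 32),
                                         y=torch.randint(0, 2, (num_nodes, )))

# Read everything back through the store interfaces:
x = feat_store.get_tensor(group_name=None, attr_name='x')
coo = graph_store.get_edge_index(edge_type=None, layout='coo')
```

The `from_hetero_data()` variants work the same way, with every tensor keyed by its node or edge type.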
thanks --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/distributed/test_local_feature_store.py | 100 ++++++++++++-- test/distributed/test_local_graph_store.py | 50 ++++++- .../distributed/local_feature_store.py | 130 +++++++++++++++--- .../distributed/local_graph_store.py | 70 +++++++++- 5 files changed, 315 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae0f0b87c9c6..7e73babfee60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_local_feature_store.py b/test/distributed/test_local_feature_store.py index 1ccb15b3e334..557ca91ca641 100644 --- a/test/distributed/test_local_feature_store.py +++ b/test/distributed/test_local_feature_store.py @@ -18,15 +18,14 @@ def test_local_feature_store_global_id(): [8, 8, 8], ]) - kwargs = dict(group_name='part1', attr_name='feat') - part1_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) - part1_feat = feat[part1_global_id] + paper_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) + paper_feat = feat[paper_global_id] - store.put_global_id(part1_global_id, **kwargs) - store.put_tensor(part1_feat, **kwargs) + store.put_global_id(paper_global_id, group_name='paper') + store.put_tensor(paper_feat, group_name='paper', attr_name='feat') - out = store.get_tensor_from_global_id(index=torch.tensor([3, 8, 4]), - **kwargs) + out = store.get_tensor_from_global_id(group_name='paper', attr_name='feat', + index=torch.tensor([3, 8, 4])) assert torch.equal(out, feat[torch.tensor([3, 8, 4])]) @@ -45,15 +44,92 @@ def test_local_feature_store_utils(): [8, 8, 8], ]) - kwargs = dict(group_name='part1', attr_name='feat') - part1_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) - part1_feat = feat[part1_global_id] + paper_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) + paper_feat = feat[paper_global_id] - store.put_tensor(part1_feat, **kwargs) + store.put_tensor(paper_feat, group_name='paper', attr_name='feat') assert len(store.get_all_tensor_attrs()) == 1 attr = store.get_all_tensor_attrs()[0] - assert attr.group_name == 'part1' + assert attr.group_name == 'paper' assert attr.attr_name == 'feat' assert attr.index is None assert store.get_tensor_size(attr) == (6, 3) + + +def test_homogeneous_feature_store(): + 
node_id = torch.randperm(6) + x = torch.randn(6, 32) + y = torch.randint(0, 2, (6, )) + edge_id = torch.randperm(12) + edge_attr = torch.randn(12, 16) + + store = LocalFeatureStore.from_data(node_id, x, y, edge_id, edge_attr) + + assert len(store.get_all_tensor_attrs()) == 3 + attrs = store.get_all_tensor_attrs() + + assert attrs[0].group_name is None + assert attrs[0].attr_name == 'x' + assert attrs[1].group_name is None + assert attrs[1].attr_name == 'y' + assert attrs[2].group_name == (None, None) + assert attrs[2].attr_name == 'edge_attr' + + assert torch.equal(store.get_global_id(group_name=None), node_id) + assert torch.equal(store.get_tensor(group_name=None, attr_name='x'), x) + assert torch.equal(store.get_tensor(group_name=None, attr_name='y'), y) + assert torch.equal(store.get_global_id(group_name=(None, None)), edge_id) + assert torch.equal( + store.get_tensor(group_name=(None, None), attr_name='edge_attr'), + edge_attr, + ) + + +def test_heterogeneous_feature_store(): + node_type = 'paper' + edge_type = ('paper', 'to', 'paper') + node_id_dict = {node_type: torch.randperm(6)} + x_dict = {node_type: torch.randn(6, 32)} + y_dict = {node_type: torch.randint(0, 2, (6, ))} + edge_id_dict = {edge_type: torch.randperm(12)} + edge_attr_dict = {edge_type: torch.randn(12, 16)} + + store = LocalFeatureStore.from_hetero_data( + node_id_dict, + x_dict, + y_dict, + edge_id_dict, + edge_attr_dict, + ) + + assert len(store.get_all_tensor_attrs()) == 3 + attrs = store.get_all_tensor_attrs() + + assert attrs[0].group_name == node_type + assert attrs[0].attr_name == 'x' + assert attrs[1].group_name == node_type + assert attrs[1].attr_name == 'y' + assert attrs[2].group_name == edge_type + assert attrs[2].attr_name == 'edge_attr' + + assert torch.equal( + store.get_global_id(group_name=node_type), + node_id_dict[node_type], + ) + assert torch.equal( + store.get_tensor(group_name=node_type, attr_name='x'), + x_dict[node_type], + ) + assert torch.equal( + store.get_tensor(group_name=node_type, attr_name='y'), + y_dict[node_type], + ) + assert torch.equal( + store.get_global_id(group_name=edge_type), + edge_id_dict[edge_type], + ) + assert torch.equal( + store.get_tensor(group_name=edge_type, attr_name='edge_attr'), + edge_attr_dict[edge_type], + ) diff --git a/test/distributed/test_local_graph_store.py b/test/distributed/test_local_graph_store.py index 2047db8ef0f0..ce83082afc4b 100644 --- a/test/distributed/test_local_graph_store.py +++ b/test/distributed/test_local_graph_store.py @@ -8,7 +8,7 @@ def test_local_graph_store(): graph_store = LocalGraphStore() edge_index = get_random_edge_index(100, 100, 300) - edge_id = torch.tensor([1, 2, 3, 5, 8, 4], dtype=torch.int64) + edge_id = torch.tensor([1, 2, 3, 5, 8, 4]) graph_store.put_edge_index(edge_index, edge_type=None, layout='coo', size=(100, 100)) @@ -24,3 +24,51 @@ def test_local_graph_store(): graph_store.remove_edge_index(edge_attr) graph_store.remove_edge_id(edge_attr) assert len(graph_store.get_all_edge_attrs()) == 0 + + +def test_homogeneous_graph_store(): + edge_id = torch.randperm(300) + edge_index = get_random_edge_index(100, 100, 300) + + graph_store = LocalGraphStore.from_data(edge_id, edge_index, num_nodes=100) + + assert len(graph_store.get_all_edge_attrs()) == 1 + edge_attr = graph_store.get_all_edge_attrs()[0] + assert edge_attr.edge_type is None + assert edge_attr.layout.value == 'coo' + assert not edge_attr.is_sorted + assert edge_attr.size == (100, 100) + + assert torch.equal( + graph_store.get_edge_id(edge_type=None, 
layout='coo'), + edge_id, + ) + assert torch.equal( + graph_store.get_edge_index(edge_type=None, layout='coo'), + edge_index, + ) + + +def test_heterogeneous_graph_store(): + edge_type = ('paper', 'to', 'paper') + edge_id_dict = {edge_type: torch.randperm(300)} + edge_index_dict = {edge_type: get_random_edge_index(100, 100, 300)} + + graph_store = LocalGraphStore.from_hetero_data( + edge_id_dict, edge_index_dict, num_nodes_dict={'paper': 100}) + + assert len(graph_store.get_all_edge_attrs()) == 1 + edge_attr = graph_store.get_all_edge_attrs()[0] + assert edge_attr.edge_type == edge_type + assert edge_attr.layout.value == 'coo' + assert not edge_attr.is_sorted + assert edge_attr.size == (100, 100) + + assert torch.equal( + graph_store.get_edge_id(edge_type, layout='coo'), + edge_id_dict[edge_type], + ) + assert torch.equal( + graph_store.get_edge_index(edge_type, layout='coo'), + edge_index_dict[edge_type], + ) diff --git a/torch_geometric/distributed/local_feature_store.py b/torch_geometric/distributed/local_feature_store.py index 5abbfa2f1f47..76442c005f02 100644 --- a/torch_geometric/distributed/local_feature_store.py +++ b/torch_geometric/distributed/local_feature_store.py @@ -1,12 +1,13 @@ import copy from dataclasses import dataclass -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union import torch from torch import Tensor from torch_geometric.data import FeatureStore, TensorAttr from torch_geometric.data.feature_store import _field_status +from torch_geometric.typing import EdgeType, NodeType @dataclass @@ -14,7 +15,7 @@ class LocalTensorAttr(TensorAttr): r"""Tensor attribute for storing features without :obj:`index`.""" def __init__( self, - group_name: Optional[str] = _field_status.UNSET, + group_name: Optional[Union[NodeType, EdgeType]] = _field_status.UNSET, attr_name: Optional[str] = _field_status.UNSET, index=None, ): @@ -27,35 +28,38 @@ class LocalFeatureStore(FeatureStore): def __init__(self): super().__init__(tensor_attr_cls=LocalTensorAttr) - self._feat: Dict[Tuple[str, str], Tensor] = {} + self._feat: Dict[Tuple[Union[NodeType, EdgeType], str], Tensor] = {} # Save the global node/edge IDs: - self._global_id: Dict[Tuple[str, str], Tensor] = {} + self._global_id: Dict[Union[NodeType, EdgeType], Tensor] = {} # Save the mapping from global node/edge IDs to indices in `_feat`: - self._global_id_to_index: Dict[Tuple[str, str], Tensor] = {} + self._global_id_to_index: Dict[Union[NodeType, EdgeType], Tensor] = {} @staticmethod def key(attr: TensorAttr) -> Tuple[str, str]: return (attr.group_name, attr.attr_name) - def put_global_id(self, global_id: Tensor, *args, **kwargs) -> bool: - attr = self._tensor_attr_cls.cast(*args, **kwargs) - self._global_id[self.key(attr)] = global_id - self._set_global_id_to_index(attr) + def put_global_id( + self, + global_id: Tensor, + group_name: Union[NodeType, EdgeType], + ) -> bool: + self._global_id[group_name] = global_id + self._set_global_id_to_index(group_name) return True - def get_global_id(self, *args, **kwargs) -> Optional[Tensor]: - attr = self._tensor_attr_cls.cast(*args, **kwargs) - return self._global_id.get(self.key(attr)) + def get_global_id( + self, + group_name: Union[NodeType, EdgeType], + ) -> Optional[Tensor]: + return self._global_id.get(group_name) - def remove_global_id(self, *args, **kwargs) -> bool: - attr = self._tensor_attr_cls.cast(*args, **kwargs) - return self._global_id.pop(self.key(attr), None) is not None + def remove_global_id(self, group_name: Union[NodeType, 
EdgeType]) -> bool: + return self._global_id.pop(group_name) is not None - def _set_global_id_to_index(self, *args, **kwargs): - attr = self._tensor_attr_cls.cast(*args, **kwargs) - global_id = self.get_global_id(attr) + def _set_global_id_to_index(self, group_name: Union[NodeType, EdgeType]): + global_id = self.get_global_id(group_name) if global_id is None: return @@ -64,7 +68,7 @@ def _set_global_id_to_index(self, *args, **kwargs): global_id_to_index = global_id.new_full((int(global_id.max()) + 1, ), fill_value=-1) global_id_to_index[global_id] = torch.arange(global_id.numel()) - self._global_id_to_index[self.key(attr)] = global_id_to_index + self._global_id_to_index[group_name] = global_id_to_index def _put_tensor(self, tensor: Tensor, attr: TensorAttr) -> bool: assert attr.index is None @@ -91,7 +95,7 @@ def get_tensor_from_global_id(self, *args, **kwargs) -> Optional[Tensor]: assert attr.index is not None attr = copy.copy(attr) - attr.index = self._global_id_to_index[self.key(attr)][attr.index] + attr.index = self._global_id_to_index[attr.group_name][attr.index] return self.get_tensor(attr) @@ -100,3 +104,89 @@ def _get_tensor_size(self, attr: TensorAttr) -> Tuple[int, ...]: def get_all_tensor_attrs(self) -> List[LocalTensorAttr]: return [self._tensor_attr_cls.cast(*key) for key in self._feat.keys()] + + # Initialization ########################################################## + + @classmethod + def from_data( + cls, + node_id: Tensor, + x: Optional[Tensor] = None, + y: Optional[Tensor] = None, + edge_id: Optional[Tensor] = None, + edge_attr: Optional[Tensor] = None, + ) -> 'LocalFeatureStore': + r"""Creates a local feature store from homogeneous :pyg:`PyG` tensors. + + Args: + node_id (torch.Tensor): The global identifier for every local node. + x (torch.Tensor, optional): The node features. + (default: :obj:`None`) + y (torch.Tensor, optional): The node labels. (default: :obj:`None`) + edge_id (torch.Tensor, optional): The global identifier for every + local edge. (default: :obj:`None`) + edge_attr (torch.Tensor, optional): The edge features. + (default: :obj:`None`) + """ + feat_store = cls() + feat_store.put_global_id(node_id, group_name=None) + if x is not None: + feat_store.put_tensor(x, group_name=None, attr_name='x') + if y is not None: + feat_store.put_tensor(y, group_name=None, attr_name='y') + if edge_id is not None: + feat_store.put_global_id(edge_id, group_name=(None, None)) + if edge_attr is not None: + if edge_id is None: + raise ValueError("'edge_id' needs to be present in case " + "'edge_attr' is passed") + feat_store.put_tensor(edge_attr, group_name=(None, None), + attr_name='edge_attr') + return feat_store + + @classmethod + def from_hetero_data( + cls, + node_id_dict: Dict[NodeType, Tensor], + x_dict: Optional[Dict[NodeType, Tensor]] = None, + y_dict: Optional[Dict[NodeType, Tensor]] = None, + edge_id_dict: Optional[Dict[EdgeType, Tensor]] = None, + edge_attr_dict: Optional[Dict[EdgeType, Tensor]] = None, + ) -> 'LocalFeatureStore': + r"""Creates a local graph store from heterogeneous :pyg:`PyG` tensors. + + Args: + node_id_dict (Dict[NodeType, torch.Tensor]): The global identifier + for every local node of every node type. + x_dict (Dict[NodeType, torch.Tensor], optional): The node features + of node types. (default: :obj:`None`) + y_dict (Dict[NodeType, torch.Tensor], optional): The node labels of + node types. (default: :obj:`None`) + edge_id_dict (Dict[EdgeType, torch.Tensor], optional): The global + identifier for every local edge of edge types. 
+ (default: :obj:`None`) + edge_attr_dict (Dict[EdgeType, torch.Tensor], optional): The edge + features of edge types. (default: :obj:`None`) + """ + feat_store = cls() + + for node_type, node_id in node_id_dict.items(): + feat_store.put_global_id(node_id, group_name=node_type) + if x_dict is not None: + for node_type, x in x_dict.items(): + feat_store.put_tensor(x, group_name=node_type, attr_name='x') + if y_dict is not None: + for node_type, y in y_dict.items(): + feat_store.put_tensor(y, group_name=node_type, attr_name='y') + if edge_id_dict is not None: + for edge_type, edge_id in edge_id_dict.items(): + feat_store.put_global_id(edge_id, group_name=edge_type) + if edge_attr_dict is not None: + for edge_type, edge_attr in edge_attr_dict.items(): + if edge_id_dict is None or edge_type not in edge_id_dict: + raise ValueError("'edge_id' needs to be present in case " + "'edge_attr' is passed") + feat_store.put_tensor(edge_attr, group_name=edge_type, + attr_name='edge_attr') + + return feat_store diff --git a/torch_geometric/distributed/local_graph_store.py b/torch_geometric/distributed/local_graph_store.py index d83e301a15dd..ecdaf7dfd367 100644 --- a/torch_geometric/distributed/local_graph_store.py +++ b/torch_geometric/distributed/local_graph_store.py @@ -3,7 +3,7 @@ from torch import Tensor from torch_geometric.data import EdgeAttr, GraphStore -from torch_geometric.typing import EdgeTensorType +from torch_geometric.typing import EdgeTensorType, EdgeType, NodeType class LocalGraphStore(GraphStore): @@ -12,11 +12,12 @@ class LocalGraphStore(GraphStore): def __init__(self): super().__init__() self._edge_index: Dict[Tuple, EdgeTensorType] = {} + self._edge_attr: Dict[Tuple, EdgeAttr] = {} self._edge_id: Dict[Tuple, Tensor] = {} @staticmethod def key(attr: EdgeAttr) -> Tuple: - return (attr.edge_type, attr.layout.value, attr.is_sorted, attr.size) + return (attr.edge_type, attr.layout.value) def put_edge_id(self, edge_id: Tensor, *args, **kwargs) -> bool: edge_attr = self._edge_attr_cls.cast(*args, **kwargs) @@ -34,13 +35,76 @@ def remove_edge_id(self, *args, **kwargs) -> bool: def _put_edge_index(self, edge_index: EdgeTensorType, edge_attr: EdgeAttr) -> bool: self._edge_index[self.key(edge_attr)] = edge_index + self._edge_attr[self.key(edge_attr)] = edge_attr return True def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: return self._edge_index.get(self.key(edge_attr), None) def _remove_edge_index(self, edge_attr: EdgeAttr) -> bool: + self._edge_attr.pop(self.key(edge_attr), None) return self._edge_index.pop(self.key(edge_attr), None) is not None def get_all_edge_attrs(self) -> List[EdgeAttr]: - return [EdgeAttr(*key) for key in self._edge_index.keys()] + return [self._edge_attr[key] for key in self._edge_index.keys()] + + # Initialization ########################################################## + + @classmethod + def from_data( + cls, + edge_id: Tensor, + edge_index: Tensor, + num_nodes: int, + ) -> 'LocalGraphStore': + r"""Creates a local graph store from a homogeneous :pyg:`PyG` graph. + + Args: + edge_id (torch.Tensor): The global identifier for every local edge. + edge_index (torch.Tensor): The local edge indices. + num_nodes (int): The number of nodes in the local graph. 
+ """ + attr = dict( + edge_type=None, + layout='coo', + size=(num_nodes, num_nodes), + ) + + graph_store = cls() + graph_store.put_edge_index(edge_index, **attr) + graph_store.put_edge_id(edge_id, **attr) + return graph_store + + @classmethod + def from_hetero_data( + cls, + edge_id_dict: Dict[EdgeType, Tensor], + edge_index_dict: Dict[EdgeType, Tensor], + num_nodes_dict: Dict[NodeType, int], + ) -> 'LocalGraphStore': + r"""Creates a local graph store from a heterogeneous :pyg:`PyG` graph. + + Args: + edge_id_dict (Dict[EdgeType, torch.Tensor]): The global identifier + for every local edge of every edge type. + edge_index_dict (Dict[EdgeType, torch.Tensor]): The local edge + indices of every edge type. + num_nodes_dict (Dict[NodeType, int]): The number of nodes in the + local graph of every node type. + """ + attr_dict = {} + for edge_type in edge_index_dict.keys(): + src, _, dst = edge_type + attr_dict[edge_type] = dict( + edge_type=edge_type, + layout='coo', + size=(num_nodes_dict[src], num_nodes_dict[dst]), + ) + + graph_store = cls() + for edge_type, edge_index in edge_index_dict.items(): + attr = attr_dict[edge_type] + edge_id = edge_id_dict[edge_type] + graph_store.put_edge_index(edge_index, **attr) + graph_store.put_edge_id(edge_id, **attr) + return graph_store From 5feff45b8919048d68136b9d5d46b5c3ebc21959 Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Mon, 12 Jun 2023 15:18:13 +0200 Subject: [PATCH 1278/2432] Add `dim size` parameter to the `EquilibriumAggregation` layer potential function (#7559) Avoid calculating the same value multiple times. --------- Co-authored-by: rusty1s --- examples/equilibrium_median.py | 6 +++--- torch_geometric/nn/aggr/equilibrium.py | 19 ++++++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/examples/equilibrium_median.py b/examples/equilibrium_median.py index 6a5c0f341144..2cd142efcac6 100644 --- a/examples/equilibrium_median.py +++ b/examples/equilibrium_median.py @@ -27,7 +27,7 @@ total_loss = 0 n_loss = 0 -for i in range(steps): +for i in range(1, steps + 1): optimizer.zero_grad() dist = np.random.choice([norm, gamma, uniform]) x = dist.sample((input_size, 1)) @@ -37,5 +37,5 @@ optimizer.step() total_loss += loss n_loss += 1 - if i % eval_each == (eval_each - 1): - print(f"Average loss at epoc {i} is {total_loss / n_loss}") + if i % eval_each == 0: + print(f"Epoch: {i}, Loss {total_loss / n_loss:.6f}") diff --git a/torch_geometric/nn/aggr/equilibrium.py b/torch_geometric/nn/aggr/equilibrium.py index 87b46f0cc5bc..1357696a9f2f 100644 --- a/torch_geometric/nn/aggr/equilibrium.py +++ b/torch_geometric/nn/aggr/equilibrium.py @@ -26,7 +26,8 @@ def __init__(self, in_channels: int, out_channels: int, for layer_size in num_layers + [out_channels] ]) - def forward(self, x: Tensor, y: Tensor, index: Optional[Tensor]) -> Tensor: + def forward(self, x: Tensor, y: Tensor, index: Optional[Tensor], + dim_size: Optional[int] = None) -> Tensor: if index is None: inp = torch.cat([x, y.expand(x.size(0), -1)], dim=1) else: @@ -40,8 +41,10 @@ def forward(self, x: Tensor, y: Tensor, index: Optional[Tensor]) -> Tensor: if index is None: return h.mean() - size = int(index.max().item() + 1) - return scatter(h, index, dim=0, dim_size=size, reduce='mean').sum() + if dim_size is None: + dim_size = int(index.max().item() + 1) + + return scatter(h, index, 0, dim_size, reduce='mean').sum() class MomentumOptimizer(torch.nn.Module): @@ -86,13 +89,14 @@ def forward( x: Tensor, y: Tensor, index: Optional[Tensor], + dim_size: Optional[int], func: 
Callable[[Tensor, Tensor, Optional[Tensor]], Tensor], iterations: int = 5, ) -> Tuple[Tensor, float]: momentum_buffer = torch.zeros_like(y) for _ in range(iterations): - val = func(x, y, index) + val = func(x, y, index, dim_size) grad = torch.autograd.grad(val, y, create_graph=True, retain_graph=True)[0] delta = self.learning_rate * grad @@ -153,8 +157,9 @@ def init_output(self, dim_size: int) -> Tensor: def reg(self, y: Tensor) -> Tensor: return self.softplus(self.lamb) * y.square().sum(dim=-1).mean() - def energy(self, x: Tensor, y: Tensor, index: Optional[Tensor]): - return self.potential(x, y, index) + self.reg(y) + def energy(self, x: Tensor, y: Tensor, index: Optional[Tensor], + dim_size: Optional[int] = None): + return self.potential(x, y, index, dim_size) + self.reg(y) def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, @@ -165,7 +170,7 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, dim_size = int(index.max()) + 1 if dim_size is None else dim_size with torch.enable_grad(): - y = self.optimizer(x, self.init_output(dim_size), index, + y = self.optimizer(x, self.init_output(dim_size), index, dim_size, self.energy, iterations=self.grad_iter) return y From d2da4dacddff0015d9fb7459b0df53b6907f194d Mon Sep 17 00:00:00 2001 From: Piotr Chmiel Date: Mon, 12 Jun 2023 15:22:53 +0200 Subject: [PATCH 1279/2432] Add a `num_nodes` parameter to the `HypergraphConv` layer (#7560) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/conv/hypergraph_conv.py | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e73babfee60..7d47d4386b4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Added a `num_edges` parameter to the forward method of `HypergraphConv` ([#7560](https://github.com/pyg-team/pytorch_geometric/pull/7560)) - Fixed `get_mesh_laplacian` for `normalization="sym"` ([#7544](https://github.com/pyg-team/pytorch_geometric/pull/7544)) - Use `dim_size` to initialize output size of the `EquilibriumAggregation` layer ([#7530](https://github.com/pyg-team/pytorch_geometric/pull/7530)) - Added a `max_num_elements` parameter to the forward method of `GraphMultisetTransformer`, `GRUAggregation`, `LSTMAggregation` and `SetTransformerAggregation` ([#7529](https://github.com/pyg-team/pytorch_geometric/pull/7529)) diff --git a/torch_geometric/nn/conv/hypergraph_conv.py b/torch_geometric/nn/conv/hypergraph_conv.py index de1c00180ed7..5947986346b2 100644 --- a/torch_geometric/nn/conv/hypergraph_conv.py +++ b/torch_geometric/nn/conv/hypergraph_conv.py @@ -5,6 +5,7 @@ from torch import Tensor from torch.nn import Parameter +from torch_geometric.experimental import disable_dynamic_shapes from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.dense.linear import Linear from torch_geometric.nn.inits import glorot, zeros @@ -107,9 +108,11 @@ def reset_parameters(self): glorot(self.att) zeros(self.bias) + @disable_dynamic_shapes(required_args=['num_edges']) def forward(self, x: Tensor, hyperedge_index: Tensor, hyperedge_weight: Optional[Tensor] = None, - hyperedge_attr: Optional[Tensor] = None) -> Tensor: + hyperedge_attr: Optional[Tensor] = None, + num_edges: Optional[int] = None) -> Tensor: r"""Runs the forward pass of the module. 
Args: @@ -125,10 +128,15 @@ def forward(self, x: Tensor, hyperedge_index: Tensor, in :math:`\mathbb{R}^{M \times F}`. These features only need to get passed in case :obj:`use_attention=True`. (default: :obj:`None`) + num_edges (int, optional) : The number of edges :math:`M`. + (default: :obj:`None`) """ - num_nodes, num_edges = x.size(0), 0 - if hyperedge_index.numel() > 0: - num_edges = int(hyperedge_index[1].max()) + 1 + num_nodes = x.size(0) + + if num_edges is None: + num_edges = 0 + if hyperedge_index.numel() > 0: + num_edges = int(hyperedge_index[1].max()) + 1 if hyperedge_weight is None: hyperedge_weight = x.new_ones(num_edges) From b6b413787d5c985942c99d44e8c5407f02b836eb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 12 Jun 2023 15:45:57 +0200 Subject: [PATCH 1280/2432] Temporarily disable `pyg-lib`+METIS tests (#7561) --- test/loader/test_cluster.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index 6a58cf42fd1d..292f2a551380 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -1,15 +1,23 @@ import pytest import torch -import torch_geometric.typing from torch_geometric.data import Data from torch_geometric.loader import ClusterData, ClusterLoader from torch_geometric.testing import onlyFullTest from torch_geometric.utils import sort_edge_index +try: + # TODO Using `pyg-lib` metis partitioning leads to some weird bugs in the + # CI. As such, we require `torch-sparse` for these tests for now. + rowptr = torch.tensor([0, 1]) + col = torch.tensor([0]) + torch.ops.torch_sparse.partition(rowptr, col, None, 1, True) + WITH_METIS = True +except (AttributeError, RuntimeError): + WITH_METIS = False -@pytest.mark.skipif(not torch_geometric.typing.WITH_METIS, - reason='Not compiled with METIS support') + +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_cluster_gcn(): adj = torch.tensor([ [1, 1, 1, 0, 1, 0], @@ -102,8 +110,7 @@ def test_cluster_gcn(): assert torch.equal(out.edge_attr, tmp[1]) -@pytest.mark.skipif(not torch_geometric.typing.WITH_METIS, - reason='Not compiled with METIS support') +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_keep_inter_cluster_edges(): adj = torch.tensor([ [1, 1, 1, 0, 1, 0], @@ -139,8 +146,7 @@ def test_keep_inter_cluster_edges(): @onlyFullTest -@pytest.mark.skipif(not torch_geometric.typing.WITH_METIS, - reason='Not compiled with METIS support') +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_cluster_gcn_correctness(get_dataset): dataset = get_dataset('Cora') data = dataset[0].clone() From 6d9a03ac3eb062b5b914c83720f8b92987f05ac8 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Mon, 12 Jun 2023 07:03:03 -0700 Subject: [PATCH 1281/2432] `dense_diff_pool` benchmark (#7550) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/dense/test_diff_pool.py | 38 ++++++++++++++++++++++++++++ torch_geometric/profile/benchmark.py | 26 ++++++++++++++++--- 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/test/nn/dense/test_diff_pool.py b/test/nn/dense/test_diff_pool.py index 051f5f4a6ce2..5ad3e64f1b1c 100644 --- a/test/nn/dense/test_diff_pool.py +++ b/test/nn/dense/test_diff_pool.py @@ -1,6 +1,9 @@ +from itertools import product + import torch from torch_geometric.nn import dense_diff_pool +from torch_geometric.profile import benchmark 
from torch_geometric.testing import is_full_test @@ -24,3 +27,38 @@ def test_dense_diff_pool(): assert torch.allclose(adj_jit, adj_out) assert link_loss.item() >= 0 assert ent_loss.item() >= 0 + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cuda') + args = parser.parse_args() + + BS = [2**i for i in range(4, 8)] + NS = [2**i for i in range(4, 8)] + FS = [2**i for i in range(5, 9)] + CS = [2**i for i in range(5, 9)] + + funcs = [] + func_names = [] + args_list = [] + for B, N, F, C in product(BS, NS, FS, CS): + x = torch.randn(B, N, F, device=args.device) + adj = torch.randint(0, 2, (B, N, N), dtype=x.dtype, device=args.device) + s = torch.randn(B, N, C, device=args.device) + + funcs.append(dense_diff_pool) + func_names.append(f'B={B}, N={N}, F={F}, C={C}') + args_list.append((x, adj, s)) + + benchmark( + funcs=funcs, + func_names=func_names, + args=args_list, + num_steps=50 if args.device == 'cpu' else 500, + num_warmups=10 if args.device == 'cpu' else 100, + per_step=True, + progress_bar=True, + ) diff --git a/torch_geometric/profile/benchmark.py b/torch_geometric/profile/benchmark.py index 60e6b2b8ea8e..3909124e2f3e 100644 --- a/torch_geometric/profile/benchmark.py +++ b/torch_geometric/profile/benchmark.py @@ -27,6 +27,8 @@ def benchmark( func_names: Optional[List[str]] = None, num_warmups: int = 10, backward: bool = False, + per_step: bool = False, + progress_bar: bool = False, ): r"""Benchmark a list of functions :obj:`funcs` that receive the same set of arguments :obj:`args`. @@ -44,6 +46,10 @@ def benchmark( (default: :obj:`10`) backward (bool, optional): If set to :obj:`True`, will benchmark both forward and backward passes. (default: :obj:`False`) + per_step (bool, optional): If set to :obj:`True`, will report runtimes + per step. (default: :obj:`False`) + progress_bar (bool, optional): If set to :obj:`True`, will print a + progress bar during benchmarking. 
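A small usage sketch of the two new flags; the timed functions and tensor size are arbitrary choices for illustration, and `tabulate` and `tqdm` are assumed to be available:

```python
import torch

from torch_geometric.profile import benchmark

x = torch.randn(10_000, 64)

benchmark(
    funcs=[torch.relu, torch.sigmoid],
    func_names=['relu', 'sigmoid'],
    args=(x, ),          # shared positional arguments for every function
    num_steps=100,
    num_warmups=10,
    per_step=True,       # report average runtime per step instead of totals
    progress_bar=True,   # show a tqdm progress bar over the benchmarked funcs
)
```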
(default: :obj:`False`) """ from tabulate import tabulate @@ -65,8 +71,13 @@ def benchmark( # Zero-copy `args` for each function (if necessary): args_list = [args] * len(funcs) if isinstance(args, tuple) else args + iterator = zip(funcs, args_list, func_names) + if progress_bar: + from tqdm import tqdm + iterator = tqdm(iterator, total=len(funcs)) + ts: List[List[str]] = [] - for func, args, name in zip(funcs, args_list, func_names): + for func, args, name in iterator: t_forward = t_backward = 0 for i in range(num_warmups + num_steps): args = require_grad(args, backward) @@ -100,10 +111,17 @@ def benchmark( if i >= num_warmups: t_backward += time.perf_counter() - t_start - ts.append([name, f'{t_forward:.4f}s']) + if per_step: + ts.append([name, f'{t_forward/num_steps:.6f}s']) + else: + ts.append([name, f'{t_forward:.4f}s']) if backward: - ts[-1].append(f'{t_backward:.4f}s') - ts[-1].append(f'{t_forward + t_backward:.4f}s') + if per_step: + ts[-1].append(f'{t_backward/num_steps:.6f}s') + ts[-1].append(f'{(t_forward + t_backward)/num_steps:.6f}s') + else: + ts[-1].append(f'{t_backward:.4f}s') + ts[-1].append(f'{t_forward + t_backward:.4f}s') header = ['Name', 'Forward'] if backward: From 17e81c4fd1eea67c2af37917e767c5fc7cf65efc Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Mon, 12 Jun 2023 07:41:34 -0700 Subject: [PATCH 1282/2432] `topk` benchmark (#7549) on my a5000 desktop ``` Number of nodes: 2 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 4 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 8 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 16 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 32 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 64 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 128 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 256 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0004s | +--------+-----------+ Number of nodes: 512 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0006s | +--------+-----------+ Number of nodes: 1024 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0009s | +--------+-----------+ Number of nodes: 2048 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0015s | +--------+-----------+ Number of nodes: 4096 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0023s | +--------+-----------+ Number of nodes: 8192 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0049s | +--------+-----------+ Number of nodes: 16384 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0124s | +--------+-----------+ Number of nodes: 32768 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.0366s | +--------+-----------+ Number of nodes: 65536 +--------+-----------+ | Name | Forward | |--------+-----------| | topk | 0.1098s | +--------+-----------+ ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Co-authored-by: Matthias Fey --- test/nn/dense/test_diff_pool.py | 1 - test/nn/pool/select/test_select_topk.py | 34 +++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/test/nn/dense/test_diff_pool.py b/test/nn/dense/test_diff_pool.py index 5ad3e64f1b1c..2bd8b7fabf31 100644 --- a/test/nn/dense/test_diff_pool.py +++ b/test/nn/dense/test_diff_pool.py @@ -59,6 +59,5 @@ def test_dense_diff_pool(): args=args_list, num_steps=50 if args.device == 'cpu' else 500, num_warmups=10 if args.device == 'cpu' else 100, - per_step=True, progress_bar=True, ) diff --git a/test/nn/pool/select/test_select_topk.py b/test/nn/pool/select/test_select_topk.py index a0418fff9306..61f1ce17e9a5 100644 --- a/test/nn/pool/select/test_select_topk.py +++ b/test/nn/pool/select/test_select_topk.py @@ -1,8 +1,11 @@ +from itertools import product + import pytest import torch from torch_geometric.nn.pool.select import SelectOutput, SelectTopK from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.profile import benchmark from torch_geometric.testing import is_full_test @@ -55,3 +58,34 @@ def test_select_topk(min_score): assert out.node_index.max() < out.num_nodes assert out.cluster_index.min() == 0 assert out.cluster_index.max() == out.num_clusters - 1 + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cuda') + args = parser.parse_args() + + BS = [2**i for i in range(6, 8)] + NS = [2**i for i in range(8, 16)] + + funcs = [] + func_names = [] + args_list = [] + for B, N in product(BS, NS): + x = torch.randn(N, device=args.device) + batch = torch.randint(0, B, (N, ), device=args.device).sort()[0] + + funcs.append(topk) + func_names.append(f'B={B}, N={N}') + args_list.append((x, 0.5, batch)) + + benchmark( + funcs=funcs, + func_names=func_names, + args=args_list, + num_steps=50 if args.device == 'cpu' else 500, + num_warmups=10 if args.device == 'cpu' else 100, + progress_bar=True, + ) From 09cb4401b3064a9e2ad04d597c8c99f08409348d Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Mon, 12 Jun 2023 07:52:10 -0700 Subject: [PATCH 1283/2432] Allow GPU inference in `BasicGNN` (#7548) Signed-off-by: Serge Panev Co-authored-by: rusty1s --- CHANGELOG.md | 3 ++- torch_geometric/nn/models/basic_gnn.py | 29 ++++++++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d47d4386b4f..4c2fbf105bfa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added Performer to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465)) +- Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548)) +- Added `Performer` to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465)) - Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516)) - Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515)) - Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index b40bcdcd0c26..fdea248ccc0e 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -241,9 +241,13 @@ def forward( return x @torch.no_grad() - def inference(self, loader: NeighborLoader, - device: Optional[torch.device] = None, - progress_bar: bool = False) -> Tensor: + def inference( + self, + loader: NeighborLoader, + device: Optional[Union[str, torch.device]] = None, + embedding_device: Union[str, torch.device] = 'cpu', + progress_bar: bool = False, + ) -> Tensor: r"""Performs layer-wise inference on large-graphs using a :class:`~torch_geometric.loader.NeighborLoader`, where :class:`~torch_geometric.loader.NeighborLoader` should sample the @@ -251,6 +255,19 @@ def inference(self, loader: NeighborLoader, This is an efficient way to compute the output embeddings for all nodes in the graph. Only applicable in case :obj:`jk=None` or `jk='last'`. + + Args: + loader (torch_geometric.loader.NeighborLoader): A neighbor loader + object that generates full 1-hop subgraphs, *i.e.*, + :obj:`loader.num_neighbors = [-1]`. + device (torch.device, optional): The device to run the GNN on. + (default: :obj:`None`) + embedding_device (torch.device, optional): The device to store + intermediate embeddings on. If intermediate embeddings fit on + GPU, this option helps to avoid unnecessary device transfers. + (default: :obj:`"cpu"`) + progress_bar (bool, optional): If set to :obj:`True`, will print a + progress bar during computation. 
(default: :obj:`False`) """ assert self.jk_mode is None or self.jk_mode == 'last' assert isinstance(loader, NeighborLoader) @@ -262,7 +279,7 @@ def inference(self, loader: NeighborLoader, pbar = tqdm(total=len(self.convs) * len(loader)) pbar.set_description('Inference') - x_all = loader.data.x.cpu() + x_all = loader.data.x.to(embedding_device) loader.data.n_id = torch.arange(x_all.size(0)) for i in range(self.num_layers): @@ -275,7 +292,7 @@ def inference(self, loader: NeighborLoader, edge_index = batch.edge_index.to(device) x = self.convs[i](x, edge_index)[:batch.batch_size] if i == self.num_layers - 1 and self.jk_mode is None: - xs.append(x.cpu()) + xs.append(x.to(embedding_device)) if progress_bar: pbar.update(1) continue @@ -287,7 +304,7 @@ def inference(self, loader: NeighborLoader, x = self.act(x) if i == self.num_layers - 1 and hasattr(self, 'lin'): x = self.lin(x) - xs.append(x.cpu()) + xs.append(x.to(embedding_device)) if progress_bar: pbar.update(1) x_all = torch.cat(xs, dim=0) From 05490776e576addd4727e0a4bcd18e7cc0a16f3c Mon Sep 17 00:00:00 2001 From: Novi Patricia Date: Mon, 12 Jun 2023 22:13:13 +0700 Subject: [PATCH 1284/2432] Add an example `PMLP` on Cora dataset (#7543) Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- examples/pmlp.py | 58 +++++++++++++++++++++++++++++++ torch_geometric/nn/models/pmlp.py | 15 ++++++-- 3 files changed, 71 insertions(+), 4 deletions(-) create mode 100644 examples/pmlp.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c2fbf105bfa..19326015f17a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) - Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447), [#7466](https://github.com/pyg-team/pytorch_geometric/pull/7466)) - Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398)) -- Added the `PMLP` model ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370)) +- Added the `PMLP` model and an example ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370), [#7543](https://github.com/pyg-team/pytorch_geometric/pull/7543)) - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) diff --git a/examples/pmlp.py b/examples/pmlp.py new file mode 100644 index 000000000000..29088aa8da17 --- /dev/null +++ b/examples/pmlp.py @@ -0,0 +1,58 @@ +import os.path as osp + +import torch +import torch.nn.functional as F + +import torch_geometric.transforms as T +from torch_geometric.datasets import Planetoid +from torch_geometric.nn import PMLP + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') +dataset = Planetoid(path, name='Cora', transform=T.NormalizeFeatures()) +data = dataset[0].to(device) + +model = PMLP( + 
in_channels=dataset.num_features, + hidden_channels=16, + out_channels=dataset.num_classes, + num_layers=2, + dropout=0.5, + norm=False, +).to(device) + +optimizer = torch.optim.Adam(model.parameters(), weight_decay=5e-4, lr=0.01) + + +def train(): + model.train() + optimizer.zero_grad() + out = model(data.x) # MLP during training. + loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) + loss.backward() + optimizer.step() + return float(loss) + + +@torch.no_grad() +def test(): + model.eval() + out = model(data.x, data.edge_index) + pred = out.argmax(dim=-1) + + accs = [] + for mask in [data.train_mask, data.val_mask, data.test_mask]: + accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) + return accs + + +best_val_acc = final_test_acc = 0 +for epoch in range(1, 201): + loss = train() + train_acc, val_acc, tmp_test_acc = test() + if val_acc > best_val_acc: + best_val_acc = val_acc + test_acc = tmp_test_acc + print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' + f'Test: {test_acc:.4f}') diff --git a/torch_geometric/nn/models/pmlp.py b/torch_geometric/nn/models/pmlp.py index 7677e0b3462f..9034a175c106 100644 --- a/torch_geometric/nn/models/pmlp.py +++ b/torch_geometric/nn/models/pmlp.py @@ -22,6 +22,8 @@ class PMLP(torch.nn.Module): num_layers (int): The number of layers. dropout (float, optional): Dropout probability of each hidden embedding. (default: :obj:`0.`) + norm (bool, optional): If set to :obj:`False`, will not apply batch + normalization. (default: :obj:`True`) bias (bool, optional): If set to :obj:`False`, the module will not learn additive biases. (default: :obj:`True`) """ @@ -32,6 +34,7 @@ def __init__( out_channels: int, num_layers: int, dropout: float = 0., + norm: bool = True, bias: bool = True, ): super().__init__() @@ -50,8 +53,13 @@ def __init__( self.lins.append(lin) self.lins.append(Linear(hidden_channels, out_channels, self.bias)) - self.norm = torch.nn.BatchNorm1d(hidden_channels, affine=False, - track_running_stats=False) + self.norm = None + if norm: + self.norm = torch.nn.BatchNorm1d( + hidden_channels, + affine=False, + track_running_stats=False, + ) self.conv = SimpleConv(aggr='mean', combine_root='self_loop') @@ -81,7 +89,8 @@ def forward( if self.bias: x = x + self.lins[i].bias if i != self.num_layers - 1: - x = self.norm(x) + if self.norm is not None: + x = self.norm(x) x = x.relu() x = F.dropout(x, p=self.dropout, training=self.training) From 8af0faa82e5266331c0d1ea2755253e8beafad1c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 13 Jun 2023 14:56:24 +0200 Subject: [PATCH 1285/2432] Move `ConfigStore` outside GraphGym repository (#7571) --- test/{graphgym => }/my_config.yaml | 0 test/{graphgym => }/test_config_store.py | 5 +++-- torch_geometric/{graphgym => }/config_store.py | 0 torch_geometric/graphgym/__init__.py | 6 ------ 4 files changed, 3 insertions(+), 8 deletions(-) rename test/{graphgym => }/my_config.yaml (100%) rename test/{graphgym => }/test_config_store.py (97%) rename torch_geometric/{graphgym => }/config_store.py (100%) diff --git a/test/graphgym/my_config.yaml b/test/my_config.yaml similarity index 100% rename from test/graphgym/my_config.yaml rename to test/my_config.yaml diff --git a/test/graphgym/test_config_store.py b/test/test_config_store.py similarity index 97% rename from test/graphgym/test_config_store.py rename to test/test_config_store.py index 5fd0f504b556..e74933f85400 100644 --- a/test/graphgym/test_config_store.py +++ b/test/test_config_store.py @@ -1,12 +1,13 @@ 
from typing import Any, Optional -from torch_geometric.graphgym import ( +from torch_geometric.config_store import ( class_from_dataclass, dataclass_from_class, + fill_config_store, get_config_store, + register, to_dataclass, ) -from torch_geometric.graphgym.config_store import fill_config_store, register from torch_geometric.testing import withPackage diff --git a/torch_geometric/graphgym/config_store.py b/torch_geometric/config_store.py similarity index 100% rename from torch_geometric/graphgym/config_store.py rename to torch_geometric/config_store.py diff --git a/torch_geometric/graphgym/__init__.py b/torch_geometric/graphgym/__init__.py index 8354971740ac..f70d8e8ea500 100644 --- a/torch_geometric/graphgym/__init__.py +++ b/torch_geometric/graphgym/__init__.py @@ -18,8 +18,6 @@ register_config, register_dataset, register_loader, register_optimizer, register_scheduler, register_loss, register_train, register_metric) -from .config_store import (to_dataclass, dataclass_from_class, - class_from_dataclass, get_config_store) __all__ = classes = [ 'load_ckpt', @@ -60,8 +58,4 @@ 'register_loss', 'register_train', 'register_metric', - 'to_dataclass', - 'dataclass_from_class', - 'class_from_dataclass', - 'get_config_store', ] From ed52d8104898a7fe8d36fc20a5af46ff06bb7e4c Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Tue, 13 Jun 2023 06:52:37 -0700 Subject: [PATCH 1286/2432] Correctly move data to device in GraphSAGE example (#7568) Fix, 40% faster on V100 --------- Signed-off-by: Serge Panev Co-authored-by: rusty1s --- examples/graph_sage_unsup.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/examples/graph_sage_unsup.py b/examples/graph_sage_unsup.py index 51f829d1799e..385ca559a8b7 100644 --- a/examples/graph_sage_unsup.py +++ b/examples/graph_sage_unsup.py @@ -23,10 +23,15 @@ ) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = GraphSAGE(data.num_node_features, hidden_channels=64, - num_layers=2).to(device) +data = data.to(device, 'x', 'edge_index') + +model = GraphSAGE( + data.num_node_features, + hidden_channels=64, + num_layers=2, +).to(device) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) -x, edge_index = data.x.to(device), data.edge_index.to(device) def train(): @@ -51,7 +56,7 @@ def train(): @torch.no_grad() def test(): model.eval() - out = model(data.x.to(device), data.edge_index.to(device)).cpu() + out = model(data.x, data.edge_index).cpu() clf = LogisticRegression() clf.fit(out[data.train_mask], data.y[data.train_mask]) From 27c2f606d0eebe0afc881b193bce7882abed95fc Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Tue, 13 Jun 2023 06:59:17 -0700 Subject: [PATCH 1287/2432] adding backwards to `dense_diff_pool` benchmark (#7567) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/nn/dense/test_diff_pool.py | 2 ++ torch_geometric/profile/benchmark.py | 7 +++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/test/nn/dense/test_diff_pool.py b/test/nn/dense/test_diff_pool.py index 2bd8b7fabf31..42f851c792c1 100644 --- a/test/nn/dense/test_diff_pool.py +++ b/test/nn/dense/test_diff_pool.py @@ -34,6 +34,7 @@ def test_dense_diff_pool(): parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda') + parser.add_argument('--backward', action='http://github.com/store_true') args = parser.parse_args() BS = [2**i for i in range(4, 8)] @@ -59,5 +60,6 @@ def test_dense_diff_pool(): args=args_list, 
num_steps=50 if args.device == 'cpu' else 500, num_warmups=10 if args.device == 'cpu' else 100, + backward=args.backward, progress_bar=True, ) diff --git a/torch_geometric/profile/benchmark.py b/torch_geometric/profile/benchmark.py index 3909124e2f3e..d1dfe09b904c 100644 --- a/torch_geometric/profile/benchmark.py +++ b/torch_geometric/profile/benchmark.py @@ -94,12 +94,11 @@ def benchmark( t_forward += time.perf_counter() - t_start if backward: - # TODO Generalize this logic. This is also a bit unfair as the - # concatenation leads to incorrectly measured backward speeds. if isinstance(out, (tuple, list)): - out = torch.cat(out, dim=0) + out = sum(o.sum() for o in out if isinstance(o, Tensor)) elif isinstance(out, dict): - out = torch.cat(list(out.values()), dim=0) + out = out.values() + out = sum(o.sum() for o in out if isinstance(o, Tensor)) out_grad = torch.randn_like(out) t_start = time.perf_counter() From 5ff3295250715f765ac60dfd6c34e0fc7f1cd904 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 13 Jun 2023 17:55:32 +0200 Subject: [PATCH 1288/2432] Allow GPU input in `NodeLoader` and `LinkLoader` (#7572) Fixes #7557 --- CHANGELOG.md | 1 + test/loader/test_link_neighbor_loader.py | 21 ++++++++--- test/loader/test_neighbor_loader.py | 21 ++++++++--- torch_geometric/loader/link_loader.py | 4 +- torch_geometric/sampler/base.py | 47 +++++++++++++++++++++++- 5 files changed, 78 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19326015f17a..f73f18b505d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572)) - Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548)) - Added `Performer` to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465)) - Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index a4afe6749991..053454bf2c46 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -8,6 +8,7 @@ MyGraphStore, get_random_edge_index, onlyNeighborSampler, + withCUDA, withPackage, ) @@ -16,24 +17,29 @@ def unique_edge_pairs(edge_index): return set(map(tuple, edge_index.t().tolist())) +@withCUDA @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) @pytest.mark.parametrize('neg_sampling_ratio', [None, 1.0]) @pytest.mark.parametrize('filter_per_worker', [None, True, False]) -def test_homo_link_neighbor_loader_basic(subgraph_type, neg_sampling_ratio, +def test_homo_link_neighbor_loader_basic(device, subgraph_type, + neg_sampling_ratio, filter_per_worker): - pos_edge_index = get_random_edge_index(50, 50, 500) - neg_edge_index = get_random_edge_index(50, 50, 500) + pos_edge_index = get_random_edge_index(50, 50, 500, device=device) + neg_edge_index = get_random_edge_index(50, 50, 500, device=device) neg_edge_index += 50 edge_label_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1) - edge_label = torch.cat([torch.ones(500), torch.zeros(500)], dim=0) + edge_label = torch.cat([ + torch.ones(500, 
device=device), + torch.zeros(500, device=device), + ], dim=0) data = Data() data.edge_index = pos_edge_index - data.x = torch.arange(100) - data.edge_attr = torch.arange(500) + data.x = torch.arange(100, device=device) + data.edge_attr = torch.arange(500, device=device) loader = LinkNeighborLoader( data, @@ -60,11 +66,14 @@ def test_homo_link_neighbor_loader_basic(subgraph_type, neg_sampling_ratio, assert batch.n_id.size() == (batch.num_nodes, ) assert batch.e_id.size() == (batch.num_edges, ) + assert batch.x.device == device assert batch.x.size(0) <= 100 assert batch.x.min() >= 0 and batch.x.max() < 100 assert batch.input_id.numel() == 20 + assert batch.edge_index.device == device assert batch.edge_index.min() >= 0 assert batch.edge_index.max() < batch.num_nodes + assert batch.edge_attr.device == device assert batch.edge_attr.min() >= 0 assert batch.edge_attr.max() < 500 diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 76238219b6c5..34134e7f2ae7 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -16,6 +16,7 @@ get_random_edge_index, onlyLinux, onlyNeighborSampler, + withCUDA, withPackage, ) from torch_geometric.typing import WITH_PYG_LIB, WITH_TORCH_SPARSE @@ -32,15 +33,17 @@ def is_subset(subedge_index, edge_index, src_idx, dst_idx): num_nodes = int(edge_index.max()) + 1 idx = num_nodes * edge_index[0] + edge_index[1] subidx = num_nodes * src_idx[subedge_index[0]] + dst_idx[subedge_index[1]] - mask = torch.from_numpy(np.isin(subidx, idx)) + mask = torch.from_numpy(np.isin(subidx.cpu().numpy(), idx.cpu().numpy())) return int(mask.sum()) == mask.numel() +@withCUDA @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', list(SubgraphType)) @pytest.mark.parametrize('dtype', [torch.int64, torch.int32]) @pytest.mark.parametrize('filter_per_worker', [None, True, False]) -def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): +def test_homo_neighbor_loader_basic(device, subgraph_type, dtype, + filter_per_worker): if subgraph_type == SubgraphType.induced and not WITH_TORCH_SPARSE: return if (dtype != torch.int64 @@ -51,9 +54,9 @@ def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): data = Data() - data.x = torch.arange(100) - data.edge_index = get_random_edge_index(100, 100, 500, dtype) - data.edge_attr = torch.arange(500) + data.x = torch.arange(100, device=device) + data.edge_index = get_random_edge_index(100, 100, 500, dtype, device) + data.edge_attr = torch.arange(500, device=device) loader = NeighborLoader( data, @@ -72,17 +75,23 @@ def test_homo_neighbor_loader_basic(subgraph_type, dtype, filter_per_worker): for i, batch in enumerate(loader): assert isinstance(batch, Data) + assert batch.x.device == device assert batch.x.size(0) <= 100 assert batch.n_id.size() == (batch.num_nodes, ) assert batch.input_id.numel() == batch.batch_size == 20 assert batch.x.min() >= 0 and batch.x.max() < 100 + assert batch.edge_index.device == device assert batch.edge_index.min() >= 0 assert batch.edge_index.max() < batch.num_nodes + assert batch.edge_attr.device == device + assert batch.edge_attr.size(0) == batch.edge_index.size(1) # Input nodes are always sampled first: assert torch.equal( batch.x[:batch.batch_size], - torch.arange(i * batch.batch_size, (i + 1) * batch.batch_size)) + torch.arange(i * batch.batch_size, (i + 1) * batch.batch_size, + device=device), + ) if subgraph_type != SubgraphType.bidirectional: assert batch.e_id.size() == (batch.num_edges, ) diff 
--git a/torch_geometric/loader/link_loader.py b/torch_geometric/loader/link_loader.py index d960c4e3ed96..d34a331405cc 100644 --- a/torch_geometric/loader/link_loader.py +++ b/torch_geometric/loader/link_loader.py @@ -172,8 +172,8 @@ def __init__( self.input_data = EdgeSamplerInput( input_id=input_id, - row=edge_label_index[0].clone(), - col=edge_label_index[1].clone(), + row=edge_label_index[0], + col=edge_label_index[1], label=edge_label, time=edge_label_time, input_type=input_type, diff --git a/torch_geometric/sampler/base.py b/torch_geometric/sampler/base.py index f1566932b753..d8b1a405ecf5 100644 --- a/torch_geometric/sampler/base.py +++ b/torch_geometric/sampler/base.py @@ -45,7 +45,7 @@ class SubgraphType(Enum): induced = 'induced' -@dataclass +@dataclass(init=False) class NodeSamplerInput(CastMixin): r"""The sampling input of :meth:`~torch_geometric.sampler.BaseSampler.sample_from_nodes`. @@ -64,6 +64,24 @@ class NodeSamplerInput(CastMixin): time: OptTensor = None input_type: Optional[NodeType] = None + def __init__( + self, + input_id: OptTensor, + node: Tensor, + time: OptTensor = None, + input_type: Optional[NodeType] = None, + ): + if input_id is not None: + input_id = input_id.cpu() + node = node.cpu() + if time is not None: + time = time.cpu() + + self.input_id = input_id + self.node = node + self.time = time + self.input_type = input_type + def __getitem__(self, index: Union[Tensor, Any]) -> 'NodeSamplerInput': if not isinstance(index, Tensor): index = torch.tensor(index, dtype=torch.long) @@ -76,7 +94,7 @@ def __getitem__(self, index: Union[Tensor, Any]) -> 'NodeSamplerInput': ) -@dataclass +@dataclass(init=False) class EdgeSamplerInput(CastMixin): r"""The sampling input of :meth:`~torch_geometric.sampler.BaseSampler.sample_from_edges`. @@ -102,6 +120,31 @@ class EdgeSamplerInput(CastMixin): time: OptTensor = None input_type: Optional[EdgeType] = None + def __init__( + self, + input_id: OptTensor, + row: Tensor, + col: Tensor, + label: OptTensor = None, + time: OptTensor = None, + input_type: Optional[EdgeType] = None, + ): + if input_id is not None: + input_id = input_id.cpu() + row = row.clone().cpu() + col = col.clone().cpu() + if label is not None: + label = label.cpu() + if time is not None: + time = time.cpu() + + self.input_id = input_id + self.row = row + self.col = col + self.label = label + self.time = time + self.input_type = input_type + def __getitem__(self, index: Union[Tensor, Any]) -> 'EdgeSamplerInput': if not isinstance(index, Tensor): index = torch.tensor(index, dtype=torch.long) From 76215869691f000517f7e3faf3e21ccde5704b9d Mon Sep 17 00:00:00 2001 From: Saurav Maheshkar Date: Wed, 14 Jun 2023 16:10:16 +0530 Subject: [PATCH 1289/2432] feat(docker): bump version in `Dockerfile` (#7575) This PR aims to bump the version of the package in the `Dockerfile`. 
--- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 262729455719..d4f37f061d68 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 # metainformation -LABEL org.opencontainers.image.version = "2.1.0" +LABEL org.opencontainers.image.version = "2.3.1" LABEL org.opencontainers.image.authors = "Matthias Fey" LABEL org.opencontainers.image.source = "https://github.com/pyg-team/pytorch_geometric" LABEL org.opencontainers.image.licenses = "MIT" From 8cb9bf29516b9bc069b38fe37d562ca83f2838df Mon Sep 17 00:00:00 2001 From: SalvishGoomanee <61241851+SalvishGoomanee@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:57:23 +0200 Subject: [PATCH 1290/2432] Added `edge_index` property to `TemporalData` (#7573) Added the `edge_index` property in order to make it easier to visualize time series of temporal by simply doing a `test_data.edge_index` and using `networkx` which is useful for large multiple graph datasets. --------- Co-authored-by: SalvishGoomanee Co-authored-by: rusty1s --- test/data/test_temporal.py | 6 ++++++ torch_geometric/data/temporal.py | 10 ++++++++++ 2 files changed, 16 insertions(+) diff --git a/test/data/test_temporal.py b/test/data/test_temporal.py index 47d2701c689d..7c6acfc061c5 100644 --- a/test/data/test_temporal.py +++ b/test/data/test_temporal.py @@ -26,6 +26,12 @@ def test_temporal_data(): assert data.src.tolist() == [0, 1, 2] assert data['src'].tolist() == [0, 1, 2] + assert data.edge_index.tolist() == [[0, 1, 2], [3, 4, 5]] + data.edge_index = 'edge_index' + assert data.edge_index == 'edge_index' + del data.edge_index + assert data.edge_index.tolist() == [[0, 1, 2], [3, 4, 5]] + assert sorted(data.keys) == ['dst', 'msg', 'src', 't', 'y'] assert sorted(data.to_dict().keys()) == sorted(data.keys) diff --git a/torch_geometric/data/temporal.py b/torch_geometric/data/temporal.py index 2b618a185603..c2e99af7888c 100644 --- a/torch_geometric/data/temporal.py +++ b/torch_geometric/data/temporal.py @@ -212,6 +212,16 @@ def num_edges(self) -> int: r"""Alias for :meth:`~torch_geometric.data.TemporalData.num_events`.""" return self.num_events + @property + def edge_index(self) -> Tensor: + r"""Returns the edge indices of the graph.""" + if 'edge_index' in self: + return self._store['edge_index'] + if self.src is not None and self.dst is not None: + return torch.stack([self.src, self.dst], dim=0) + raise ValueError(f"{self.__class__.__name__} does not contain " + f"'edge_index' information") + def size( self, dim: Optional[int] = None ) -> Union[Tuple[Optional[int], Optional[int]], Optional[int]]: From 64767fa5207015a597f4b61b10d146ff3b6f89c0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 15 Jun 2023 13:54:05 +0200 Subject: [PATCH 1291/2432] Check lazy initialization with `SAGEConv(project=True)` (#7589) --- test/nn/conv/test_sage_conv.py | 16 ++++++++++++++++ torch_geometric/nn/conv/sage_conv.py | 9 ++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index afc1f30763ce..0ff0066ea17d 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -56,6 +56,22 @@ def test_sage_conv(project, aggr): assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) +@pytest.mark.parametrize('project', [False, True]) +def test_lazy_sage_conv(project): + x = torch.randn(4, 8) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + + if project: + with 
pytest.raises(ValueError, match="does not support lazy"): + SAGEConv(-1, 32, project=project) + else: + conv = SAGEConv(-1, 32, project=project) + assert str(conv) == 'SAGEConv(-1, 32, aggr=mean)' + + out = conv(x, edge_index) + assert out.size() == (4, 32) + + def test_lstm_aggr_sage_conv(): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) diff --git a/torch_geometric/nn/conv/sage_conv.py b/torch_geometric/nn/conv/sage_conv.py index 3eba1674d19a..636550267dcb 100644 --- a/torch_geometric/nn/conv/sage_conv.py +++ b/torch_geometric/nn/conv/sage_conv.py @@ -2,7 +2,6 @@ import torch.nn.functional as F from torch import Tensor -from torch.nn import LSTM from torch_geometric.nn.aggr import Aggregation, MultiAggregation from torch_geometric.nn.conv import MessagePassing @@ -92,12 +91,12 @@ def __init__( super().__init__(aggr, **kwargs) if self.project: + if in_channels[0] <= 0: + raise ValueError(f"'{self.__class__.__name__}' does not " + f"support lazy initialization with " + f"`project=True`") self.lin = Linear(in_channels[0], in_channels[0], bias=True) - if self.aggr is None: - self.fuse = False # No "fused" message_and_aggregate. - self.lstm = LSTM(in_channels[0], in_channels[0], batch_first=True) - if isinstance(self.aggr_module, MultiAggregation): aggr_out_channels = self.aggr_module.get_out_channels( in_channels[0]) From 5c72f3357354374177d1268abab3da42a03cbb64 Mon Sep 17 00:00:00 2001 From: Emanuele Rossi Date: Thu, 15 Jun 2023 16:54:27 +0200 Subject: [PATCH 1292/2432] Implementation of Directed Graph Neural Networks (Dir-GNN) (#7458) Implementation of Directed Graph Neural Networks (Dir-GNN), as introduced in the paper "[Edge Directionality Improves Learning on Heterophilic Graphs](https://arxiv.org/abs/2305.10498)". We start by implementing DirSageConv. We note the following regarding the implementation: - It's not trivial to implement a generic DirGNN module which just takes a conv as input, and deep copies it to have convolutions in both directions. This is because if we take eg. SageConv as base conv, then we would have the root weights twice, whereas we need them only once. Or for GCN, we actually need to change the normalization to account for both in- and out-degree, and cannot just use two standard GCNConv layers. - As it is implemented now, DirSageConv uses two SageConv layers with different flows. However, the flow direction "target_to_source" does not support message propagation via torch_sparse.SparseTensor or torch.sparse.Tensor . Should we instead have a generic utility function which is able to transpose any Adj object, and always use SageConv with source_to_target flow, and feed once the original Adj, and the other time the tranpose? --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + examples/dir_gnn.py | 97 +++++++++++++++++++++++++ test/nn/conv/test_dir_gnn_conv.py | 24 ++++++ torch_geometric/nn/conv/__init__.py | 2 + torch_geometric/nn/conv/dir_gnn_conv.py | 74 +++++++++++++++++++ 5 files changed, 198 insertions(+) create mode 100644 examples/dir_gnn.py create mode 100644 test/nn/conv/test_dir_gnn_conv.py create mode 100644 torch_geometric/nn/conv/dir_gnn_conv.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f73f18b505d9..67192843c939 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458)) - Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572)) - Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548)) - Added `Performer` to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465)) diff --git a/examples/dir_gnn.py b/examples/dir_gnn.py new file mode 100644 index 000000000000..177e819f78ce --- /dev/null +++ b/examples/dir_gnn.py @@ -0,0 +1,97 @@ +import argparse +import os.path as osp + +import torch +import torch.nn.functional as F + +import torch_geometric.transforms as T +from torch_geometric.datasets import WikipediaNetwork +from torch_geometric.nn import DirGNNConv, GCNConv, SAGEConv + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, default='chameleon') +parser.add_argument('--hidden_channels', type=int, default=128) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--epochs', type=int, default=1000) +parser.add_argument('--alpha', type=float, default=1) +parser.add_argument('--conv', type=str, default='gcn') +args = parser.parse_args() + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Wikipedia') +dataset = WikipediaNetwork( + root=path, + name=args.dataset, + transform=T.NormalizeFeatures(), +) + +data = dataset[0].to(device) +data.train_mask = data.train_mask[:, 0] +data.val_mask = data.val_mask[:, 0] +data.test_mask = data.test_mask[:, 0] + +if args.conv == 'gcn': + Conv = GCNConv +elif args.conv == 'sage': + Conv = SAGEConv +else: + raise NotImplementedError + + +class DirGNN(torch.nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, alpha): + super().__init__() + self.conv1 = Conv(in_channels, hidden_channels) + self.conv1 = DirGNNConv(self.conv1, alpha, root_weight=False) + + self.conv2 = Conv(hidden_channels, out_channels) + self.conv2 = DirGNNConv(self.conv2, alpha, root_weight=False) + + def forward(self, x, edge_index): + x = self.conv1(x, edge_index).relu() + x = self.conv2(x, edge_index) + return x + + +model = DirGNN( + dataset.num_features, + args.hidden_channels, + dataset.num_classes, + alpha=args.alpha, +).to(device) + +optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) + + +def train(): + model.train() + optimizer.zero_grad() + out = model(data.x, data.edge_index) + loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) + loss.backward() + optimizer.step() + return float(loss) + + +@torch.no_grad() +def test(): + model.eval() + pred = model(data.x, data.edge_index).argmax(dim=-1) + + accs = [] + for mask in [data.train_mask, data.val_mask, data.test_mask]: + accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) + return accs + + +best_val_acc = final_test_acc = 0 +for epoch in range(1, args.epochs + 1): + loss = train() + train_acc, val_acc, tmp_test_acc = test() + if val_acc > best_val_acc: + best_val_acc = val_acc + test_acc = tmp_test_acc + + print(f'Epoch: {epoch:04d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' + f'Val: {val_acc:.4f}, Test: {test_acc:.4f}') diff --git a/test/nn/conv/test_dir_gnn_conv.py b/test/nn/conv/test_dir_gnn_conv.py new file mode 100644 index 
000000000000..fad36b1593f9 --- /dev/null +++ b/test/nn/conv/test_dir_gnn_conv.py @@ -0,0 +1,24 @@ +import torch + +from torch_geometric.nn import DirGNNConv, SAGEConv + + +def test_dir_gnn_conv(): + x = torch.randn(4, 16) + edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]]) + + conv = DirGNNConv(SAGEConv(16, 32)) + assert str(conv) == 'DirGNNConv(SAGEConv(16, 32, aggr=mean), alpha=0.5)' + + out = conv(x, edge_index) + assert out.size() == (4, 32) + + +def test_static_dir_gnn_conv(): + x = torch.randn(3, 4, 16) + edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]]) + + conv = DirGNNConv(SAGEConv(16, 32)) + + out = conv(x, edge_index) + assert out.size() == (3, 4, 32) diff --git a/torch_geometric/nn/conv/__init__.py b/torch_geometric/nn/conv/__init__.py index 9410e03f0b3d..9a6dae927e9f 100644 --- a/torch_geometric/nn/conv/__init__.py +++ b/torch_geometric/nn/conv/__init__.py @@ -59,6 +59,7 @@ from .point_gnn_conv import PointGNNConv from .gps_conv import GPSConv from .antisymmetric_conv import AntiSymmetricConv +from .dir_gnn_conv import DirGNNConv __all__ = [ 'MessagePassing', @@ -125,6 +126,7 @@ 'PointGNNConv', 'GPSConv', 'AntiSymmetricConv', + 'DirGNNConv', ] classes = __all__ diff --git a/torch_geometric/nn/conv/dir_gnn_conv.py b/torch_geometric/nn/conv/dir_gnn_conv.py new file mode 100644 index 000000000000..c14e98d14141 --- /dev/null +++ b/torch_geometric/nn/conv/dir_gnn_conv.py @@ -0,0 +1,74 @@ +import copy + +import torch +from torch import Tensor + +from torch_geometric.nn.conv import MessagePassing + + +class DirGNNConv(torch.nn.Module): + r"""A generic wrapper for computing graph convolution on directed + graphs as described in the `"Edge Directionality Improves Learning on + Heterophilic Graphs" `_ paper. + :class:`DirGNNConv` will pass messages both from source nodes to target + nodes and from target nodes to source nodes. + + Args: + conv (MessagePassing): The underlying + :class:`~torch_geometric.nn.conv.MessagePassing` layer to use. + alpha (float, optional): The alpha coefficient used to weight the + aggregations of in- and out-edges as part of a convex combination. + (default: :obj:`0.5`) + root_weight (bool, optional): If set to :obj:`True`, the layer will add + transformed root node features to the output. 
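A short, self-contained sketch of wrapping an existing convolution, mirroring the new test; the graph and channel sizes are illustrative:

```python
import torch

from torch_geometric.nn import DirGNNConv, SAGEConv

x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])  # small directed path graph

# Messages are aggregated separately over in- and out-edges and combined via
# a convex combination weighted by `alpha`:
conv = DirGNNConv(SAGEConv(16, 32), alpha=0.5, root_weight=True)
out = conv(x, edge_index)
print(out.size())  # torch.Size([4, 32])
```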
+ (default: :obj:`True`) + """ + def __init__( + self, + conv: MessagePassing, + alpha: float = 0.5, + root_weight: bool = True, + ): + super().__init__() + + self.alpha = alpha + self.root_weight = root_weight + + self.conv_in = copy.deepcopy(conv) + self.conv_out = copy.deepcopy(conv) + + if hasattr(conv, 'add_self_loops'): + self.conv_in.add_self_loops = False + self.conv_out.add_self_loops = False + if hasattr(conv, 'root_weight'): + self.conv_in.root_weight = False + self.conv_out.root_weight = False + + if root_weight: + self.lin = torch.nn.Linear(conv.in_channels, conv.out_channels) + else: + self.lin = None + + self.reset_parameters() + + def reset_parameters(self): + r"""Resets all learnable parameters of the module.""" + self.conv_in.reset_parameters() + self.conv_out.reset_parameters() + if self.lin is not None: + self.lin.reset_parameters() + + def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: + r"""""" + x_in = self.conv_in(x, edge_index) + x_out = self.conv_out(x, edge_index.flip([0])) + + out = self.alpha * x_out + (1 - self.alpha) * x_in + + if self.root_weight: + out = out + self.lin(x) + + return out + + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.conv_in}, alpha={self.alpha})' From 8858f50dc6e91b71f7c489cf270945ffb539cae2 Mon Sep 17 00:00:00 2001 From: ZhengHongming888 Date: Fri, 16 Jun 2023 07:14:53 -0700 Subject: [PATCH 1293/2432] Add partitioning for distributed training (#7502) This code belongs to the part of the whole distributed training for PyG. This class (partitioner.py) will implement 1) the partition algorithm based on pyg's clusterData 2) in each partition LocalGraphStore/LocalFeatureStore will be used to initialize the graph & node/edge feature data 3) each partition also contains the partition information book/map between node/edge ids and partition id 4) each of which above will be further saved as .pt file folders include graph/node_feat/edge_feat/labels/node_map/edge_map. The partition folders as below- * homo graph output_dir/ |-- META.json |-- node_map.pt |-- edge_map.pt |-- part0/ > |-- graph.pt > |-- node_feats.pt > |-- edge_feats.pt |-- part1/ |-- graph.pt |-- node_feats.pt |-- edge_feats.pt * hetero graph output_dir/ |-- META.json |-- node_map/ |-- ntype1.pt |-- ntype2.pt |-- edge_map/ |-- etype1.pt |-- etype2.pt |-- part0/ |-- graph.pt |-- node_feats.pt |-- edge_feats.pt |-- part1/ |-- graph.pt |-- node_feats.pt |-- edge_feats.pt We also provide two example codes to help generate the homo/hetero graph partition based on ogbn-products/ogbn-mags under example/distributed folder. One unit test code under /test folder is used to verify this partition algorithm based on FakeDataset/FakeHeteroDataset. Any comments please let us know. thanks --------- Signed-off-by: Liu, Kaixuan Co-authored-by: Liu, Kaixuan Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/distributed/test_partition.py | 89 +++++++++ torch_geometric/distributed/__init__.py | 2 + torch_geometric/distributed/partition.py | 239 +++++++++++++++++++++++ 4 files changed, 331 insertions(+), 1 deletion(-) create mode 100644 test/distributed/test_partition.py create mode 100644 torch_geometric/distributed/partition.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 67192843c939..6fb18c71843a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_partition.py b/test/distributed/test_partition.py new file mode 100644 index 000000000000..272c2184fd7a --- /dev/null +++ b/test/distributed/test_partition.py @@ -0,0 +1,89 @@ +import os.path as osp + +import torch + +from torch_geometric.datasets import FakeDataset, FakeHeteroDataset +from torch_geometric.distributed import Partitioner +from torch_geometric.testing import withPackage +from torch_geometric.typing import EdgeTypeStr + + +@withPackage('pyg_lib') +def test_partition_data(tmp_path): + data = FakeDataset()[0] + num_parts = 2 + + partitioner = Partitioner(data, num_parts, tmp_path) + partitioner.generate_partition() + + node_map_path = osp.join(tmp_path, 'node_map.pt') + assert osp.exists(node_map_path) + node_map = torch.load(node_map_path) + assert node_map.numel() == data.num_nodes + + edge_map_path = osp.join(tmp_path, 'edge_map.pt') + assert osp.exists(edge_map_path) + edge_map = torch.load(edge_map_path) + assert edge_map.numel() == data.num_edges + + meta_path = osp.join(tmp_path, 'META.json') + assert osp.exists(meta_path) + + graph0_path = osp.join(tmp_path, 'part_0', 'graph.pt') + assert osp.exists(graph0_path) + graph0 = torch.load(graph0_path) + assert len({'edge_id', 'row', 'col', 'size'} & set(graph0.keys())) == 4 + + graph1_path = osp.join(tmp_path, 'part_1', 'graph.pt') + assert osp.exists(graph1_path) + graph1 = torch.load(graph1_path) + assert len({'edge_id', 'row', 'col', 'size'} & set(graph1.keys())) == 4 + + node_feats0_path = osp.join(tmp_path, 'part_0', 'node_feats.pt') + assert osp.exists(node_feats0_path) + node_feats0 = torch.load(node_feats0_path) + + node_feats1_path = osp.join(tmp_path, 'part_1', 'node_feats.pt') + assert osp.exists(node_feats1_path) + node_feats1 = torch.load(node_feats1_path) + + assert (node_feats0['feats']['x'].size(0) + + node_feats1['feats']['x'].size(0) == data.num_nodes) + assert torch.equal(data.x[node_feats0['global_id']], + node_feats0['feats']['x']) + assert torch.equal(data.x[node_feats1['global_id']], + node_feats1['feats']['x']) + + +@withPackage('pyg_lib') +def test_partition_hetero_data(tmp_path): + data = FakeHeteroDataset()[0] + num_parts = 2 + + partitioner = Partitioner(data, num_parts, tmp_path) + 
partitioner.generate_partition() + + meta_path = osp.join(tmp_path, 'META.json') + assert osp.exists(meta_path) + + for edge_type, num_edges in data.num_edges_dict.items(): + assert len(edge_type) == 3 + edge_name = EdgeTypeStr(edge_type) + edge_map_path = osp.join(tmp_path, 'edge_map', f'{edge_name}.pt') + assert osp.exists(edge_map_path) + edge_map = torch.load(edge_map_path) + assert edge_map.numel() == num_edges + + for node_type, num_nodes in data.num_nodes_dict.items(): + node_map_path = osp.join(tmp_path, 'node_map', f'{node_type}.pt') + assert osp.exists(node_map_path) + node_map = torch.load(node_map_path) + assert node_map.numel() == num_nodes + + for pid in range(num_parts): + graph_path = osp.join(tmp_path, f'part_{pid}', 'graph.pt') + assert osp.exists(graph_path) + node_feats_path = osp.join(tmp_path, f'part_{pid}', 'node_feats.pt') + assert osp.exists(node_feats_path) + edge_feats_path = osp.join(tmp_path, f'part_{pid}', 'edge_feats.pt') + assert osp.exists(edge_feats_path) diff --git a/torch_geometric/distributed/__init__.py b/torch_geometric/distributed/__init__.py index 1412f174a53c..8f7ada60e144 100644 --- a/torch_geometric/distributed/__init__.py +++ b/torch_geometric/distributed/__init__.py @@ -1,7 +1,9 @@ from .local_feature_store import LocalFeatureStore from .local_graph_store import LocalGraphStore +from .partition import Partitioner __all__ = classes = [ 'LocalFeatureStore', 'LocalGraphStore', + 'Partitioner', ] diff --git a/torch_geometric/distributed/partition.py b/torch_geometric/distributed/partition.py new file mode 100644 index 000000000000..cc5eb2e334af --- /dev/null +++ b/torch_geometric/distributed/partition.py @@ -0,0 +1,239 @@ +import json +import logging +import os +import os.path as osp +from typing import List, Optional, Union + +import torch + +from torch_geometric.data import Data, HeteroData +from torch_geometric.loader import ClusterData +from torch_geometric.typing import EdgeType, EdgeTypeStr, NodeType + + +class Partitioner: + r"""Partition the graph structure and its features of a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object. + Partitioned data output will be structured like this: + + **Homogeneous graphs:** + + .. code-block:: + + root/ + |-- META.json + |-- node_map.pt + |-- edge_map.pt + |-- part0/ + |-- graph.pt + |-- node_feats.pt + |-- edge_feats.pt + |-- part1/ + |-- graph.pt + |-- node_feats.pt + |-- edge_feats.pt + + **Heterogeneous graphs:** + + .. code-block:: + + root/ + |-- META.json + |-- node_map/ + |-- ntype1.pt + |-- ntype2.pt + |-- edge_map/ + |-- etype1.pt + |-- etype2.pt + |-- part0/ + |-- graph.pt + |-- node_feats.pt + |-- edge_feats.pt + |-- part1/ + |-- graph.pt + |-- node_feats.pt + |-- edge_feats.pt + + Args: + data (Data or HeteroData): The data object. + num_parts (int): The number of partitions. + root (str): Root directory where the partitioned dataset should be + saved. 
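A minimal sketch of generating a two-way partition, following the new unit test; the dataset and output directory are arbitrary, and `pyg-lib` is assumed to be installed for METIS-based clustering:

```python
from torch_geometric.datasets import FakeDataset
from torch_geometric.distributed import Partitioner

data = FakeDataset()[0]

# Writes `META.json`, `node_map.pt`, `edge_map.pt` and one `part_{i}/` folder
# (holding `graph.pt`, `node_feats.pt` and `edge_feats.pt`) per partition:
partitioner = Partitioner(data, num_parts=2, root='./partitions')
partitioner.generate_partition()
```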
+ """ + def __init__( + self, + data: Union[Data, HeteroData], + num_parts: int, + root: str, + ): + assert num_parts > 1 + + self.data = data + self.num_parts = num_parts + self.root = root + + @property + def is_hetero(self) -> bool: + return isinstance(self.data, HeteroData) + + @property + def node_types(self) -> Optional[List[NodeType]]: + return self.data.node_types if self.is_hetero else None + + @property + def edge_types(self) -> Optional[List[EdgeType]]: + return self.data.edge_types if self.is_hetero else None + + def generate_partition(self): + r"""Generates the partition.""" + os.makedirs(self.root, exist_ok=True) + + logging.info('Saving metadata') + meta = { + 'num_parts': self.num_parts, + 'is_hetero': self.is_hetero, + 'node_types': self.node_types, + 'edge_types': self.node_types, + } + with open(osp.join(self.root, 'META.json'), 'w') as f: + json.dump(meta, f) + + data = self.data.to_homogeneous() if self.is_hetero else self.data + cluster_data = ClusterData( + data, + num_parts=self.num_parts, + log=True, + keep_inter_cluster_edges=True, + ) + + node_perm = cluster_data.partition.node_perm + partptr = cluster_data.partition.partptr + edge_perm = cluster_data.partition.edge_perm + + node_map = torch.empty(data.num_nodes, dtype=torch.int64) + edge_map = torch.empty(data.num_edges, dtype=torch.int64) + + if self.is_hetero: + node_offset, edge_offset = {}, {} + + offset = 0 + for node_type in self.node_types: + node_offset[node_type] = offset + offset += self.data[node_type].num_nodes + + offset = 0 + for edge_name in self.edge_types: + edge_offset[edge_name] = offset + offset += offset + self.data.num_edges_dict[edge_name] + + edge_start = 0 + for pid in range(self.num_parts): + logging.info(f'Saving graph partition {pid}') + path = osp.join(self.root, f'part_{pid}') + os.makedirs(path, exist_ok=True) + + part_data = cluster_data[pid] + start, end = int(partptr[pid]), int(partptr[pid + 1]) + + num_edges = part_data.num_edges + edge_id = edge_perm[edge_start:edge_start + num_edges] + edge_map[edge_id] = pid + edge_start += +num_edges + + node_id = node_perm[start:end] + node_map[node_id] = pid + + out = {} + for i, edge_type in enumerate(self.edge_types): + src, _, dst = edge_type + size = (self.data[src].num_nodes, self.data[dst].num_nodes) + + mask = part_data.edge_type == i + out[edge_type] = { + 'edge_id': edge_id[mask], + 'row': part_data.edge_index[0, mask], + 'col': part_data.edge_index[1, mask], + 'size': size, + } + torch.save(out, osp.join(path, 'graph.pt')) + + out = {} + for i, node_type in enumerate(self.node_types): + mask = part_data.node_type == i + x = part_data.x[mask] if 'x' in part_data else None + out[node_type] = { + 'global_id': node_id[mask], + 'feats': dict(x=x), + } + torch.save(out, osp.join(path, 'node_feats.pt')) + + out = {} + for i, edge_type in enumerate(self.edge_types): + mask = part_data.edge_type == i + edge_attr = None + if 'edge_attr' in part_data: + edge_attr = part_data.edge_attr[mask] + out[node_type] = { + 'global_id': edge_id[mask], + 'feats': dict(edge_attr=edge_attr), + } + torch.save(out, osp.join(path, 'edge_feats.pt')) + + logging.info('Saving partition mapping info') + + path = osp.join(self.root, 'node_map') + os.makedirs(path, exist_ok=True) + for i, node_type in enumerate(self.node_types): + mask = data.node_type == i + torch.save(node_map[mask], osp.join(path, f'{node_type}.pt')) + + path = osp.join(self.root, 'edge_map') + os.makedirs(path, exist_ok=True) + for i, edge_type in enumerate(self.edge_types): + mask = 
data.edge_type == i + torch.save(edge_map[mask], + osp.join(path, f'{EdgeTypeStr(edge_type)}.pt')) + + else: # `if not self.is_hetero:` + + edge_start = 0 + for pid in range(self.num_parts): + logging.info(f'Saving graph partition {pid}') + path = osp.join(self.root, f'part_{pid}') + os.makedirs(path, exist_ok=True) + + part_data = cluster_data[pid] + start, end = int(partptr[pid]), int(partptr[pid + 1]) + + num_edges = part_data.num_edges + edge_id = edge_perm[edge_start:edge_start + num_edges] + edge_map[edge_id] = pid + edge_start += num_edges + + node_id = node_perm[start:end] + node_map[node_id] = pid + + torch.save( + { + 'edge_id': edge_id, + 'row': part_data.edge_index[0], + 'col': part_data.edge_index[1], + 'size': (data.num_nodes, data.num_nodes), + }, osp.join(path, 'graph.pt')) + + torch.save( + { + 'global_id': node_id, + 'feats': dict(x=part_data.x), + }, osp.join(path, 'node_feats.pt')) + + torch.save( + { + 'global_id': edge_id, + 'feats': dict(edge_attr=part_data.edge_attr), + }, osp.join(path, 'edge_feats.pt')) + + logging.info('Saving partition mapping info') + torch.save(node_map, osp.join(self.root, 'node_map.pt')) + torch.save(edge_map, osp.join(self.root, 'edge_map.pt')) From 7b80090ddc04d40049839cdc0950e25d2a1b1df8 Mon Sep 17 00:00:00 2001 From: Ivan Marisca Date: Sat, 17 Jun 2023 04:47:22 +0200 Subject: [PATCH 1294/2432] Fixed `SelectTopK` with x.dim() == 1 (#7595) Fixed unsqueezing when x is unidimensional (reassignment was missing). From ```python x.view(-1, 1) if x.dim() == 1 else x ``` To: ```python x = x.view(-1, 1) if x.dim() == 1 else x ``` --- torch_geometric/nn/pool/select/topk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py index 0d6ae4ad6304..1ccecf3f3163 100644 --- a/torch_geometric/nn/pool/select/topk.py +++ b/torch_geometric/nn/pool/select/topk.py @@ -148,7 +148,7 @@ def forward( if batch is None: batch = x.new_zeros(x.size(0), dtype=torch.long) - x.view(-1, 1) if x.dim() == 1 else x + x = x.view(-1, 1) if x.dim() == 1 else x score = (x * self.weight).sum(dim=-1) if self.min_score is None: From c0209a170dd8a1a2e702ee05f31915aaff369d2c Mon Sep 17 00:00:00 2001 From: Daniel McDonald <101536185+djm93dev@users.noreply.github.com> Date: Fri, 16 Jun 2023 22:56:19 -0400 Subject: [PATCH 1295/2432] Fix typo in documentation: adivisable -> advisable (#7598) I noticed a typo in the docs where "adivisable" was used instead of "advisable." --------- Co-authored-by: Jintang Li --- docs/source/advanced/cpu_affinity.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/advanced/cpu_affinity.rst b/docs/source/advanced/cpu_affinity.rst index c1da03ce3501..e4d7f72451f6 100644 --- a/docs/source/advanced/cpu_affinity.rst +++ b/docs/source/advanced/cpu_affinity.rst @@ -46,7 +46,7 @@ The recommended number of workers to start with lies between :obj:`[2, 4]`, and for batch in loader: pass -It is generally adivisable to use :obj:`filter_per_worker=True` for any multi-process CPU workloads (:obj:`True` by default). +It is generally advisable to use :obj:`filter_per_worker=True` for any multi-process CPU workloads (:obj:`True` by default). The workers then prepare each mini-batch: first by sampling the node indices using pre-defined a sampler, and secondly filtering node and edge features according to sampled nodes and edges. 
The filtering function selects node feature vectors from the complete input :class:`~torch_geometric.data.Data` tensor loaded into DRAM. When :attr:`filter_per_worker` is set to :attr:`True`, each worker's subprocess performs the filtering within it's CPU resource. From 11513fdde087d001e15c2eda5ff3c07c2240e1c0 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Sat, 17 Jun 2023 14:30:11 +0530 Subject: [PATCH 1296/2432] Added `FilterEdges` graph coarsening operator (#7361) Co-authored-by: Rishi Puri Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 4 ++ test/nn/pool/connect/test_filter_edges.py | 28 +++++++++ test/nn/pool/test_topk_pool.py | 2 +- torch_geometric/nn/pool/connect/__init__.py | 2 + torch_geometric/nn/pool/connect/base.py | 4 +- .../nn/pool/connect/filter_edges.py | 61 +++++++++++++++++++ torch_geometric/nn/pool/pan_pool.py | 5 +- torch_geometric/nn/pool/sag_pool.py | 7 ++- torch_geometric/nn/pool/topk_pool.py | 34 +++-------- 9 files changed, 112 insertions(+), 35 deletions(-) create mode 100644 test/nn/pool/connect/test_filter_edges.py create mode 100644 torch_geometric/nn/pool/connect/filter_edges.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fb18c71843a..55b609ebe7a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +<<<<<<< HEAD +- Added `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361)) +======= - Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458)) - Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572)) - Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548)) @@ -30,6 +33,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) +>>>>>>> 8444e4869e20d45f5ba27f5dab8c455b3fa36094 - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) diff --git a/test/nn/pool/connect/test_filter_edges.py b/test/nn/pool/connect/test_filter_edges.py new file mode 100644 index 000000000000..4e1f840bf04a --- /dev/null +++ b/test/nn/pool/connect/test_filter_edges.py @@ -0,0 +1,28 @@ +import torch + +from torch_geometric.nn.pool.connect import FilterEdges +from torch_geometric.nn.pool.select import SelectOutput +from torch_geometric.testing import is_full_test + + +def test_filter_edges(): + edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 1, 3, 2, 2]]) + edge_attr = torch.tensor([1, 2, 3, 4, 5, 6]) + batch = torch.tensor([0, 0, 1, 1]) + + node_index = torch.tensor([1, 2]) + cluster_index = torch.tensor([0, 1]) + select_output = SelectOutput(node_index=node_index, num_nodes=4, + cluster_index=cluster_index, num_clusters=2) + connect = FilterEdges() + output1 = connect(select_output, edge_index, edge_attr, batch) + assert output1.edge_index.tolist() == [[0, 1], [0, 1]] + assert output1.edge_attr.tolist() == [3, 5] + assert output1.batch.tolist() == [0, 1] + + if is_full_test(): + jit = torch.jit.script(connect) + output2 = jit(select_output, edge_index, edge_attr, batch) + torch.allclose(output1.edge_index, output2.edge_index) + torch.allclose(output1.edge_attr, output2.edge_attr) + torch.allclose(output1.batch, output2.batch) diff --git a/test/nn/pool/test_topk_pool.py b/test/nn/pool/test_topk_pool.py index a0616183fe60..5e3fb20956b7 100644 --- a/test/nn/pool/test_topk_pool.py +++ b/test/nn/pool/test_topk_pool.py @@ -1,7 +1,7 @@ import torch from torch_geometric.nn.pool import TopKPooling -from torch_geometric.nn.pool.topk_pool import filter_adj +from torch_geometric.nn.pool.connect.filter_edges import filter_adj from torch_geometric.testing import is_full_test diff --git a/torch_geometric/nn/pool/connect/__init__.py b/torch_geometric/nn/pool/connect/__init__.py index 52b45639fc63..1e24f3ee042d 100644 --- a/torch_geometric/nn/pool/connect/__init__.py +++ b/torch_geometric/nn/pool/connect/__init__.py @@ -1,6 +1,8 @@ from .base import Connect, ConnectOutput +from .filter_edges import FilterEdges __all__ = [ 'Connect', 'ConnectOutput', + 'FilterEdges', ] diff --git a/torch_geometric/nn/pool/connect/base.py b/torch_geometric/nn/pool/connect/base.py index fd78a5c61dac..23d0dd44346b 100644 --- a/torch_geometric/nn/pool/connect/base.py +++ b/torch_geometric/nn/pool/connect/base.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Optional, Tuple +from typing import Optional import torch from torch import Tensor @@ -70,7 +70,7 @@ def forward( edge_index: Tensor, 
edge_attr: Optional[Tensor] = None, batch: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> ConnectOutput: r""" Args: select_output (SelectOutput): The output of :class:`Select`. diff --git a/torch_geometric/nn/pool/connect/filter_edges.py b/torch_geometric/nn/pool/connect/filter_edges.py new file mode 100644 index 000000000000..2481f45145c4 --- /dev/null +++ b/torch_geometric/nn/pool/connect/filter_edges.py @@ -0,0 +1,61 @@ +from typing import Optional, Tuple + +import torch +from torch import Tensor + +from torch_geometric.nn.pool.select import SelectOutput +from torch_geometric.utils.num_nodes import maybe_num_nodes + +from .base import Connect, ConnectOutput + + +def filter_adj( + edge_index: Tensor, + edge_attr: Optional[Tensor], + node_index: Tensor, + cluster_index: Optional[Tensor] = None, + num_nodes: Optional[int] = None, +) -> Tuple[Tensor, Optional[Tensor]]: + num_nodes = maybe_num_nodes(edge_index, num_nodes) + if cluster_index is None: + cluster_index = torch.arange(node_index.size(0), dtype=torch.long, + device=node_index.device) + mask = node_index.new_full((num_nodes, ), -1) + mask[node_index] = cluster_index + + row, col = edge_index[0], edge_index[1] + row, col = mask[row], mask[col] + mask = (row >= 0) & (col >= 0) + row, col = row[mask], col[mask] + + if edge_attr is not None: + edge_attr = edge_attr[mask] + + return torch.stack([row, col], dim=0), edge_attr + + +class FilterEdges(Connect): + r"""Filter out edges if their incident nodes are not in any cluster. + It is assumed that each cluster contains only one node. + + .. math:: + \mathbf{A}^{\prime} &= \mathbf{A}_{\mathbf{i},\mathbf{i}} + + Where :math:`\mathbf{i}` are retained nodes. + """ + def forward( + self, + select_output: SelectOutput, + edge_index: Tensor, + edge_attr: Optional[Tensor] = None, + batch: Optional[Tensor] = None, + ) -> ConnectOutput: + if select_output.num_clusters != select_output.cluster_index.size(0): + raise ValueError("'FilterEdges' requires each cluster to contain " + "only one node.") + edge_index, edge_attr = filter_adj(edge_index, edge_attr, + select_output.node_index, + select_output.cluster_index, + num_nodes=select_output.num_nodes) + batch = self.get_pooled_batch(select_output, batch) + return ConnectOutput(edge_index, edge_attr, batch) diff --git a/torch_geometric/nn/pool/pan_pool.py b/torch_geometric/nn/pool/pan_pool.py index f0c0ab66d02c..763a6148dd67 100644 --- a/torch_geometric/nn/pool/pan_pool.py +++ b/torch_geometric/nn/pool/pan_pool.py @@ -4,8 +4,8 @@ from torch import Tensor from torch.nn import Parameter +from torch_geometric.nn.pool.connect.filter_edges import filter_adj from torch_geometric.nn.pool.select.topk import topk -from torch_geometric.nn.pool.topk_pool import filter_adj from torch_geometric.typing import OptTensor, SparseTensor from torch_geometric.utils import scatter, softmax @@ -101,7 +101,8 @@ def forward( x = self.multiplier * x if self.multiplier != 1 else x edge_index = torch.stack([col, row], dim=0) - edge_index, edge_weight = filter_adj(edge_index, edge_weight, perm, + edge_index, edge_weight = filter_adj(edge_index, edge_weight, + node_index=perm, num_nodes=score.size(0)) assert edge_weight is not None diff --git a/torch_geometric/nn/pool/sag_pool.py b/torch_geometric/nn/pool/sag_pool.py index 580467949e24..d205288a23f8 100644 --- a/torch_geometric/nn/pool/sag_pool.py +++ b/torch_geometric/nn/pool/sag_pool.py @@ -4,8 +4,8 @@ from torch import Tensor from torch_geometric.nn import GraphConv +from 
torch_geometric.nn.pool.connect.filter_edges import filter_adj from torch_geometric.nn.pool.select.topk import topk -from torch_geometric.nn.pool.topk_pool import filter_adj from torch_geometric.typing import OptTensor from torch_geometric.utils import softmax @@ -136,8 +136,9 @@ def forward( x = self.multiplier * x if self.multiplier != 1 else x batch = batch[perm] - edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, - num_nodes=score.size(0)) + edge_index, edge_weight = filter_adj(edge_index, edge_attr, + node_index=perm, + num_nodes=score.size(0)) return x, edge_index, edge_attr, batch, perm, score[perm] diff --git a/torch_geometric/nn/pool/topk_pool.py b/torch_geometric/nn/pool/topk_pool.py index 5560c9d3a413..dc6d65f70860 100644 --- a/torch_geometric/nn/pool/topk_pool.py +++ b/torch_geometric/nn/pool/topk_pool.py @@ -3,31 +3,8 @@ import torch from torch import Tensor +from torch_geometric.nn.pool.connect import FilterEdges from torch_geometric.nn.pool.select import SelectTopK -from torch_geometric.utils.num_nodes import maybe_num_nodes - - -def filter_adj( - edge_index: Tensor, - edge_attr: Optional[Tensor], - perm: Tensor, - num_nodes: Optional[int] = None, -) -> Tuple[Tensor, Optional[Tensor]]: - num_nodes = maybe_num_nodes(edge_index, num_nodes) - - mask = perm.new_full((num_nodes, ), -1) - i = torch.arange(perm.size(0), dtype=torch.long, device=perm.device) - mask[perm] = i - - row, col = edge_index[0], edge_index[1] - row, col = mask[row], mask[col] - mask = (row >= 0) & (col >= 0) - row, col = row[mask], col[mask] - - if edge_attr is not None: - edge_attr = edge_attr[mask] - - return torch.stack([row, col], dim=0), edge_attr class TopKPooling(torch.nn.Module): @@ -100,6 +77,7 @@ def __init__( self.multiplier = multiplier self.select = SelectTopK(in_channels, ratio, min_score, nonlinearity) + self.connect = FilterEdges() self.reset_parameters() @@ -141,9 +119,11 @@ def forward( x = x[perm] * score.view(-1, 1) x = self.multiplier * x if self.multiplier != 1 else x - batch = batch[perm] - edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, - num_nodes=select_output.num_nodes) + connect_output = self.connect(select_output, edge_index, edge_attr, + batch) + edge_index = connect_output.edge_index + edge_attr = connect_output.edge_attr + batch = connect_output.batch return x, edge_index, edge_attr, batch, perm, score From 8e1241cd4e21268a3546bfc025f7e522d65b830b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 18 Jun 2023 08:54:23 +0200 Subject: [PATCH 1297/2432] Fix typos in `distributed.Partioner` (#7599) --- torch_geometric/distributed/partition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_geometric/distributed/partition.py b/torch_geometric/distributed/partition.py index cc5eb2e334af..918d03648de5 100644 --- a/torch_geometric/distributed/partition.py +++ b/torch_geometric/distributed/partition.py @@ -94,7 +94,7 @@ def generate_partition(self): 'num_parts': self.num_parts, 'is_hetero': self.is_hetero, 'node_types': self.node_types, - 'edge_types': self.node_types, + 'edge_types': self.edge_types, } with open(osp.join(self.root, 'META.json'), 'w') as f: json.dump(meta, f) @@ -174,7 +174,7 @@ def generate_partition(self): edge_attr = None if 'edge_attr' in part_data: edge_attr = part_data.edge_attr[mask] - out[node_type] = { + out[edge_type] = { 'global_id': edge_id[mask], 'feats': dict(edge_attr=edge_attr), } From 41493b3649dfd46b994b3b4dc02771e64a858018 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 18 Jun 2023 
09:53:48 +0200 Subject: [PATCH 1298/2432] Fix TorchScript support in `TopKPooling` (#7600) --- CHANGELOG.md | 5 +-- test/nn/pool/connect/test_filter_edges.py | 29 +++++++++------ .../nn/pool/connect/filter_edges.py | 37 ++++++++++++------- torch_geometric/nn/pool/pan_pool.py | 3 +- torch_geometric/nn/pool/sag_pool.py | 5 +-- torch_geometric/nn/pool/select/topk.py | 3 +- torch_geometric/nn/pool/topk_pool.py | 18 ++++----- 7 files changed, 53 insertions(+), 47 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55b609ebe7a3..b8d1a302a7f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,9 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -<<<<<<< HEAD -- Added `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361)) -======= +- Added the `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361)) - Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458)) - Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572)) - Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548)) @@ -33,7 +31,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) ->>>>>>> 8444e4869e20d45f5ba27f5dab8c455b3fa36094 - Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) diff --git a/test/nn/pool/connect/test_filter_edges.py b/test/nn/pool/connect/test_filter_edges.py index 4e1f840bf04a..d2e9428539cf 100644 --- a/test/nn/pool/connect/test_filter_edges.py +++ b/test/nn/pool/connect/test_filter_edges.py @@ -10,19 +10,24 @@ def test_filter_edges(): edge_attr = torch.tensor([1, 2, 3, 4, 5, 6]) batch = torch.tensor([0, 0, 1, 1]) - node_index = torch.tensor([1, 2]) - cluster_index = torch.tensor([0, 1]) - select_output = SelectOutput(node_index=node_index, num_nodes=4, - cluster_index=cluster_index, num_clusters=2) + select_output = SelectOutput( + node_index=torch.tensor([1, 2]), + num_nodes=4, + cluster_index=torch.tensor([0, 1]), + num_clusters=2, + ) + connect = FilterEdges() - output1 = connect(select_output, edge_index, edge_attr, batch) - assert output1.edge_index.tolist() == [[0, 1], [0, 1]] - assert output1.edge_attr.tolist() == [3, 5] - assert output1.batch.tolist() == [0, 1] + assert str(connect) == 'FilterEdges()' + + out1 = connect(select_output, edge_index, edge_attr, batch) + assert out1.edge_index.tolist() == [[0, 1], [0, 1]] + assert out1.edge_attr.tolist() 
== [3, 5] + assert out1.batch.tolist() == [0, 1] if is_full_test(): jit = torch.jit.script(connect) - output2 = jit(select_output, edge_index, edge_attr, batch) - torch.allclose(output1.edge_index, output2.edge_index) - torch.allclose(output1.edge_attr, output2.edge_attr) - torch.allclose(output1.batch, output2.batch) + out2 = jit(select_output, edge_index, edge_attr, batch) + torch.equal(out1.edge_index, out2.edge_index) + torch.equal(out1.edge_attr, out2.edge_attr) + torch.equal(out1.batch, out2.batch) diff --git a/torch_geometric/nn/pool/connect/filter_edges.py b/torch_geometric/nn/pool/connect/filter_edges.py index 2481f45145c4..1eb42dcaab02 100644 --- a/torch_geometric/nn/pool/connect/filter_edges.py +++ b/torch_geometric/nn/pool/connect/filter_edges.py @@ -3,11 +3,10 @@ import torch from torch import Tensor +from torch_geometric.nn.pool.connect import Connect, ConnectOutput from torch_geometric.nn.pool.select import SelectOutput from torch_geometric.utils.num_nodes import maybe_num_nodes -from .base import Connect, ConnectOutput - def filter_adj( edge_index: Tensor, @@ -16,10 +15,13 @@ def filter_adj( cluster_index: Optional[Tensor] = None, num_nodes: Optional[int] = None, ) -> Tuple[Tensor, Optional[Tensor]]: + num_nodes = maybe_num_nodes(edge_index, num_nodes) + if cluster_index is None: - cluster_index = torch.arange(node_index.size(0), dtype=torch.long, + cluster_index = torch.arange(node_index.size(0), device=node_index.device) + mask = node_index.new_full((num_nodes, ), -1) mask[node_index] = cluster_index @@ -35,13 +37,13 @@ def filter_adj( class FilterEdges(Connect): - r"""Filter out edges if their incident nodes are not in any cluster. - It is assumed that each cluster contains only one node. + r"""Filters out edges if their incident nodes are not in any cluster .. math:: - \mathbf{A}^{\prime} &= \mathbf{A}_{\mathbf{i},\mathbf{i}} + \mathbf{A}^{\prime} &= \mathbf{A}_{\mathbf{i},\mathbf{i}}, - Where :math:`\mathbf{i}` are retained nodes. + where :math:`\mathbf{i}` denotes the set of retained nodes. + It is assumed that each cluster contains only one node. 
""" def forward( self, @@ -50,12 +52,19 @@ def forward( edge_attr: Optional[Tensor] = None, batch: Optional[Tensor] = None, ) -> ConnectOutput: - if select_output.num_clusters != select_output.cluster_index.size(0): - raise ValueError("'FilterEdges' requires each cluster to contain " - "only one node.") - edge_index, edge_attr = filter_adj(edge_index, edge_attr, - select_output.node_index, - select_output.cluster_index, - num_nodes=select_output.num_nodes) + + if (not torch.jit.is_scripting() and select_output.num_clusters != + select_output.cluster_index.size(0)): + raise ValueError(f"'{self.__class__.__name__}' requires each " + f"cluster to contain only one node") + + edge_index, edge_attr = filter_adj( + edge_index, + edge_attr, + select_output.node_index, + select_output.cluster_index, + num_nodes=select_output.num_nodes, + ) batch = self.get_pooled_batch(select_output, batch) + return ConnectOutput(edge_index, edge_attr, batch) diff --git a/torch_geometric/nn/pool/pan_pool.py b/torch_geometric/nn/pool/pan_pool.py index 763a6148dd67..174c63c90f57 100644 --- a/torch_geometric/nn/pool/pan_pool.py +++ b/torch_geometric/nn/pool/pan_pool.py @@ -101,8 +101,7 @@ def forward( x = self.multiplier * x if self.multiplier != 1 else x edge_index = torch.stack([col, row], dim=0) - edge_index, edge_weight = filter_adj(edge_index, edge_weight, - node_index=perm, + edge_index, edge_weight = filter_adj(edge_index, edge_weight, perm, num_nodes=score.size(0)) assert edge_weight is not None diff --git a/torch_geometric/nn/pool/sag_pool.py b/torch_geometric/nn/pool/sag_pool.py index d205288a23f8..ec18f26f37f8 100644 --- a/torch_geometric/nn/pool/sag_pool.py +++ b/torch_geometric/nn/pool/sag_pool.py @@ -136,9 +136,8 @@ def forward( x = self.multiplier * x if self.multiplier != 1 else x batch = batch[perm] - edge_index, edge_weight = filter_adj(edge_index, edge_attr, - node_index=perm, - num_nodes=score.size(0)) + edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, + num_nodes=score.size(0)) return x, edge_index, edge_attr, batch, perm, score[perm] diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py index 1ccecf3f3163..1679096e76e8 100644 --- a/torch_geometric/nn/pool/select/topk.py +++ b/torch_geometric/nn/pool/select/topk.py @@ -4,11 +4,10 @@ from torch import Tensor from torch_geometric.nn.inits import uniform +from torch_geometric.nn.pool.select import Select, SelectOutput from torch_geometric.nn.resolver import activation_resolver from torch_geometric.utils import scatter, softmax -from .base import Select, SelectOutput - # TODO (matthias) Benchmark and document this method. def topk( diff --git a/torch_geometric/nn/pool/topk_pool.py b/torch_geometric/nn/pool/topk_pool.py index dc6d65f70860..911a1275d9c2 100644 --- a/torch_geometric/nn/pool/topk_pool.py +++ b/torch_geometric/nn/pool/topk_pool.py @@ -5,6 +5,7 @@ from torch_geometric.nn.pool.connect import FilterEdges from torch_geometric.nn.pool.select import SelectTopK +from torch_geometric.typing import OptTensor class TopKPooling(torch.nn.Module): @@ -92,7 +93,7 @@ def forward( edge_attr: Optional[Tensor] = None, batch: Optional[Tensor] = None, attn: Optional[Tensor] = None, - ) -> Tuple[Tensor, Tensor, Optional[Tensor], Tensor, Tensor, Tensor]: + ) -> Tuple[Tensor, Tensor, OptTensor, OptTensor, Tensor, Tensor]: r""" Args: x (torch.Tensor): The node feature matrix. 
@@ -110,22 +111,19 @@ def forward( batch = edge_index.new_zeros(x.size(0)) attn = x if attn is None else attn - select_output = self.select(attn, batch) + select_out = self.select(attn, batch) - perm = select_output.node_index - score = select_output.weight + perm = select_out.node_index + score = select_out.weight assert score is not None x = x[perm] * score.view(-1, 1) x = self.multiplier * x if self.multiplier != 1 else x - connect_output = self.connect(select_output, edge_index, edge_attr, - batch) - edge_index = connect_output.edge_index - edge_attr = connect_output.edge_attr - batch = connect_output.batch + connect_out = self.connect(select_out, edge_index, edge_attr, batch) - return x, edge_index, edge_attr, batch, perm, score + return (x, connect_out.edge_index, connect_out.edge_attr, + connect_out.batch, perm, score) def __repr__(self) -> str: if self.min_score is None: From 4ebd80e21c15b0be47ceae4a75d40c742d98e2c1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 18 Jun 2023 10:15:53 +0200 Subject: [PATCH 1299/2432] Enable different attention modes in `HypergraphConv` (#7601) --- CHANGELOG.md | 1 + torch_geometric/nn/conv/hypergraph_conv.py | 30 +++++++++++++++++++--- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8d1a302a7f4..c5774f308569 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601)) - Added the `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361)) - Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458)) - Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572)) diff --git a/torch_geometric/nn/conv/hypergraph_conv.py b/torch_geometric/nn/conv/hypergraph_conv.py index 5947986346b2..90158d528a3f 100644 --- a/torch_geometric/nn/conv/hypergraph_conv.py +++ b/torch_geometric/nn/conv/hypergraph_conv.py @@ -45,6 +45,12 @@ class HypergraphConv(MessagePassing): out_channels (int): Size of each output sample. use_attention (bool, optional): If set to :obj:`True`, attention will be added to this layer. (default: :obj:`False`) + attention_mode (str, optional): The mode on how to compute attention. + If set to :obj:`"node"`, will compute attention scores of nodes + within all nodes belonging to the same hyperedge. + If set to :obj:`"edge"`, will compute attention scores of nodes + across all edges holding this node belongs to. + (default: :obj:`"node"`) heads (int, optional): Number of multi-head-attentions. 
(default: :obj:`1`) concat (bool, optional): If set to :obj:`False`, the multi-head @@ -68,15 +74,28 @@ class HypergraphConv(MessagePassing): hyperedge features :math:`(|\mathcal{E}|, D)` *(optional)* - **output:** node features :math:`(|\mathcal{V}|, F_{out})` """ - def __init__(self, in_channels, out_channels, use_attention=False, heads=1, - concat=True, negative_slope=0.2, dropout=0, bias=True, - **kwargs): + def __init__( + self, + in_channels: int, + out_channels: int, + use_attention: bool = False, + attention_mode: str = 'node', + heads: int = 1, + concat: bool = True, + negative_slope: float = 0.2, + dropout: float = 0, + bias: bool = True, + **kwargs, + ): kwargs.setdefault('aggr', 'add') super().__init__(flow='source_to_target', node_dim=0, **kwargs) + assert attention_mode in ['node', 'edge'] + self.in_channels = in_channels self.out_channels = out_channels self.use_attention = use_attention + self.attention_mode = attention_mode if self.use_attention: self.heads = heads @@ -154,7 +173,10 @@ def forward(self, x: Tensor, hyperedge_index: Tensor, x_j = hyperedge_attr[hyperedge_index[1]] alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1) alpha = F.leaky_relu(alpha, self.negative_slope) - alpha = softmax(alpha, hyperedge_index[0], num_nodes=x.size(0)) + if self.attention_mode == 'node': + alpha = softmax(alpha, hyperedge_index[1], num_nodes=x.size(0)) + else: + alpha = softmax(alpha, hyperedge_index[0], num_nodes=x.size(0)) alpha = F.dropout(alpha, p=self.dropout, training=self.training) D = scatter(hyperedge_weight[hyperedge_index[1]], hyperedge_index[0], From d0075a832a5fc88c6b9647f3d429604fa1e7787b Mon Sep 17 00:00:00 2001 From: andreazanetti Date: Mon, 19 Jun 2023 14:38:30 +0200 Subject: [PATCH 1300/2432] Documentation Page for HGAM (#7594) Adding a documentation page for HGAM. The idea is to pass the following information to the user: a) high level perspective of what HGAM is b) role of BFS ordering of the graph batch for HGAM to work c) support in the neighbor loader d) practical working example of TrimToLayer class e) pointers to external resources This is a draft, asking for preliminary feedback. Thanks for any input! Contributors: @mszarma, @rBenke (robert.benke@intel.com), manos.farsarakis@intel.com --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 5 +- docs/source/advanced/hgam.rst | 165 ++++++++++++++++++++++++++++++++++ docs/source/index.rst | 1 + 3 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 docs/source/advanced/hgam.rst diff --git a/CHANGELOG.md b/CHANGELOG.md index c5774f308569..b48fb3405998 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) - Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601)) - Added the `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361)) - Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458)) @@ -21,7 +22,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) -- Added hierarichial heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) +- Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) @@ -32,7 +33,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374)) - Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368)) - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383)) -- Added an example for hierarichial sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) +- Added an example for hierarchical sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244)) - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339)) diff --git a/docs/source/advanced/hgam.rst b/docs/source/advanced/hgam.rst new file mode 100644 index 000000000000..f957fe059b81 --- /dev/null +++ b/docs/source/advanced/hgam.rst @@ -0,0 +1,165 @@ +Hierarchical Neighborhood Sampling +================================== + +One of the design principles of :pyg:`PyG` is that models and data loading routines should be exchangeable to allow for flexible GNN and data loading experimentation. +As such, models can usually be written in a data loading agnostic fashion, independent of whether one applies full-batch or mini-batch training strategies via, *e.g.*, :class:`~torch_geometric.loader.DataLoader`, :class:`~torch_geometric.loader.NeighborLoader` or :class:`~torch_geometric.loader.ClusterLoader`. +However, in some scenarios, this flexibility comes at the cost of performance, as the model cannot exploit special characteristics of the underlying data loading routine. 
+One such limitation is that a GNN trained with the :class:`~torch_geometric.loader.NeighborLoader` routine iteratively builds representations for *all* nodes at *all* depths of the network, although nodes sampled in later hops do not contribute to the node representations of seed nodes in later GNN layers anymore, thus performing useless computation. + +*Hierarchical Neighborhood Sampling* or *Hierarchical Graph Adjacency Matrix (HGAM)* is a technique available in :pyg:`PyG` to eliminate this overhead and speed up training and inference in mini-batch GNNs. +Its main idea is to progressively trim the adjacency matrix of the returned subgraph before inputting it to each GNN layer. +It works seamlessly across several models, basically reducing the amount of compute necessary to generate the representations for the seed nodes of the given mini-batch. + +Crucially, HGAM recognizes that the computation of the final node representations is only necessary for the seed nodes (which are the real target of the batch computation). +Thus, HGAM allows every layer of the GNN to compute only the representations of the nodes that are necessary for that layer, leading to a reduction in computation and a speed-up of the training process that grows with the depth of the GNN being considered. +In practice, this is achieved by **trimming the adjacency matrix** and the various **feature matrices** as the computation proceeds throughout the GNN layers. +This is in line with the fact that in order to compute the representation for the seed/target nodes (from which the mini-batch was built via sampling methods), the depth of the relevant neighborhood shrinks as we proceed through the layers of the GNN. +The trimming applied by HGAM is possible as the nodes of the subgraph built via sampling are ordered according to a *Breadth First Search (BFS)* strategy, meaning that the rows and columns of the adjacency matrix refer to a node ordering that starts with the seed nodes (in any order), followed by the 1-hop sampled neighbors of the first seed node, followed by the 1-hop sampled neighbors of the second seed node, and so on. +The BFS ordering of nodes in a mini-batch allows for incremental trimming (reduction) of the adjacency matrix of the subgraph. +This progressive trimming is done in a computationally convenient manner thanks to the BFS ordering, which causes the nodes more distant from the seed nodes to appear farther away in the list of ordered nodes. + +To support this trimming and implement it effectively, the :class:`~torch_geometric.loader.NeighborLoader` implementation in :pyg:`PyG` and in :pyg:`pyg-lib` additionally returns the number of nodes and edges sampled in each hop. +This information allows for fast manipulation of the adjacency matrix, which in turn leads to a great reduction in computation. +The :class:`~torch_geometric.loader.NeighborLoader` prepares this metadata via the dedicated attributes :obj:`num_sampled_nodes` and :obj:`num_sampled_edges`. +It can be accessed from the :class:`~torch_geometric.data.Batch` object returned for both homogeneous and heterogeneous graphs. + +To sum up, HGAM is a special data structure that enables efficient message passing computation in :class:`~torch_geometric.loader.NeighborLoader` scenarios. +HGAM is implemented in :pyg:`PyG` and can be utilized via the special :meth:`~torch_geometric.utils.trim_to_layer` functionality. +HGAM is currently an option that :pyg:`PyG` users are free to switch on or leave off *(the current default)*.
+ +Usage +----- + +Here, we show examples of how to use the HGAM functionality in combination with :class:`~torch_geometric.loader.NeighborLoader`: + +* **Homogeneous data example:** + + .. code-block:: python + + from torch_geometric.datasets import Planetoid + from torch_geometric.loader import NeighborLoader + + data = Planetoid(path, name='Cora')[0] + + loader = NeighborLoader( + data, + num_neighbors=[10] * 3, + batch_size=128, + ) + + batch = next(iter(loader)) + print(batch) + >>> Data(x=[1883, 1433], edge_index=[2, 5441], y=[1883], train_mask=[1883], + val_mask=[1883], test_mask=[1883], batch_size=128, + num_sampled_nodes=[4], num_sampled_edges=[3]) + + print(batch.num_sampled_nodes) + >>> [128, 425, 702, 628] # Number of sampled nodes per hop/layer. + print(batch.num_sampled_edges) + >>> [520, 2036, 2885] # Number of sampled edges per hop/layer. + +* **Heterogeneous data example:** + + .. code-block:: python + + from torch_geometric.datasets import OGB_MAG + from torch_geometric.loader import NeighborLoader + + data = OGB_MAG(path)[0] + + loader = NeighborLoader( + data, + num_neighbors=[10] * 3, + batch_size=128, + input_nodes='paper', + ) + + batch = next(iter(loader)) + print(batch) + >>> HeteroData( + paper={ + x=[2275, 128], + num_sampled_nodes=[3], + batch_size=128, + }, + author={ + num_nodes=2541, + num_sampled_nodes=[3], + }, + institution={ + num_nodes=0, + num_sampled_nodes=[3], + }, + field_of_study={ + num_nodes=0, + num_sampled_nodes=[3], + }, + (author, affiliated_with, institution)={ + edge_index=[2, 0], + num_sampled_edges=[2], + }, + (author, writes, paper)={ + edge_index=[2, 3255], + num_sampled_edges=[2], + }, + (paper, cites, paper)={ + edge_index=[2, 2691], + num_sampled_edges=[2], + }, + (paper, has_topic, field_of_study)={ + edge_index=[2, 0], + num_sampled_edges=[2], + } + ) + print(batch['paper'].num_sampled_nodes) + >>> [128, 508, 1598] # Number of sampled paper nodes per hop/layer. + + print(batch['author', 'writes', 'paper'].num_sampled_edges) + >>>> [629, 2621] # Number of sampled autor<>paper edges per hop/layer. + +The attributes :obj:`num_sampled_nodes` and :obj:`num_sampled_edges` can be used by the :meth:`~torch_geometric.utils.trim_to_layer` function inside the GNN: + +.. code-block:: python + + from torch_geometric.datasets import Reddit + from torch_geometric.loader import NeighborLoader + from torch_geometric.nn import SAGEConv + from torch_geometric.utils import trim_to_layer + + dataset = Reddit(path) + loader = NeighborLoader(data, num_neighbors=[10, 5, 5], ...) + + class GNN(torch.nn.Module): + def __init__(self, in_channels: int, out_channels: int, num_layers: int): + super().__init__() + + self.convs = ModuleList([SAGEConv(in_channels, 64)]) + for _ in range(num_layers - 1): + self.convs.append(SAGEConv(hidden_channels, hidden_channels)) + self.lin = Linear(hidden_channels, out_channels) + + def forward( + self, + x: Tensor, + edge_index: Tensor, + num_sampled_nodes_per_hop: List[int], + num_sampled_edges_per_hop: List[int], + ) -> Tensor: + + for i, conv in enumerate(self.convs): + # Trim edge and node information to the current layer `i`. + x, edge_index, _ = trim_to_layer( + i, num_sampled_nodes_per_hop, num_sampled_edges_per_hop, + x, edge_index) + + x = conv(x, edge_index).relu() + + return self.lin(x) + +Examples +-------- + +We provide full examples of HGAM in the :pyg:`PyG` :obj:`examples/` folder: + +* :obj:`examples/hierarchical_sampling.py`: An `example `__ to show-case the basic usage of HGAM. 
+* :obj:`examples/hetero/hierarchical_sage.py`: An `example `__ of HGAM on heterogeneous graphs. diff --git a/docs/source/index.rst b/docs/source/index.rst index 5c18cbdd4329..cd75bddb295f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -40,6 +40,7 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many advanced/batching advanced/sparse_tensor + advanced/hgam advanced/jit advanced/remote advanced/graphgym From 1e1202ccd822bffa51c4ee82133e067f2f3d5e89 Mon Sep 17 00:00:00 2001 From: happykygo <62350285+happykygo@users.noreply.github.com> Date: Mon, 19 Jun 2023 10:51:21 -0400 Subject: [PATCH 1301/2432] `LightGCN` example (#7603) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/lightgcn.py | 108 ++++++++++++++++++++++++++ torch_geometric/data/hetero_data.py | 14 +++- torch_geometric/nn/models/lightgcn.py | 6 ++ 4 files changed, 128 insertions(+), 1 deletion(-) create mode 100644 examples/lightgcn.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b48fb3405998..e1cd131814d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) - Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) - Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601)) - Added the `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361)) diff --git a/examples/lightgcn.py b/examples/lightgcn.py new file mode 100644 index 000000000000..b526013a9b0e --- /dev/null +++ b/examples/lightgcn.py @@ -0,0 +1,108 @@ +import os.path as osp + +import torch +from tqdm import tqdm + +from torch_geometric.datasets import AmazonBook +from torch_geometric.nn import LightGCN +from torch_geometric.utils import degree + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Amazon') +dataset = AmazonBook(path) +data = dataset[0] +num_users, num_books = data['user'].num_nodes, data['book'].num_nodes +data = data.to_homogeneous().to(device) + +# Use all message passing edges as training labels: +batch_size = 8192 +mask = data.edge_index[0] < data.edge_index[1] +train_edge_label_index = data.edge_index[:, mask] +train_loader = torch.utils.data.DataLoader( + range(train_edge_label_index.size(1)), + shuffle=True, + batch_size=batch_size, +) + +model = LightGCN( + num_nodes=data.num_nodes, + embedding_dim=64, + num_layers=2, +).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + + +def train(): + total_loss = total_examples = 0 + + for index in tqdm(train_loader): + # Sample positive and negative labels. 
+ pos_edge_label_index = train_edge_label_index[:, index] + neg_edge_label_index = torch.stack([ + pos_edge_label_index[0], + torch.randint(num_users, num_users + num_books, + (index.numel(), ), device=device) + ], dim=0) + edge_label_index = torch.cat([ + pos_edge_label_index, + neg_edge_label_index, + ], dim=1) + + optimizer.zero_grad() + pos_rank, neg_rank = model(data.edge_index, edge_label_index).chunk(2) + + loss = model.recommendation_loss( + pos_rank, + neg_rank, + node_id=edge_label_index.unique(), + ) + loss.backward() + optimizer.step() + + total_loss += float(loss) * pos_rank.numel() + total_examples += pos_rank.numel() + + return total_loss / total_examples + + +@torch.no_grad() +def test(k: int): + emb = model.get_embedding(data.edge_index) + user_emb, book_emb = emb[:num_users], emb[num_users:] + + precision = recall = total_examples = 0 + for start in range(0, num_users, batch_size): + end = start + batch_size + logits = user_emb[start:end] @ book_emb.t() + + # Exclude training edges: + mask = ((train_edge_label_index[0] >= start) & + (train_edge_label_index[0] < end)) + logits[train_edge_label_index[0, mask] - start, + train_edge_label_index[1, mask] - num_users] = float('-inf') + + # Computing precision and recall: + ground_truth = torch.zeros_like(logits, dtype=torch.bool) + mask = ((data.edge_label_index[0] >= start) & + (data.edge_label_index[0] < end)) + ground_truth[data.edge_label_index[0, mask] - start, + data.edge_label_index[1, mask] - num_users] = True + node_count = degree(data.edge_label_index[0, mask] - start, + num_nodes=logits.size(0)) + + topk_index = logits.topk(k, dim=-1).indices + isin_mat = ground_truth.gather(1, topk_index) + + precision += float((isin_mat.sum(dim=-1) / k).sum()) + recall += float((isin_mat.sum(dim=-1) / node_count.clamp(1e-6)).sum()) + total_examples += int((node_count > 0).sum()) + + return precision / total_examples, recall / total_examples + + +for epoch in range(1, 101): + loss = train() + precision, recall = test(k=20) + print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Precision@20: ' + f'{precision:.4f}, Recall@20: {recall:.4f}') diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 8c352a8c7919..16c4320f50b6 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -784,7 +784,9 @@ def get_sizes(stores: List[BaseStorage]) -> Dict[str, List[Tuple]]: sizes_dict = defaultdict(list) for store in stores: for key, value in store.items(): - if key in ['edge_index', 'adj', 'adj_t']: + if key in [ + 'edge_index', 'edge_label_index', 'adj', 'adj_t' + ]: continue if isinstance(value, Tensor): dim = self.__cat_dim__(key, value, store) @@ -886,6 +888,16 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: value = torch.cat(values, dim) if len(values) > 1 else values[0] data[key] = value + if len(self.edge_label_index_dict) > 0: + edge_label_index_dict = self.edge_label_index_dict + for edge_type, edge_label_index in edge_label_index_dict.items(): + edge_label_index = edge_label_index.clone() + edge_label_index[0] += node_slices[edge_type[0]][0] + edge_label_index[1] += node_slices[edge_type[-1]][0] + edge_label_index_dict[edge_type] = edge_label_index + data.edge_label_index = torch.cat( + list(edge_label_index_dict.values()), dim=-1) + if add_node_type: sizes = [offset[1] - offset[0] for offset in node_slices.values()] sizes = torch.tensor(sizes, dtype=torch.long, device=device) diff --git a/torch_geometric/nn/models/lightgcn.py 
b/torch_geometric/nn/models/lightgcn.py index 97495bed427f..9de700f61465 100644 --- a/torch_geometric/nn/models/lightgcn.py +++ b/torch_geometric/nn/models/lightgcn.py @@ -43,6 +43,12 @@ class LightGCN(torch.nn.Module): by :obj:`edge_index` while rankings or link probabilities are computed according to the edges specified by :obj:`edge_label_index`. + .. note:: + + For an example of using :class:`LightGCN`, see `examples/lightgcn.py + `_. + Args: num_nodes (int): The number of nodes in the graph. embedding_dim (int): The dimensionality of node embeddings. From c33c4812d08c9ae02dc9172702a77e3bac0d6025 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Mon, 19 Jun 2023 15:59:27 +0100 Subject: [PATCH 1302/2432] Add `LinkEncoder` of GraphMixer (#7459) This PR introduces the "link-encoder" defined in 3.1 of the GraphMixer paper, [Do We Really Need Complicated Model Architectures For Temporal Networks?](https://openreview.net/forum?id=ayPPc0SyLv1). --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinu Sunil --- CHANGELOG.md | 2 +- test/nn/models/test_graph_mixer.py | 60 ++++++- torch_geometric/nn/encoding.py | 4 +- torch_geometric/nn/models/graph_mixer.py | 220 ++++++++++++++++++++++- 4 files changed, 281 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1cd131814d3..8d1ad426f9b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,7 +17,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `Performer` to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465)) - Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516)) - Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515)) -- Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501)) +- Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501), [#7459](https://github.com/pyg-team/pytorch_geometric/pull/7459)) - Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246), [#7534](https://github.com/pyg-team/pytorch_geometric/pull/7534)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) diff --git a/test/nn/models/test_graph_mixer.py b/test/nn/models/test_graph_mixer.py index 072ad5061f37..2e40ee631d7d 100644 --- a/test/nn/models/test_graph_mixer.py +++ b/test/nn/models/test_graph_mixer.py @@ -1,6 +1,10 @@ import torch -from torch_geometric.nn.models.graph_mixer import NodeEncoder +from torch_geometric.nn.models.graph_mixer import ( + LinkEncoder, + NodeEncoder, + get_latest_k_edge_attrs, +) def test_node_encoder(): @@ -24,3 +28,57 @@ def test_node_encoder(): [3], ]) assert torch.allclose(out, expected) + + +def test_link_encoding(): + num_nodes = 3 + num_edges = 6 + num_edge_features = 10 + edge_attr = torch.rand((num_edges, num_edge_features)) + edge_index = torch.randint(low=0, high=num_nodes, size=(2, num_edges)) + edge_time = torch.rand(num_edges) + + K = 3 + hidden_channels = 7 + out_channels = 11 + time_channels = 13 + dropout = 0.5 + + encoder = 
LinkEncoder( + K=K, + in_channels=num_edge_features, + hidden_channels=hidden_channels, + out_channels=out_channels, + time_channels=time_channels, + dropout=dropout, + ) + assert str(encoder) == (f'LinkEncoder(K={K}, ' + f'in_channels={num_edge_features}, ' + f'hidden_channels={hidden_channels}, ' + f'out_channels={out_channels}, ' + f'time_channels={time_channels}, ' + f'dropout={dropout})') + + out = encoder(edge_attr, edge_time, edge_index) + assert out.size() == (num_nodes, out_channels) + + +def test_latest_k_edge_attr(): + num_nodes = 3 + edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 0], [0, 1, 0, 1, 0, 1, 2]]) + edge_time = torch.tensor([3, 1, 2, 3, 1, 2, 3]) + edge_attr = torch.tensor([1, -1, 3, 4, -1, 6, 7]).view(-1, 1) + + k = 2 + latest_k_edge_attrs = get_latest_k_edge_attrs(k, edge_index, edge_time, + edge_attr, num_nodes) + expected_output = torch.tensor([[[1], [3]], [[4], [6]], [[7], [0]]]) + assert latest_k_edge_attrs.shape == (3, 2, 1) + assert latest_k_edge_attrs.equal(expected_output) + + k = 1 + latest_k_edge_attrs = get_latest_k_edge_attrs(k, edge_index, edge_time, + edge_attr, num_nodes) + expected_output = torch.tensor([[[1]], [[4]], [[7]]]) + assert latest_k_edge_attrs.shape == (3, 1, 1) + assert latest_k_edge_attrs.equal(expected_output) diff --git a/torch_geometric/nn/encoding.py b/torch_geometric/nn/encoding.py index cdead3b24030..6b5e297f2052 100644 --- a/torch_geometric/nn/encoding.py +++ b/torch_geometric/nn/encoding.py @@ -62,8 +62,8 @@ class TemporalEncoding(torch.nn.Module): r"""The time-encoding function from the `"Do We Really Need Complicated Model Architectures for Temporal Networks?" `_ paper. - :class:`TemporalEncoding` first maps each entry to a vector with - monotonically exponentially decreasing values, and then uses the cosine + It first maps each entry to a vector with + exponentially decreasing values, and then uses the cosine function to project all values to range :math:`[-1, 1]` .. math:: diff --git a/torch_geometric/nn/models/graph_mixer.py b/torch_geometric/nn/models/graph_mixer.py index dd702383aa01..82572cc59eb6 100644 --- a/torch_geometric/nn/models/graph_mixer.py +++ b/torch_geometric/nn/models/graph_mixer.py @@ -1,7 +1,13 @@ +from typing import Optional + +import numpy as np import torch +import torch.nn.functional as F from torch import Tensor -from torch_geometric.utils import scatter +from torch_geometric.nn import TemporalEncoding +from torch_geometric.utils import scatter, to_dense_batch +from torch_geometric.utils.num_nodes import maybe_num_nodes class NodeEncoder(torch.nn.Module): @@ -47,3 +53,215 @@ def forward( def __repr__(self) -> str: return f'{self.__class__.__name__}(time_window={self.time_window})' + + +# TODO: Generalize the module when needed +class _MLPMixer(torch.nn.Module): + """1-layer MLP-mixer for GraphMixer. + + Args: + num_tokens (int): The number of tokens (patches) in each sample. + in_channels (int): Input channels. + out_channels (int): Output channels. + dropout (float, optional): The dropout probability. 
(default: :obj:`0`) + """ + def __init__(self, num_tokens: int, in_channels: int, out_channels: int, + dropout: float = 0): + super().__init__() + self.num_tokens = num_tokens + self.in_channels = in_channels + self.out_channels = out_channels + self.dropout = dropout + + # token mixing + self.token_layer_norm = torch.nn.LayerNorm((in_channels, )) + self.token_lin_1 = torch.nn.Linear(num_tokens, num_tokens // 2) + self.token_lin_2 = torch.nn.Linear(num_tokens // 2, num_tokens) + + # channel mixing + self.channel_layer_norm = torch.nn.LayerNorm((in_channels, )) + self.channel_lin_1 = torch.nn.Linear(in_channels, 4 * in_channels) + self.channel_lin_2 = torch.nn.Linear(4 * in_channels, in_channels) + + # head + self.head_layer_norm = torch.nn.LayerNorm((in_channels, )) + self.head_lin = torch.nn.Linear(in_channels, out_channels) + + def forward(self, x: Tensor) -> Tensor: + """ + Args: + x (torch.Tensor): Features tensor of size + :obj:`[N, num_tokens, in_channels]`. + + Returns: + Tensor of size :obj:`[N, out_channels]`. + """ + # token mixing + h = self.token_layer_norm(x).mT + h = self.token_lin_1(h) + h = F.gelu(h) + h = F.dropout(h, p=self.dropout, training=self.training) + h = self.token_lin_2(h) + h = F.dropout(h, p=self.dropout, training=self.training) + h_token = h.mT + x + + # channel mixing + h = self.channel_layer_norm(h_token) + h = self.channel_lin_1(h) + h = F.gelu(h) + h = F.dropout(h, p=self.dropout, training=self.training) + h = self.channel_lin_2(h) + h = F.dropout(h, p=self.dropout, training=self.training) + h_channel = h + h_token + + # head + h_channel = self.head_layer_norm(h_channel) + t = torch.mean(h_channel, dim=1) + return self.head_lin(t) + + def __repr__(self) -> str: + return (f"{self.__class__.__name__}(" + f"num_tokens={self.num_tokens}, " + f"in_channels={self.in_channels}, " + f"out_channels={self.out_channels}, " + f"dropout={self.dropout})") + + +def get_latest_k_edge_attrs(K: int, edge_index: Tensor, edge_time: Tensor, + edge_attr: Tensor, num_nodes: int) -> Tensor: + r"""Returns the latest :obj:`K` incoming edge attributes by + :obj:`edge_time` for each node. The shape + of the output tensor is :obj:`[num_nodes, K, edge_attr_dim]`. + Nodes with fewer than :obj:`K` incoming edges are zero-padded. + Args: + K (int): The number of edges to keep for each node. + edge_index (LongTensor): The edge indices. + edge_time (Tensor): The edge timestamps. + edge_attr (Tensor): The edge attributes. + num_nodes (int): The number of nodes in the graph. + :rtype: :class:`Tensor` + """ + assert (edge_time >= 0).all() + _, col = edge_index + perm = np.lexsort( + [-edge_time.detach().cpu().numpy(), + col.detach().cpu().numpy()]) + perm = torch.from_numpy(perm).to(edge_index.device) + col = col[perm] + edge_attr = edge_attr[perm] + + # zero-pad each node's edges: + # [num_edges, hidden_channels] -> [num_nodes*K, hidden_channels] + edge_attr, _ = to_dense_batch( + edge_attr, + col, + max_num_nodes=K, + batch_size=num_nodes, + ) + return edge_attr + + +class LinkEncoder(torch.nn.Module): + r"""The link-encoding function from the `"Do We Really Need Complicated + Model Architectures for Temporal Networks?" + `_ paper. + It is composed of two components. The first component is + :class:`TemporalEncoding` that maps each edge timestamp to a + :obj:`time_channels` dimensional vector. + The second component a 1-layer MLP that maps each encoded timestamp + feature concatenated with its corresponding link feature to a + :obj:`out_channels` dimensional vector. 
+ + Args: + K (int): The number of most recent teomporal links to use to construct + an intermediate feature representation for each node. + in_channels (int): Edge feature dimensionality. + hidden_channels (int): Size of each hidden sample. + time_channels (int): Size of encoded timestamp using + :class:`TemporalEncoding`. + out_channels (int): Size of each output sample. + is_sorted (bool, optional): If set to :obj:`True`, assumes that + :obj:`edge_index` is sorted by column and the + rows are sorted according to :obj:`edge_time` + within individual neighborhoods. This avoids internal + re-sorting of the data and can improve runtime and memory + efficiency. (default: :obj:`False`) + dropout (float, optional): Dropout probability of the MLP layer. + (default: :obj:`0.0`) + + """ + def __init__( + self, + K: int, + in_channels: int, + hidden_channels: int, + out_channels: int, + time_channels: int, + is_sorted: bool = False, + dropout: float = 0.0, + ): + super().__init__() + self.K = K + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.time_channels = time_channels + self.is_sorted = is_sorted + self.dropout = dropout + + # teomporal encoder + self.temporal_encoder = TemporalEncoding(time_channels) + self.temporal_encoder_head = torch.nn.Linear( + time_channels + in_channels, + hidden_channels, + ) + + # MLP that summarises temporal embedding. + self.mlp_mixer = _MLPMixer( + num_tokens=K, + in_channels=hidden_channels, + out_channels=out_channels, + dropout=dropout, + ) + + def forward( + self, + edge_attr: Tensor, + edge_time: Tensor, + edge_index: Tensor, + num_nodes: Optional[int] = None, + ) -> Tensor: + """ + Args: + edge_attr (torch.Tensor): The edge features of shape + :obj:`[num_edges, in_channels]`. + edge_time (torch.Tensor): The time tensor of shape + :obj:`[num_edges]`. This can be in the order of millions. + edge_index (torch.Tensor): The edge indicies. + num_nodes (int, optional): The number of nodes in the graph. + (default: :obj:`None`) + + Returns: + A node embedding tensor of shape :obj:`[num_nodes, out_channels]`. + """ + num_nodes = maybe_num_nodes(edge_index, num_nodes) + time_info = self.temporal_encoder(edge_time) + edge_attr_time = torch.cat((time_info, edge_attr), dim=1) + edge_attr_time = self.temporal_encoder_head(edge_attr_time) + + if not self.is_sorted: + edge_attr_time = get_latest_k_edge_attrs(self.K, edge_index, + edge_time, edge_attr_time, + num_nodes) + + return self.mlp_mixer( + edge_attr_time.view(-1, self.K, self.hidden_channels)) + + def __repr__(self): + return (f"{self.__class__.__name__}(" + f"K={self.K}, " + f"in_channels={self.in_channels}, " + f"hidden_channels={self.hidden_channels}, " + f"out_channels={self.out_channels}, " + f"time_channels={self.time_channels}, " + f"dropout={self.dropout})") From f5ebe30e677df4f13a7ef4473ccf151907e09392 Mon Sep 17 00:00:00 2001 From: Mohamad Zamini <32536264+mzamini92@users.noreply.github.com> Date: Tue, 20 Jun 2023 07:15:20 -0600 Subject: [PATCH 1303/2432] Add `interval` argument to `LocalCartesian` (#7533) The intermediate tensors `cart` and `max_value` in the original code were replaced with in-place operations to reduce memory usage. This was done by directly operating on the `cart` tensor and computing the maximum value iteratively without creating a separate `max_value` tensor. 
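For context, a minimal usage sketch of the new argument (toy values, not taken from this PR's diff); `interval=(-1.0, 1.0)` here simply rescales the default `[0, 1]` output range:

```python
import torch

from torch_geometric.data import Data
from torch_geometric.transforms import LocalCartesian

# A toy graph with 2D node positions (illustrative values only).
data = Data(
    edge_index=torch.tensor([[0, 1], [1, 2]]),
    pos=torch.tensor([[0.0, 0.0], [1.0, 0.0], [3.0, 0.0]]),
)

# Neighborhood-normalize the relative coordinates to [-1, 1] instead of
# the default [0, 1]; the result is stored in `data.edge_attr`.
transform = LocalCartesian(norm=True, cat=True, interval=(-1.0, 1.0))
data = transform(data)
```
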
In-place operations `(torch.sub, cart.div_, cart.mul_, cart.add_)` were used to perform computations directly on tensors, reducing memory usage and eliminating the need for intermediate tensors. To compute the maximum value in a streaming fashion, a loop was introduced which iterates over the edges and updates the maximum value tensor (max_value) accordingly. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/transforms/local_cartesian.py | 30 ++++++++++++------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d1ad426f9b5..209fd25cd5a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `interval` argument to `LocalCartesian` transformation ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533)) - Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) - Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) - Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601)) diff --git a/torch_geometric/transforms/local_cartesian.py b/torch_geometric/transforms/local_cartesian.py index ee7916aeb297..d36455ed5c5c 100644 --- a/torch_geometric/transforms/local_cartesian.py +++ b/torch_geometric/transforms/local_cartesian.py @@ -1,3 +1,5 @@ +from typing import Tuple + import torch from torch_geometric.data import Data @@ -10,18 +12,26 @@ class LocalCartesian(BaseTransform): r"""Saves the relative Cartesian coordinates of linked nodes in its edge attributes (functional name: :obj:`local_cartesian`). Each coordinate gets - *neighborhood-normalized* to the interval :math:`{[0, 1]}^D`. + *neighborhood-normalized* to a specified interval + (:math:`[0, 1]` by default). Args: norm (bool, optional): If set to :obj:`False`, the output will not be - normalized to the interval :math:`{[0, 1]}^D`. - (default: :obj:`True`) + normalized. (default: :obj:`True`) cat (bool, optional): If set to :obj:`False`, all existing edge attributes will be replaced. (default: :obj:`True`) + interval ((float, float), optional): A tuple specifying the lower and + upper bound for normalization. 
(default: :obj:`(0.0, 1.0)`) """ - def __init__(self, norm: bool = True, cat: bool = True): + def __init__( + self, + norm: bool = True, + cat: bool = True, + interval: Tuple[float, float] = (0.0, 1.0), + ): self.norm = norm self.cat = cat + self.interval = interval def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr @@ -29,13 +39,13 @@ def forward(self, data: Data) -> Data: cart = pos[row] - pos[col] cart = cart.view(-1, 1) if cart.dim() == 1 else cart - max_value = scatter(cart.abs(), col, 0, pos.size(0), reduce='max') - max_value = max_value.max(dim=-1, keepdim=True)[0] - if self.norm: - cart = cart / (2 * max_value[col]) + 0.5 - else: - cart = cart / max_value[col] + max_value = scatter(cart.abs(), col, 0, pos.size(0), reduce='max') + max_value = max_value.max(dim=-1, keepdim=True)[0] + + length = self.interval[1] - self.interval[0] + center = (self.interval[0] + self.interval[1]) / 2 + cart = length * cart / (2 * max_value[col]) + center if pseudo is not None and self.cat: pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo From ce75464773939013ee5e359db6a3c72bf05ddf35 Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Wed, 21 Jun 2023 02:34:47 -0700 Subject: [PATCH 1304/2432] Adjust `atol` in `MessagePassing` tests (#7619) Make it consistent with other tests Signed-off-by: Serge Panev --- test/nn/conv/test_message_passing.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index 81e35df151e8..d77f7a7a68b2 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -401,7 +401,7 @@ def hook(module, inputs, output): out2 = conv(x, adj.t()) assert num_pre_hook_calls == 5 assert num_hook_calls == 5 - assert torch.allclose(out1, out2) + assert torch.allclose(out1, out2, atol=1e-6) handle1.remove() assert len(conv._propagate_forward_pre_hooks) == 0 @@ -436,7 +436,7 @@ def hook(module, inputs, output): out2 = conv(x, adj.t()) assert num_pre_hook_calls == 7 assert num_hook_calls == 7 - assert torch.allclose(out1, out2) + assert torch.allclose(out1, out2, atol=1e-6) handle1.remove() assert len(conv._propagate_forward_pre_hooks) == 0 @@ -463,7 +463,7 @@ def hook(module, inputs, output): conv.register_message_forward_hook(hook) out2 = conv(x, edge_index, edge_weight) - assert not torch.allclose(out1, out2) + assert not torch.allclose(out1, out2, atol=1e-6) class MyDefaultArgConv(MessagePassing): From 2bb41ccf27b9e1422dc8b97d9e504bda4db1c936 Mon Sep 17 00:00:00 2001 From: Mohamad Zamini <32536264+mzamini92@users.noreply.github.com> Date: Wed, 21 Jun 2023 03:55:55 -0600 Subject: [PATCH 1305/2432] Add `interval` argument to `Cartesian` (#7614) I've added the `interval` argument to the `LineGraph` class and modified the code to apply the normalization to the `data.x` (edge_attr) based on the specified interval. @rusty1s --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/transforms/test_cartesian.py | 24 +++++++++++++++++------- torch_geometric/transforms/cartesian.py | 25 ++++++++++++++++--------- 3 files changed, 34 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 209fd25cd5a4..aea479654d08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added `interval` argument to `LocalCartesian` transformation ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533)) +- Added `interval` argument to `Cartesian` and `LocalCartesian` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614)) - Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) - Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) - Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601)) diff --git a/test/transforms/test_cartesian.py b/test/transforms/test_cartesian.py index 9b66f6d4fa55..eeaa8393c609 100644 --- a/test/transforms/test_cartesian.py +++ b/test/transforms/test_cartesian.py @@ -14,14 +14,24 @@ def test_cartesian(): data = Data(edge_index=edge_index, pos=pos) data = Cartesian(norm=False)(data) assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[-1, 0], [1, 0], [-2, 0], [2, 0]] + assert torch.equal(data.pos, pos) + assert torch.equal(data.edge_index, edge_index) + assert torch.allclose( + data.edge_attr, + torch.tensor([[-1.0, 0.0], [1.0, 0.0], [-2.0, 0.0], [2.0, 0.0]]), + ) data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) data = Cartesian(norm=True)(data) assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[1, 0.25, 0.5], [2, 0.75, 0.5], - [3, 0, 0.5], [4, 1, 0.5]] + assert torch.equal(data.pos, pos) + assert torch.equal(data.edge_index, edge_index) + assert torch.allclose( + data.edge_attr, + torch.tensor([ + [1, 0.25, 0.5], + [2, 0.75, 0.5], + [3, 0, 0.5], + [4, 1, 0.5], + ]), + ) diff --git a/torch_geometric/transforms/cartesian.py b/torch_geometric/transforms/cartesian.py index 0f59922c67e4..ec5fd5dea73b 100644 --- a/torch_geometric/transforms/cartesian.py +++ b/torch_geometric/transforms/cartesian.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Tuple import torch @@ -10,27 +10,31 @@ @functional_transform('cartesian') class Cartesian(BaseTransform): r"""Saves the relative Cartesian coordinates of linked nodes in its edge - attributes (functional name: :obj:`cartesian`). + attributes (functional name: :obj:`cartesian`). Each coordinate gets + globally normalized to a specified interval (:math:`[0, 1]` by default). Args: norm (bool, optional): If set to :obj:`False`, the output will not be - normalized to the interval :math:`{[0, 1]}^D`. - (default: :obj:`True`) + normalized. (default: :obj:`True`) max_value (float, optional): If set and :obj:`norm=True`, normalization will be performed based on this value instead of the maximum value found in the data. (default: :obj:`None`) cat (bool, optional): If set to :obj:`False`, all existing edge attributes will be replaced. (default: :obj:`True`) + interval ((float, float), optional): A tuple specifying the lower and + upper bound for normalization. 
(default: :obj:`(0.0, 1.0)`) """ def __init__( - self, - norm: bool = True, - max_value: Optional[float] = None, - cat: bool = True, + self, + norm: bool = True, + max_value: Optional[float] = None, + cat: bool = True, + interval: Tuple[float, float] = (0.0, 1.0), ): self.norm = norm self.max = max_value self.cat = cat + self.interval = interval def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr @@ -40,7 +44,10 @@ def forward(self, data: Data) -> Data: if self.norm and cart.numel() > 0: max_value = cart.abs().max() if self.max is None else self.max - cart = cart / (2 * max_value) + 0.5 + + length = self.interval[1] - self.interval[0] + center = (self.interval[0] + self.interval[1]) / 2 + cart = length * cart / (2 * max_value) + center if pseudo is not None and self.cat: pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo From 53e7470dd09738a74a636f32af77be0ffee97fd8 Mon Sep 17 00:00:00 2001 From: YanbingJiang Date: Wed, 21 Jun 2023 18:14:01 +0800 Subject: [PATCH 1306/2432] Add `compile` option in benchmark scripts (#7522) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- benchmark/training/training_benchmark.py | 6 +++ torch_geometric/nn/models/basic_gnn.py | 47 ++++++++++++++++-------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index 15a4b2da7522..e17fd0368d75 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -17,6 +17,7 @@ test, write_to_csv, ) +from torch_geometric import compile from torch_geometric.loader import NeighborLoader from torch_geometric.nn import PNAConv from torch_geometric.profile import rename_profile_file, timeit, torch_profile @@ -191,6 +192,10 @@ def run(args: argparse.ArgumentParser): metadata=data.metadata() if hetero else None) model = model.to(device) model.train() + + if args.compile: + model = compile(model, dynamic=True) + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) @@ -330,6 +335,7 @@ def run(args: argparse.ArgumentParser): add('--export-chrome-trace', default=True, type=bool, help='Export chrome trace file. 
Works only with PyTorch profiler') add('--trim', action='http://github.com/store_true', help="Use `trim_to_layer` optimization") + add('--compile', action='http://github.com/store_true') args = argparser.parse_args() run(args) diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index fdea248ccc0e..7244893fc222 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -240,6 +240,31 @@ def forward( x = self.lin(x) if hasattr(self, 'lin') else x return x + @torch.no_grad() + def inference_per_layer( + self, + layer: int, + x: Tensor, + edge_index: Adj, + batch_size: int, + ) -> Tensor: + + x = self.convs[layer](x, edge_index)[:batch_size] + + if layer == self.num_layers - 1 and self.jk_mode is None: + return x + + if self.act is not None and self.act_first: + x = self.act(x) + if self.norms is not None: + x = self.norms[layer](x) + if self.act is not None and not self.act_first: + x = self.act(x) + if layer == self.num_layers - 1 and hasattr(self, 'lin'): + x = self.lin(x) + + return x + @torch.no_grad() def inference( self, @@ -280,37 +305,27 @@ def inference( pbar.set_description('Inference') x_all = loader.data.x.to(embedding_device) - loader.data.n_id = torch.arange(x_all.size(0)) for i in range(self.num_layers): xs: List[Tensor] = [] for batch in loader: x = x_all[batch.n_id].to(device) + batch_size = batch.batch_size if hasattr(batch, 'adj_t'): edge_index = batch.adj_t.to(device) else: edge_index = batch.edge_index.to(device) - x = self.convs[i](x, edge_index)[:batch.batch_size] - if i == self.num_layers - 1 and self.jk_mode is None: - xs.append(x.to(embedding_device)) - if progress_bar: - pbar.update(1) - continue - if self.act is not None and self.act_first: - x = self.act(x) - if self.norms is not None: - x = self.norms[i](x) - if self.act is not None and not self.act_first: - x = self.act(x) - if i == self.num_layers - 1 and hasattr(self, 'lin'): - x = self.lin(x) + + x = self.inference_per_layer(i, x, edge_index, batch_size) xs.append(x.to(embedding_device)) + if progress_bar: pbar.update(1) + x_all = torch.cat(xs, dim=0) + if progress_bar: pbar.close() - del loader.data.n_id return x_all From ce4fba6f8153cf330e487fcd146f354af6db7457 Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Wed, 21 Jun 2023 04:21:21 -0700 Subject: [PATCH 1307/2432] Fix `atol` in `HEATConv` tests (#7620) Signed-off-by: Serge Panev Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- test/nn/conv/test_heat_conv.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/nn/conv/test_heat_conv.py b/test/nn/conv/test_heat_conv.py index 99815b0cb854..73e94f2cfd72 100644 --- a/test/nn/conv/test_heat_conv.py +++ b/test/nn/conv/test_heat_conv.py @@ -25,15 +25,18 @@ def test_heat_conv(concat): if torch_geometric.typing.WITH_TORCH_SPARSE: adj = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) - assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out) + assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out, + atol=1e-5) if is_full_test(): t = '(Tensor, Tensor, Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose( - jit(x, edge_index, node_type, edge_type, edge_attr), out) + jit(x, edge_index, node_type, edge_type, edge_attr), out, + atol=1e-5) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, Tensor, Tensor, OptTensor) -> Tensor' jit 
= torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t(), node_type, edge_type), out) + assert torch.allclose(jit(x, adj.t(), node_type, edge_type), out, + atol=1e-5) From 5c30dcaf28ca3dd6481805d5497341e6e78096c2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 21 Jun 2023 13:25:26 +0200 Subject: [PATCH 1308/2432] Add `seed_time` to `GraphMixer` (#7621) --- test/nn/models/test_graph_mixer.py | 61 +++--- torch_geometric/nn/encoding.py | 6 +- torch_geometric/nn/models/graph_mixer.py | 257 ++++++++++++----------- 3 files changed, 160 insertions(+), 164 deletions(-) diff --git a/test/nn/models/test_graph_mixer.py b/test/nn/models/test_graph_mixer.py index 2e40ee631d7d..69b0ba19f33a 100644 --- a/test/nn/models/test_graph_mixer.py +++ b/test/nn/models/test_graph_mixer.py @@ -3,7 +3,7 @@ from torch_geometric.nn.models.graph_mixer import ( LinkEncoder, NodeEncoder, - get_latest_k_edge_attrs, + get_latest_k_edge_attr, ) @@ -14,6 +14,7 @@ def test_node_encoder(): seed_time = torch.tensor([2, 2, 2, 2]) encoder = NodeEncoder(time_window=2) + encoder.reset_parameters() assert str(encoder) == 'NodeEncoder(time_window=2)' out = encoder(x, edge_index, edge_time, seed_time) @@ -30,55 +31,45 @@ def test_node_encoder(): assert torch.allclose(out, expected) -def test_link_encoding(): +def test_link_encoder(): num_nodes = 3 num_edges = 6 - num_edge_features = 10 - edge_attr = torch.rand((num_edges, num_edge_features)) + edge_attr = torch.rand((num_edges, 10)) edge_index = torch.randint(low=0, high=num_nodes, size=(2, num_edges)) edge_time = torch.rand(num_edges) - - K = 3 - hidden_channels = 7 - out_channels = 11 - time_channels = 13 - dropout = 0.5 + seed_time = torch.ones(num_nodes) encoder = LinkEncoder( - K=K, - in_channels=num_edge_features, - hidden_channels=hidden_channels, - out_channels=out_channels, - time_channels=time_channels, - dropout=dropout, + k=3, + in_channels=edge_attr.size(1), + hidden_channels=7, + out_channels=11, + time_channels=13, ) - assert str(encoder) == (f'LinkEncoder(K={K}, ' - f'in_channels={num_edge_features}, ' - f'hidden_channels={hidden_channels}, ' - f'out_channels={out_channels}, ' - f'time_channels={time_channels}, ' - f'dropout={dropout})') + encoder.reset_parameters() + assert str(encoder) == ('LinkEncoder(k=3, in_channels=10, ' + 'hidden_channels=7, out_channels=11, ' + 'time_channels=13, dropout=0.0)') - out = encoder(edge_attr, edge_time, edge_index) - assert out.size() == (num_nodes, out_channels) + out = encoder(edge_index, edge_attr, edge_time, seed_time) + assert out.size() == (num_nodes, 11) def test_latest_k_edge_attr(): - num_nodes = 3 edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 0], [0, 1, 0, 1, 0, 1, 2]]) edge_time = torch.tensor([3, 1, 2, 3, 1, 2, 3]) edge_attr = torch.tensor([1, -1, 3, 4, -1, 6, 7]).view(-1, 1) k = 2 - latest_k_edge_attrs = get_latest_k_edge_attrs(k, edge_index, edge_time, - edge_attr, num_nodes) - expected_output = torch.tensor([[[1], [3]], [[4], [6]], [[7], [0]]]) - assert latest_k_edge_attrs.shape == (3, 2, 1) - assert latest_k_edge_attrs.equal(expected_output) + out = get_latest_k_edge_attr(k, edge_index, edge_attr, edge_time, + num_nodes=3) + expected = torch.tensor([[[1], [3]], [[4], [6]], [[7], [0]]]) + assert out.size() == (3, 2, 1) + assert torch.equal(out, expected) k = 1 - latest_k_edge_attrs = get_latest_k_edge_attrs(k, edge_index, edge_time, - edge_attr, num_nodes) - expected_output = torch.tensor([[[1]], [[4]], [[7]]]) - assert latest_k_edge_attrs.shape == (3, 1, 1) - assert 
latest_k_edge_attrs.equal(expected_output) + out = get_latest_k_edge_attr(k, edge_index, edge_attr, edge_time, + num_nodes=3) + expected = torch.tensor([[[1]], [[4]], [[7]]]) + assert out.size() == (3, 1, 1) + assert torch.equal(out, expected) diff --git a/torch_geometric/nn/encoding.py b/torch_geometric/nn/encoding.py index 6b5e297f2052..baa0e76271cb 100644 --- a/torch_geometric/nn/encoding.py +++ b/torch_geometric/nn/encoding.py @@ -62,9 +62,9 @@ class TemporalEncoding(torch.nn.Module): r"""The time-encoding function from the `"Do We Really Need Complicated Model Architectures for Temporal Networks?" `_ paper. - It first maps each entry to a vector with - exponentially decreasing values, and then uses the cosine - function to project all values to range :math:`[-1, 1]` + It first maps each entry to a vector with exponentially decreasing values, + and then uses the cosine function to project all values to range + :math:`[-1, 1]` .. math:: y_{i} = \cos \left(x \cdot \sqrt{d}^{-(i - 1)/\sqrt{d}} \right) diff --git a/torch_geometric/nn/models/graph_mixer.py b/torch_geometric/nn/models/graph_mixer.py index 82572cc59eb6..62c47cea1f39 100644 --- a/torch_geometric/nn/models/graph_mixer.py +++ b/torch_geometric/nn/models/graph_mixer.py @@ -1,13 +1,11 @@ -from typing import Optional - import numpy as np import torch import torch.nn.functional as F from torch import Tensor +from torch.nn import LayerNorm, Linear from torch_geometric.nn import TemporalEncoding from torch_geometric.utils import scatter, to_dense_batch -from torch_geometric.utils.num_nodes import maybe_num_nodes class NodeEncoder(torch.nn.Module): @@ -29,6 +27,9 @@ def __init__(self, time_window: int): super().__init__() self.time_window = time_window + def reset_parameters(self): + pass + def forward( self, x: Tensor, @@ -55,130 +56,126 @@ def __repr__(self) -> str: return f'{self.__class__.__name__}(time_window={self.time_window})' -# TODO: Generalize the module when needed class _MLPMixer(torch.nn.Module): - """1-layer MLP-mixer for GraphMixer. + r"""The MLP-Mixer module. Args: - num_tokens (int): The number of tokens (patches) in each sample. + num_tokens (int): Number of tokens/patches in each sample. in_channels (int): Input channels. out_channels (int): Output channels. - dropout (float, optional): The dropout probability. (default: :obj:`0`) + dropout (float, optional): Dropout probability. 
(default: :obj:`0.0`) """ - def __init__(self, num_tokens: int, in_channels: int, out_channels: int, - dropout: float = 0): + def __init__( + self, + num_tokens: int, + in_channels: int, + out_channels: int, + dropout: float = 0.0, + ): super().__init__() - self.num_tokens = num_tokens - self.in_channels = in_channels - self.out_channels = out_channels + self.dropout = dropout - # token mixing - self.token_layer_norm = torch.nn.LayerNorm((in_channels, )) - self.token_lin_1 = torch.nn.Linear(num_tokens, num_tokens // 2) - self.token_lin_2 = torch.nn.Linear(num_tokens // 2, num_tokens) + self.token_norm = LayerNorm(in_channels) + self.token_lin1 = Linear(num_tokens, num_tokens // 2) + self.token_lin2 = Linear(num_tokens // 2, num_tokens) + + self.channel_norm = LayerNorm(in_channels) + self.channel_lin1 = Linear(in_channels, 4 * in_channels) + self.channel_lin2 = Linear(4 * in_channels, in_channels) - # channel mixing - self.channel_layer_norm = torch.nn.LayerNorm((in_channels, )) - self.channel_lin_1 = torch.nn.Linear(in_channels, 4 * in_channels) - self.channel_lin_2 = torch.nn.Linear(4 * in_channels, in_channels) + self.head_norm = LayerNorm(in_channels) + self.head_lin = Linear(in_channels, out_channels) - # head - self.head_layer_norm = torch.nn.LayerNorm((in_channels, )) - self.head_lin = torch.nn.Linear(in_channels, out_channels) + def reset_parameters(self): + self.token_norm.reset_parameters() + self.token_lin1.reset_parameters() + self.token_lin2.reset_parameters() + self.channel_norm.reset_parameters() + self.channel_lin1.reset_parameters() + self.channel_lin2.reset_parameters() + self.head_norm.reset_parameters() + self.head_lin.reset_parameters() def forward(self, x: Tensor) -> Tensor: - """ + r""" Args: - x (torch.Tensor): Features tensor of size - :obj:`[N, num_tokens, in_channels]`. + x (torch.Tensor): Tensor of size + :obj:`[*, num_tokens, in_channels]`. Returns: - Tensor of size :obj:`[N, out_channels]`. + Tensor of size :obj:`[*, out_channels]`. """ - # token mixing - h = self.token_layer_norm(x).mT - h = self.token_lin_1(h) + # Token mixing: + h = self.token_norm(x).mT + h = self.token_lin1(h) h = F.gelu(h) h = F.dropout(h, p=self.dropout, training=self.training) - h = self.token_lin_2(h) + h = self.token_lin2(h) h = F.dropout(h, p=self.dropout, training=self.training) h_token = h.mT + x - # channel mixing - h = self.channel_layer_norm(h_token) - h = self.channel_lin_1(h) + # Channel mixing: + h = self.channel_norm(h_token) + h = self.channel_lin1(h) h = F.gelu(h) h = F.dropout(h, p=self.dropout, training=self.training) - h = self.channel_lin_2(h) + h = self.channel_lin2(h) h = F.dropout(h, p=self.dropout, training=self.training) h_channel = h + h_token - # head - h_channel = self.head_layer_norm(h_channel) - t = torch.mean(h_channel, dim=1) - return self.head_lin(t) - - def __repr__(self) -> str: - return (f"{self.__class__.__name__}(" - f"num_tokens={self.num_tokens}, " - f"in_channels={self.in_channels}, " - f"out_channels={self.out_channels}, " - f"dropout={self.dropout})") - - -def get_latest_k_edge_attrs(K: int, edge_index: Tensor, edge_time: Tensor, - edge_attr: Tensor, num_nodes: int) -> Tensor: - r"""Returns the latest :obj:`K` incoming edge attributes by - :obj:`edge_time` for each node. The shape - of the output tensor is :obj:`[num_nodes, K, edge_attr_dim]`. - Nodes with fewer than :obj:`K` incoming edges are zero-padded. - Args: - K (int): The number of edges to keep for each node. - edge_index (LongTensor): The edge indices. 
- edge_time (Tensor): The edge timestamps. - edge_attr (Tensor): The edge attributes. - num_nodes (int): The number of nodes in the graph. - :rtype: :class:`Tensor` - """ - assert (edge_time >= 0).all() + # Head: + out = self.head_norm(h_channel) + out = out.mean(dim=1) + out = self.head_lin(out) + return out + + +def get_latest_k_edge_attr( + k: int, + edge_index: Tensor, + edge_attr: Tensor, + edge_time: Tensor, + num_nodes: int, + is_sorted: bool = False, +) -> Tensor: + r"""Returns the latest :obj:`k` incoming edge attributes by + :obj:`edge_time` for each node. + The shape of the output tensor is :obj:`[num_nodes, k, edge_attr_dim]`. + Nodes with fewer than :obj:`k` incoming edges are zero-padded.""" _, col = edge_index - perm = np.lexsort( - [-edge_time.detach().cpu().numpy(), - col.detach().cpu().numpy()]) - perm = torch.from_numpy(perm).to(edge_index.device) - col = col[perm] - edge_attr = edge_attr[perm] - - # zero-pad each node's edges: - # [num_edges, hidden_channels] -> [num_nodes*K, hidden_channels] - edge_attr, _ = to_dense_batch( + + if not is_sorted: + perm = np.lexsort([ + -edge_time.detach().cpu().numpy(), + col.detach().cpu().numpy(), + ]) + perm = torch.from_numpy(perm).to(edge_index.device) + col = col[perm] + edge_attr = edge_attr[perm] + + return to_dense_batch( edge_attr, col, - max_num_nodes=K, + max_num_nodes=k, batch_size=num_nodes, - ) - return edge_attr + )[0] class LinkEncoder(torch.nn.Module): - r"""The link-encoding function from the `"Do We Really Need Complicated + r"""The link encoder module from the `"Do We Really Need Complicated Model Architectures for Temporal Networks?" `_ paper. - It is composed of two components. The first component is - :class:`TemporalEncoding` that maps each edge timestamp to a - :obj:`time_channels` dimensional vector. - The second component a 1-layer MLP that maps each encoded timestamp - feature concatenated with its corresponding link feature to a - :obj:`out_channels` dimensional vector. + It is composed of two components: (1) :class:`TemporalEncoding` maps each + edge timestamp to a :obj:`time_channels`-dimensional vector; (2) an MLP + that groups and maps the :math:`k`-latest encoded timestamps and edge + features to a :obj:`out_channels`-dimensional representation. Args: - K (int): The number of most recent teomporal links to use to construct - an intermediate feature representation for each node. - in_channels (int): Edge feature dimensionality. + k (int): The number of most recent temporal links to use. + in_channels (int): The edge feature dimensionality. hidden_channels (int): Size of each hidden sample. - time_channels (int): Size of encoded timestamp using - :class:`TemporalEncoding`. + time_channels (int): Size of encoded timestamp. out_channels (int): Size of each output sample. is_sorted (bool, optional): If set to :obj:`True`, assumes that :obj:`edge_index` is sorted by column and the @@ -188,11 +185,10 @@ class LinkEncoder(torch.nn.Module): efficiency. (default: :obj:`False`) dropout (float, optional): Dropout probability of the MLP layer. 
(default: :obj:`0.0`) - """ def __init__( self, - K: int, + k: int, in_channels: int, hidden_channels: int, out_channels: int, @@ -201,7 +197,8 @@ def __init__( dropout: float = 0.0, ): super().__init__() - self.K = K + + self.k = k self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels @@ -209,59 +206,67 @@ def __init__( self.is_sorted = is_sorted self.dropout = dropout - # teomporal encoder self.temporal_encoder = TemporalEncoding(time_channels) - self.temporal_encoder_head = torch.nn.Linear( - time_channels + in_channels, - hidden_channels, - ) + self.temporal_head = Linear(time_channels + in_channels, + hidden_channels) - # MLP that summarises temporal embedding. - self.mlp_mixer = _MLPMixer( - num_tokens=K, + self.mlp_mixer = _MLPMixer( # MLP that summarizes temporal embeddings: + num_tokens=k, in_channels=hidden_channels, out_channels=out_channels, dropout=dropout, ) + def reset_parameters(self): + self.temporal_encoder.reset_parameters() + self.temporal_head.reset_parameters() + self.mlp_mixer.reset_parameters() + def forward( self, + edge_index: Tensor, edge_attr: Tensor, edge_time: Tensor, - edge_index: Tensor, - num_nodes: Optional[int] = None, + seed_time: Tensor, ) -> Tensor: - """ + r""" Args: + edge_index (torch.Tensor): The edge indices. edge_attr (torch.Tensor): The edge features of shape :obj:`[num_edges, in_channels]`. edge_time (torch.Tensor): The time tensor of shape :obj:`[num_edges]`. This can be in the order of millions. - edge_index (torch.Tensor): The edge indicies. - num_nodes (int, optional): The number of nodes in the graph. - (default: :obj:`None`) + seed_time (torch.Tensor): The seed time :math:`t_0` for every + destination node. Returns: A node embedding tensor of shape :obj:`[num_nodes, out_channels]`. 
""" - num_nodes = maybe_num_nodes(edge_index, num_nodes) - time_info = self.temporal_encoder(edge_time) - edge_attr_time = torch.cat((time_info, edge_attr), dim=1) - edge_attr_time = self.temporal_encoder_head(edge_attr_time) - - if not self.is_sorted: - edge_attr_time = get_latest_k_edge_attrs(self.K, edge_index, - edge_time, edge_attr_time, - num_nodes) - - return self.mlp_mixer( - edge_attr_time.view(-1, self.K, self.hidden_channels)) - - def __repr__(self): - return (f"{self.__class__.__name__}(" - f"K={self.K}, " - f"in_channels={self.in_channels}, " - f"hidden_channels={self.hidden_channels}, " - f"out_channels={self.out_channels}, " - f"time_channels={self.time_channels}, " - f"dropout={self.dropout})") + mask = edge_time <= seed_time[edge_index[1]] + + edge_index = edge_index[:, mask] + edge_attr = edge_attr[mask] + edge_time = edge_time[mask] + + time_enc = self.temporal_encoder(seed_time[edge_index[1]] - edge_time) + edge_attr = torch.cat([time_enc, edge_attr], dim=-1) + edge_attr = self.temporal_head(edge_attr) + + edge_attr = get_latest_k_edge_attr( + k=self.k, + edge_index=edge_index, + edge_attr=edge_attr, + edge_time=edge_time, + num_nodes=seed_time.size(0), + is_sorted=self.is_sorted, + ) + + return self.mlp_mixer(edge_attr) + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}(k={self.k}, ' + f'in_channels={self.in_channels}, ' + f'hidden_channels={self.hidden_channels}, ' + f'out_channels={self.out_channels}, ' + f'time_channels={self.time_channels}, ' + f'dropout={self.dropout})') From ba54c72512859bdd4996f5e64ea5566d5d74db0a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 21 Jun 2023 17:12:03 +0200 Subject: [PATCH 1309/2432] Fix `DDPStrategy` and `LinkLoader` in PyTorchLightning (#7624) Fixes https://github.com/pyg-team/pytorch_geometric/issues/7610 --- torch_geometric/loader/link_loader.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/torch_geometric/loader/link_loader.py b/torch_geometric/loader/link_loader.py index d34a331405cc..06f2a854702a 100644 --- a/torch_geometric/loader/link_loader.py +++ b/torch_geometric/loader/link_loader.py @@ -139,6 +139,8 @@ def __init__( # Remove for PyTorch Lightning: kwargs.pop('dataset', None) kwargs.pop('collate_fn', None) + # Save for PyTorch Lightning: + self.edge_label_index = edge_label_index if neg_sampling_ratio is not None and neg_sampling_ratio != 0.0: # TODO: Deprecation warning. From 613e473c5b51ee501cda45447f121a3b3b6e7d9e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 22 Jun 2023 08:48:31 +0200 Subject: [PATCH 1310/2432] Make `Data.keys` a method rather than a property (#7629) Ensures consistency with `dict` and fixes https://github.com/pyg-team/pytorch_geometric/discussions/7626 --- CHANGELOG.md | 1 + docs/source/get_started/introduction.rst | 2 +- test/data/test_data.py | 4 ++-- test/data/test_hetero_data.py | 4 ++-- test/data/test_temporal.py | 4 ++-- test/distributed/test_partition.py | 16 +++++++++++++--- test/transforms/test_pad.py | 14 +++++++------- torch_geometric/data/data.py | 7 +++---- torch_geometric/explain/explanation.py | 2 +- torch_geometric/graphgym/loader.py | 4 ++-- torch_geometric/loader/dense_data_loader.py | 2 +- 11 files changed, 35 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aea479654d08..43d93d370164 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Breaking Change: Made `Data.keys` a method rather than a property ([#7629](https://github.com/pyg-team/pytorch_geometric/pull/7629)) - Added a `num_edges` parameter to the forward method of `HypergraphConv` ([#7560](https://github.com/pyg-team/pytorch_geometric/pull/7560)) - Fixed `get_mesh_laplacian` for `normalization="sym"` ([#7544](https://github.com/pyg-team/pytorch_geometric/pull/7544)) - Use `dim_size` to initialize output size of the `EquilibriumAggregation` layer ([#7530](https://github.com/pyg-team/pytorch_geometric/pull/7530)) diff --git a/docs/source/get_started/introduction.rst b/docs/source/get_started/introduction.rst index 5616c32f57d4..c26629d77b49 100644 --- a/docs/source/get_started/introduction.rst +++ b/docs/source/get_started/introduction.rst @@ -86,7 +86,7 @@ Besides holding a number of node-level, edge-level or graph-level attributes, :c .. code-block:: python - print(data.keys) + print(data.keys()) >>> ['x', 'edge_index'] print(data['x']) diff --git a/test/data/test_data.py b/test/data/test_data.py index b38578c7f936..144c94fa1421 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -30,7 +30,7 @@ def test_data(): assert data.get('y', 2) == 2 assert data.get('y', None) is None - assert sorted(data.keys) == ['edge_index', 'x'] + assert sorted(data.keys()) == ['edge_index', 'x'] assert len(data) == 2 assert 'x' in data and 'edge_index' in data and 'pos' not in data @@ -81,7 +81,7 @@ def test_data(): dictionary = {'x': data.x, 'edge_index': data.edge_index} data = Data.from_dict(dictionary) - assert sorted(data.keys) == ['edge_index', 'x'] + assert sorted(data.keys()) == ['edge_index', 'x'] assert not data.has_isolated_nodes() assert not data.has_self_loops() diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 5698ce7a0312..75a37ee7789c 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -113,7 +113,7 @@ def test_hetero_data_functions(): data['author', 'paper'].edge_index = edge_index_author_paper data['paper', 'paper'].edge_attr = edge_attr_paper_paper assert len(data) == 3 - assert sorted(data.keys) == ['edge_attr', 'edge_index', 'x'] + assert sorted(data.keys()) == ['edge_attr', 'edge_index', 'x'] assert 'x' in data and 'edge_index' in data and 'edge_attr' in data assert data.num_nodes == 15 assert data.num_edges == 110 @@ -145,7 +145,7 @@ def test_hetero_data_functions(): data.y = 0 assert data['y'] == 0 and data.y == 0 assert len(data) == 4 - assert sorted(data.keys) == ['edge_attr', 'edge_index', 'x', 'y'] + assert sorted(data.keys()) == ['edge_attr', 'edge_index', 'x', 'y'] del data['paper', 'author'] node_types, edge_types = data.metadata() diff --git a/test/data/test_temporal.py b/test/data/test_temporal.py index 7c6acfc061c5..3d066ac28e45 100644 --- a/test/data/test_temporal.py +++ b/test/data/test_temporal.py @@ -32,8 +32,8 @@ def test_temporal_data(): del data.edge_index assert data.edge_index.tolist() == [[0, 1, 2], [3, 4, 5]] - assert sorted(data.keys) == ['dst', 'msg', 'src', 't', 'y'] - assert sorted(data.to_dict().keys()) == sorted(data.keys) + assert sorted(data.keys()) == ['dst', 'msg', 'src', 't', 'y'] + assert sorted(data.to_dict().keys()) == sorted(data.keys()) data_tuple = data.to_namedtuple() assert len(data_tuple) == 5 diff --git a/test/distributed/test_partition.py b/test/distributed/test_partition.py index 272c2184fd7a..ee98c93a25bb 100644 --- a/test/distributed/test_partition.py +++ b/test/distributed/test_partition.py @@ -1,14 +1,24 @@ import os.path 
as osp +import pytest import torch from torch_geometric.datasets import FakeDataset, FakeHeteroDataset from torch_geometric.distributed import Partitioner -from torch_geometric.testing import withPackage from torch_geometric.typing import EdgeTypeStr +try: + # TODO Using `pyg-lib` metis partitioning leads to some weird bugs in the + # CI. As such, we require `torch-sparse` for these tests for now. + rowptr = torch.tensor([0, 1]) + col = torch.tensor([0]) + torch.ops.torch_sparse.partition(rowptr, col, None, 1, True) + WITH_METIS = True +except (AttributeError, RuntimeError): + WITH_METIS = False -@withPackage('pyg_lib') + +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_partition_data(tmp_path): data = FakeDataset()[0] num_parts = 2 @@ -55,7 +65,7 @@ def test_partition_data(tmp_path): node_feats1['feats']['x']) -@withPackage('pyg_lib') +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_partition_hetero_data(tmp_path): data = FakeHeteroDataset()[0] num_parts = 2 diff --git a/test/transforms/test_pad.py b/test/transforms/test_pad.py index 69225a942c37..40c8223d9ab8 100644 --- a/test/transforms/test_pad.py +++ b/test/transforms/test_pad.py @@ -29,13 +29,13 @@ def fake_hetero_data(node_types=2, edge_types=5) -> HeteroData: def _generate_homodata_node_attrs(data: Data) -> Generator[str, None, None]: - for attr in data.keys: + for attr in data.keys(): if data.is_node_attr(attr): yield attr def _generate_homodata_edge_attrs(data: Data) -> Generator[str, None, None]: - for attr in data.keys: + for attr in data.keys(): if data.is_edge_attr(attr): yield attr @@ -75,10 +75,10 @@ def _check_homo_data_nodes( for attr in _generate_homodata_node_attrs(original): if attr in exclude_keys: - assert attr not in padded.keys + assert attr not in padded.keys() continue - assert attr in padded.keys + assert attr in padded.keys() if not isinstance(padded[attr], torch.Tensor): continue @@ -133,10 +133,10 @@ def _check_homo_data_edges( if attr == 'edge_index': continue if attr in exclude_keys: - assert attr not in padded.keys + assert attr not in padded.keys() continue - assert attr in padded.keys + assert attr in padded.keys() if not isinstance(padded[attr], torch.Tensor): continue @@ -474,7 +474,7 @@ def test_pad_data_non_tensor_attr(): exclude_transform = Pad(max_num_nodes=101, exclude_keys=('batch_size', )) padded = exclude_transform(data) - assert 'batch_size' not in padded.keys + assert 'batch_size' not in padded.keys() @pytest.mark.parametrize('mask_pad_value', [True, False]) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 06e2b371f8a5..0ed967dd1a07 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -127,7 +127,6 @@ def debug(self): ########################################################################### - @property def keys(self) -> List[str]: r"""Returns a list of all graph attribute names.""" out = [] @@ -137,12 +136,12 @@ def keys(self) -> List[str]: def __len__(self) -> int: r"""Returns the number of graph attributes.""" - return len(self.keys) + return len(self.keys()) def __contains__(self, key: str) -> bool: r"""Returns :obj:`True` if the attribute :obj:`key` is present in the data.""" - return key in self.keys + return key in self.keys() def __getstate__(self) -> Dict[str, Any]: return self.__dict__ @@ -738,7 +737,7 @@ def to_heterogeneous( data[key][attr] = value.index_select(cat_dim, edge_ids[i]) # Add global attributes. 
- exclude_keys = set(data.keys) | { + exclude_keys = set(data.keys()) | { 'node_type', 'edge_type', 'edge_index', 'num_nodes', 'ptr' } for attr, value in self.items(): diff --git a/torch_geometric/explain/explanation.py b/torch_geometric/explain/explanation.py index 5c2ec411892c..57e4b98ed7ce 100644 --- a/torch_geometric/explain/explanation.py +++ b/torch_geometric/explain/explanation.py @@ -15,7 +15,7 @@ class ExplanationMixin: @property def available_explanations(self) -> List[str]: """Returns the available explanation masks.""" - return [key for key in self.keys if key.endswith('_mask')] + return [key for key in self.keys() if key.endswith('_mask')] def validate_masks(self, raise_on_error: bool = True) -> bool: r"""Validates the correctness of the :class:`Explanation` masks.""" diff --git a/torch_geometric/graphgym/loader.py b/torch_geometric/graphgym/loader.py index b3cf6f3dd73f..0dfeb55051da 100644 --- a/torch_geometric/graphgym/loader.py +++ b/torch_geometric/graphgym/loader.py @@ -222,11 +222,11 @@ def set_dataset_info(dataset): # count number of dataset splits cfg.share.num_splits = 1 - for key in dataset._data.keys: + for key in dataset._data.keys(): if 'val' in key: cfg.share.num_splits += 1 break - for key in dataset._data.keys: + for key in dataset._data.keys(): if 'test' in key: cfg.share.num_splits += 1 break diff --git a/torch_geometric/loader/dense_data_loader.py b/torch_geometric/loader/dense_data_loader.py index 2488c0399cd3..0ffc8a334120 100644 --- a/torch_geometric/loader/dense_data_loader.py +++ b/torch_geometric/loader/dense_data_loader.py @@ -8,7 +8,7 @@ def collate_fn(data_list: List[Data]) -> Batch: batch = Batch() - for key in data_list[0].keys: + for key in data_list[0].keys(): batch[key] = default_collate([data[key] for data in data_list]) return batch From 0f315467dc65fce9f07b77069fbc2d9da7e91a28 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 22 Jun 2023 14:50:08 +0200 Subject: [PATCH 1311/2432] Clean-up `GenerateMeshNormals` transform (#7632) --- ...{test_generate_normals.py => test_generate_mesh_normals.py} | 2 +- torch_geometric/transforms/generate_mesh_normals.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) rename test/transforms/{test_generate_normals.py => test_generate_mesh_normals.py} (95%) diff --git a/test/transforms/test_generate_normals.py b/test/transforms/test_generate_mesh_normals.py similarity index 95% rename from test/transforms/test_generate_normals.py rename to test/transforms/test_generate_mesh_normals.py index a426c1996a50..4ff0b8c5f404 100644 --- a/test/transforms/test_generate_normals.py +++ b/test/transforms/test_generate_mesh_normals.py @@ -4,7 +4,7 @@ from torch_geometric.transforms import GenerateMeshNormals -def test_generate_normals(): +def test_generate_mesh_normals(): transform = GenerateMeshNormals() assert str(transform) == 'GenerateMeshNormals()' diff --git a/torch_geometric/transforms/generate_mesh_normals.py b/torch_geometric/transforms/generate_mesh_normals.py index 0bc5e5e6816b..461a8122bb15 100644 --- a/torch_geometric/transforms/generate_mesh_normals.py +++ b/torch_geometric/transforms/generate_mesh_normals.py @@ -1,4 +1,3 @@ -import torch import torch.nn.functional as F from torch_geometric.data import Data @@ -19,8 +18,8 @@ def forward(self, data: Data) -> Data: vec2 = pos[face[2]] - pos[face[0]] face_norm = F.normalize(vec1.cross(vec2), p=2, dim=-1) # [F, 3] - idx = torch.cat([face[0], face[1], face[2]], dim=0) face_norm = face_norm.repeat(3, 1) + idx = face.view(-1) norm = scatter(face_norm, idx, 
0, pos.size(0), reduce='sum') norm = F.normalize(norm, p=2, dim=-1) # [N, 3] From ac4c8048da7bcdda2352a6cb7618e76f6c99ab33 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Thu, 22 Jun 2023 06:24:00 -0700 Subject: [PATCH 1312/2432] Using `Select` and `Connect` modules in pooling layers (#7625) need to fix failing CI --------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- torch_geometric/nn/pool/asap.py | 8 ++++-- torch_geometric/nn/pool/pan_pool.py | 32 ++++++++++++------------ torch_geometric/nn/pool/sag_pool.py | 38 ++++++++++++++--------------- 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 43d93d370164..e790c3a51eb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,7 +40,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339)) - Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326)) -- Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308)) +- Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308), [#7625](https://github.com/pyg-team/pytorch_geometric/pull/7625)) - Added support for tuples as keys in `ModuleDict`/`ParameterDict` ([#7294](https://github.com/pyg-team/pytorch_geometric/pull/7294)) - Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894)) - Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275)) diff --git a/torch_geometric/nn/pool/asap.py b/torch_geometric/nn/pool/asap.py index ec391c43601c..c87aad1bfbd5 100644 --- a/torch_geometric/nn/pool/asap.py +++ b/torch_geometric/nn/pool/asap.py @@ -7,7 +7,7 @@ from torch.nn import Linear from torch_geometric.nn import LEConv -from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.nn.pool.select import SelectTopK from torch_geometric.utils import ( add_remaining_self_loops, remove_self_loops, @@ -68,6 +68,9 @@ def __init__(self, in_channels: int, ratio: Union[float, int] = 0.5, **kwargs) else: self.gnn_intra_cluster = None + + self.select = SelectTopK(1, ratio) + self.reset_parameters() def reset_parameters(self): @@ -77,6 +80,7 @@ def reset_parameters(self): self.gnn_score.reset_parameters() if self.gnn_intra_cluster is not None: self.gnn_intra_cluster.reset_parameters() + self.select.reset_parameters() def forward( self, @@ -135,7 +139,7 @@ def forward( # Cluster selection. 
fitness = self.gnn_score(x, edge_index).sigmoid().view(-1) - perm = topk(fitness, self.ratio, batch) + perm = self.select(fitness, batch).node_index x = x[perm] * fitness[perm].view(-1, 1) batch = batch[perm] diff --git a/torch_geometric/nn/pool/pan_pool.py b/torch_geometric/nn/pool/pan_pool.py index 174c63c90f57..74ad2fa95e98 100644 --- a/torch_geometric/nn/pool/pan_pool.py +++ b/torch_geometric/nn/pool/pan_pool.py @@ -4,10 +4,10 @@ from torch import Tensor from torch.nn import Parameter -from torch_geometric.nn.pool.connect.filter_edges import filter_adj -from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.nn.pool.connect import FilterEdges +from torch_geometric.nn.pool.select import SelectTopK from torch_geometric.typing import OptTensor, SparseTensor -from torch_geometric.utils import scatter, softmax +from torch_geometric.utils import scatter class PANPooling(torch.nn.Module): @@ -48,17 +48,15 @@ def __init__( ): super().__init__() - if isinstance(nonlinearity, str): - nonlinearity = getattr(torch, nonlinearity) - self.in_channels = in_channels self.ratio = ratio self.min_score = min_score self.multiplier = multiplier - self.nonlinearity = nonlinearity self.p = Parameter(torch.Tensor(in_channels)) self.beta = Parameter(torch.Tensor(2)) + self.select = SelectTopK(1, ratio, min_score, nonlinearity) + self.connect = FilterEdges() self.reset_parameters() @@ -66,6 +64,7 @@ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" self.p.data.fill_(1) self.beta.data.fill_(0.5) + self.select.reset_parameters() def forward( self, @@ -91,21 +90,20 @@ def forward( score2 = scatter(edge_weight, col, 0, dim_size=x.size(0), reduce='sum') score = self.beta[0] * score1 + self.beta[1] * score2 - if self.min_score is None: - score = self.nonlinearity(score) - else: - score = softmax(score, batch) + select_out = self.select(score, batch) - perm = topk(score, self.ratio, batch, self.min_score) - x = x[perm] * score[perm].view(-1, 1) + perm = select_out.node_index + score = select_out.weight + assert score is not None + + x = x[perm] * score.view(-1, 1) x = self.multiplier * x if self.multiplier != 1 else x edge_index = torch.stack([col, row], dim=0) - edge_index, edge_weight = filter_adj(edge_index, edge_weight, perm, - num_nodes=score.size(0)) - assert edge_weight is not None + connect_out = self.connect(select_out, edge_index, edge_weight, batch) - return x, edge_index, edge_weight, batch[perm], perm, score[perm] + return (x, connect_out.edge_index, connect_out.edge_attr, + connect_out.batch, perm, score) def __repr__(self) -> str: if self.min_score is None: diff --git a/torch_geometric/nn/pool/sag_pool.py b/torch_geometric/nn/pool/sag_pool.py index ec18f26f37f8..83eef5bb7051 100644 --- a/torch_geometric/nn/pool/sag_pool.py +++ b/torch_geometric/nn/pool/sag_pool.py @@ -4,10 +4,9 @@ from torch import Tensor from torch_geometric.nn import GraphConv -from torch_geometric.nn.pool.connect.filter_edges import filter_adj -from torch_geometric.nn.pool.select.topk import topk +from torch_geometric.nn.pool.connect import FilterEdges +from torch_geometric.nn.pool.select import SelectTopK from torch_geometric.typing import OptTensor -from torch_geometric.utils import softmax class SAGPooling(torch.nn.Module): @@ -82,21 +81,21 @@ def __init__( ): super().__init__() - if isinstance(nonlinearity, str): - nonlinearity = getattr(torch, nonlinearity) - self.in_channels = in_channels self.ratio = ratio - self.gnn = GNN(in_channels, 1, **kwargs) self.min_score = min_score 
self.multiplier = multiplier - self.nonlinearity = nonlinearity + + self.gnn = GNN(in_channels, 1, **kwargs) + self.select = SelectTopK(1, ratio, min_score, nonlinearity) + self.connect = FilterEdges() self.reset_parameters() def reset_parameters(self): r"""Resets all learnable parameters of the module.""" self.gnn.reset_parameters() + self.select.reset_parameters() def forward( self, @@ -123,23 +122,22 @@ def forward( batch = edge_index.new_zeros(x.size(0)) attn = x if attn is None else attn - attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn - score = self.gnn(attn, edge_index).view(-1) + attn = attn.view(-1, 1) if attn.dim() == 1 else attn + attn = self.gnn(attn, edge_index) - if self.min_score is None: - score = self.nonlinearity(score) - else: - score = softmax(score, batch) + select_out = self.select(attn, batch) + + perm = select_out.node_index + score = select_out.weight + assert score is not None - perm = topk(score, self.ratio, batch, self.min_score) - x = x[perm] * score[perm].view(-1, 1) + x = x[perm] * score.view(-1, 1) x = self.multiplier * x if self.multiplier != 1 else x - batch = batch[perm] - edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, - num_nodes=score.size(0)) + connect_out = self.connect(select_out, edge_index, edge_attr, batch) - return x, edge_index, edge_attr, batch, perm, score[perm] + return (x, connect_out.edge_index, connect_out.edge_attr, + connect_out.batch, perm, score) def __repr__(self) -> str: if self.min_score is None: From 125b59560c23a9dfa09bf931e4e93069b8745930 Mon Sep 17 00:00:00 2001 From: ZhengHongming888 Date: Thu, 22 Jun 2023 08:22:14 -0700 Subject: [PATCH 1313/2432] Add `from_partition` in `LocalGraphStore`/`LocalFeatureStore` (#7628) This code belongs to the part of the whole distributed training for PyG. This PR will replace the previous PR (#7604). The main difference is to totally initialize the graph and features separately accord to the comments from PR (#7604) So this class will do 1. Add the from_partition() function in LocalGraphStore/LocalFeatureStore to load partition .pt files for further initialization. 2. Separately initialize the graph and features by LocalGraphStore.from_data()/LocalFeatureStore.from_data() which will give more freedom to graph and features, or in memory or in others. 3. LocalGraphStore.from_partition() will only initialize the graph data 4. LocalFeatureStore.from_partition() will only initialize the feature data The files related - distributed/local_graph_store.py distributed/local_feature_store.py test/test_init_graph_feature_from_partition.py to cover the graph/feature initialization from partition based on FakeDataset/FakeHeteroDataset for both homo/hetero. Any comments please let us know. thanks --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/distributed/test_partition.py | 69 ++++++++++++++++++- .../distributed/local_feature_store.py | 52 +++++++++++++- .../distributed/local_graph_store.py | 30 ++++++++ 4 files changed, 150 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e790c3a51eb2..4b51ec3094d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_partition.py b/test/distributed/test_partition.py index ee98c93a25bb..21906533a4df 100644 --- a/test/distributed/test_partition.py +++ b/test/distributed/test_partition.py @@ -4,7 +4,11 @@ import torch from torch_geometric.datasets import FakeDataset, FakeHeteroDataset -from torch_geometric.distributed import Partitioner +from torch_geometric.distributed import ( + LocalFeatureStore, + LocalGraphStore, + Partitioner, +) from torch_geometric.typing import EdgeTypeStr try: @@ -97,3 +101,66 @@ def test_partition_hetero_data(tmp_path): assert osp.exists(node_feats_path) edge_feats_path = osp.join(tmp_path, f'part_{pid}', 'edge_feats.pt') assert osp.exists(edge_feats_path) + + +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') +def test_from_partition_data(tmp_path): + data = FakeDataset()[0] + num_parts = 2 + + partitioner = Partitioner(data, num_parts, tmp_path) + partitioner.generate_partition() + + graph_store1 = LocalGraphStore.from_partition(tmp_path, pid=0) + graph_store2 = LocalGraphStore.from_partition(tmp_path, pid=1) + + attr1 = graph_store1.get_all_edge_attrs()[0] + (row1, col1) = graph_store1.get_edge_index(attr1) + attr2 = graph_store2.get_all_edge_attrs()[0] + (row2, col2) = graph_store2.get_edge_index(attr2) + assert row1.size(0) + row2.size(0) == data.num_edges + + feat_store1 = LocalFeatureStore.from_partition(tmp_path, pid=0) + feat_store2 = LocalFeatureStore.from_partition(tmp_path, pid=1) + + node_attr1 = feat_store1.get_all_tensor_attrs()[0] + assert node_attr1.attr_name == 'x' + x1 = feat_store1.get_tensor(node_attr1) + id1 = feat_store1.get_global_id(node_attr1.group_name) + + node_attr2 = feat_store2.get_all_tensor_attrs()[0] + assert node_attr2.attr_name == 'x' + x2 = feat_store2.get_tensor(node_attr2) + id2 = feat_store2.get_global_id(node_attr2.group_name) + + assert x1.size(0) + x2.size(0) == data.num_nodes + assert torch.allclose(data.x[id1], x1) + assert torch.allclose(data.x[id2], x2) + + +@pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') +def test_from_partition_hetero_data(tmp_path): + data = 
FakeHeteroDataset()[0] + num_parts = 2 + + partitioner = Partitioner(data, num_parts, tmp_path) + partitioner.generate_partition() + + graph_store1 = LocalGraphStore.from_partition(tmp_path, pid=0) + graph_store2 = LocalGraphStore.from_partition(tmp_path, pid=1) + + attrs1 = graph_store1.get_all_edge_attrs() + attrs2 = graph_store2.get_all_edge_attrs() + assert len(data.edge_types) == len(attrs1) == len(attrs2) + + node_types = set() + for attr in attrs1: + node_types.add(attr.edge_type[0]) + node_types.add(attr.edge_type[2]) + assert node_types == set(data.node_types) + + node_types = set() + for attr in attrs2: + node_types.add(attr.edge_type[0]) + node_types.add(attr.edge_type[2]) + assert node_types == set(data.node_types) diff --git a/torch_geometric/distributed/local_feature_store.py b/torch_geometric/distributed/local_feature_store.py index 76442c005f02..9cc5c395e0fb 100644 --- a/torch_geometric/distributed/local_feature_store.py +++ b/torch_geometric/distributed/local_feature_store.py @@ -1,6 +1,8 @@ import copy +import json +import os.path as osp from dataclasses import dataclass -from typing import Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import Tensor @@ -190,3 +192,51 @@ def from_hetero_data( attr_name='edge_attr') return feat_store + + @classmethod + def from_partition(cls, root: str, pid: int) -> 'LocalFeatureStore': + with open(osp.join(root, 'META.json'), 'r') as f: + meta = json.load(f) + + part_dir = osp.join(root, f'part_{pid}') + assert osp.exists(part_dir) + + node_feats: Optional[Dict[str, Any]] = None + if osp.exists(osp.join(part_dir, 'node_feats.pt')): + node_feats = torch.load(osp.join(part_dir, 'node_feats.pt')) + + edge_feats: Optional[Dict[str, Any]] = None + if osp.exists(osp.join(part_dir, 'edge_feats.pt')): + edge_feats = torch.load(osp.join(part_dir, 'edge_feats.pt')) + + feat_store = cls() + + if not meta['is_hetero'] and node_feats is not None: + feat_store.put_global_id(node_feats['global_id'], group_name=None) + for key, value in node_feats['feats'].items(): + feat_store.put_tensor(value, group_name=None, attr_name=key) + + if not meta['is_hetero'] and edge_feats is not None: + feat_store.put_global_id(edge_feats['global_id'], + group_name=(None, None)) + for key, value in edge_feats['feats'].items(): + feat_store.put_tensor(value, group_name=(None, None), + attr_name=key) + + if meta['is_hetero'] and node_feats is not None: + for node_type, node_feat in node_feats.items(): + feat_store.put_global_id(node_feat['global_id'], + group_name=node_type) + for key, value in node_feat['feats'].items(): + feat_store.put_tensor(value, group_name=node_type, + attr_name=key) + + if meta['is_hetero'] and edge_feats is not None: + for edge_type, edge_feat in edge_feats.items(): + feat_store.put_global_id(edge_feat['global_id'], + group_name=edge_type) + for key, value in edge_feat['feats'].items(): + feat_store.put_tensor(value, group_name=edge_type, + attr_name=key) + + return feat_store diff --git a/torch_geometric/distributed/local_graph_store.py b/torch_geometric/distributed/local_graph_store.py index ecdaf7dfd367..fb05f2ecc659 100644 --- a/torch_geometric/distributed/local_graph_store.py +++ b/torch_geometric/distributed/local_graph_store.py @@ -1,5 +1,8 @@ +import json +import os.path as osp from typing import Dict, List, Optional, Tuple +import torch from torch import Tensor from torch_geometric.data import EdgeAttr, GraphStore @@ -108,3 +111,30 @@ def from_hetero_data( 
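            # Register the edge index and its global edge IDs with the store.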
graph_store.put_edge_index(edge_index, **attr) graph_store.put_edge_id(edge_id, **attr) return graph_store + + @classmethod + def from_partition(cls, root: str, pid: int) -> 'LocalGraphStore': + with open(osp.join(root, 'META.json'), 'r') as f: + meta = json.load(f) + + part_dir = osp.join(root, f'part_{pid}') + assert osp.exists(part_dir) + + graph_data = torch.load(osp.join(part_dir, 'graph.pt')) + + graph_store = cls() + + if not meta['is_hetero']: + attr = dict(edge_type=None, layout='coo', size=graph_data['size']) + graph_store.put_edge_index((graph_data['row'], graph_data['col']), + **attr) + graph_store.put_edge_id(graph_data['edge_id'], **attr) + + if meta['is_hetero']: + for edge_type, data in graph_data.items(): + attr = dict(edge_type=edge_type, layout='coo', + size=data['size']) + graph_store.put_edge_index((data['row'], data['col']), **attr) + graph_store.put_edge_id(data['edge_id'], **attr) + + return graph_store From 46098557d3718f5be91517ddb367005a56a096aa Mon Sep 17 00:00:00 2001 From: rusty1s Date: Fri, 23 Jun 2023 06:27:16 +0000 Subject: [PATCH 1314/2432] update --- torch_geometric/nn/pool/pan_pool.py | 8 +++++--- torch_geometric/nn/pool/sag_pool.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/torch_geometric/nn/pool/pan_pool.py b/torch_geometric/nn/pool/pan_pool.py index 74ad2fa95e98..4bcac3920275 100644 --- a/torch_geometric/nn/pool/pan_pool.py +++ b/torch_geometric/nn/pool/pan_pool.py @@ -71,7 +71,7 @@ def forward( x: Tensor, M: SparseTensor, batch: OptTensor = None, - ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: + ) -> Tuple[Tensor, Tensor, Tensor, OptTensor, Tensor, Tensor]: r""" Args: x (torch.Tensor): The node feature matrix. @@ -101,9 +101,11 @@ def forward( edge_index = torch.stack([col, row], dim=0) connect_out = self.connect(select_out, edge_index, edge_weight, batch) + edge_weight = connect_out.edge_attr + assert edge_weight is not None - return (x, connect_out.edge_index, connect_out.edge_attr, - connect_out.batch, perm, score) + return (x, connect_out.edge_index, edge_weight, connect_out.batch, + perm, score) def __repr__(self) -> str: if self.min_score is None: diff --git a/torch_geometric/nn/pool/sag_pool.py b/torch_geometric/nn/pool/sag_pool.py index 83eef5bb7051..8ae8c5068604 100644 --- a/torch_geometric/nn/pool/sag_pool.py +++ b/torch_geometric/nn/pool/sag_pool.py @@ -104,7 +104,7 @@ def forward( edge_attr: OptTensor = None, batch: OptTensor = None, attn: OptTensor = None, - ) -> Tuple[Tensor, Tensor, OptTensor, Tensor, Tensor, Tensor]: + ) -> Tuple[Tensor, Tensor, OptTensor, OptTensor, Tensor, Tensor]: r""" Args: x (torch.Tensor): The node feature matrix. From 35f51f0c10b69f12621b31f7c7059edc27cfbc5b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 23 Jun 2023 16:42:56 +0200 Subject: [PATCH 1315/2432] Error out in case of bipartite message passing via `GCNConv` (#7637) --- torch_geometric/nn/conv/gcn_conv.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/torch_geometric/nn/conv/gcn_conv.py b/torch_geometric/nn/conv/gcn_conv.py index 7ad729ae9cde..12cf423970fb 100644 --- a/torch_geometric/nn/conv/gcn_conv.py +++ b/torch_geometric/nn/conv/gcn_conv.py @@ -203,6 +203,13 @@ def reset_parameters(self): def forward(self, x: Tensor, edge_index: Adj, edge_weight: OptTensor = None) -> Tensor: + if isinstance(x, (tuple, list)): + raise ValueError(f"'{self.__class__.__name__}' received a tuple " + f"of node features as input while this layer " + f"does not support bipartite message passing. 
" + f"Please try other layers such as 'SAGEConv' or " + f"'GraphConv' instead") + if self.normalize: if isinstance(edge_index, Tensor): cache = self._cached_edge_index From fc2bc16c215de0abe55547a409409c6b2d49c256 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Sun, 25 Jun 2023 02:15:40 -0700 Subject: [PATCH 1316/2432] Add `index_add` benchmark (#7638) using: https://github.com/puririshi98/rgcn_pyg_lib_forward_bench/blob/main/scatter_v_index_bench.py I find that index_add is faster ``` original implementation takes 3.62396240234375e-07 s/iter new implementation takes 2.765655517578125e-07 s/iter ``` however using test/utils/test_scatter.py I am seeing the opposite result ``` Benchmarking w/ (num_nodes, num_edges) = (1000, 50000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 0.0512s | | Optimized PyG Scatter | 0.0699s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (2000, 100000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 0.0752s | | Optimized PyG Scatter | 0.1124s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (4000, 200000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 0.1227s | | Optimized PyG Scatter | 0.2087s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (8000, 400000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 0.2408s | | Optimized PyG Scatter | 0.4129s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (16000, 800000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 0.5714s | | Optimized PyG Scatter | 0.8529s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (32000, 1600000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 1.3682s | | Optimized PyG Scatter | 1.8042s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (64000, 3200000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 3.2057s | | Optimized PyG Scatter | 3.7839s | +-----------------------+-----------+ Benchmarking w/ (num_nodes, num_edges) = (128000, 6400000) Aggregator: sum +-----------------------+-----------+ | Name | Forward | |-----------------------+-----------| | PyTorch Scatter | 6.9272s | | Optimized PyG Scatter | 7.8657s | +-----------------------+-----------+ ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/utils/test_scatter.py | 51 ++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/test/utils/test_scatter.py b/test/utils/test_scatter.py index d93f170008b5..3f8e9b1b453c 100644 --- a/test/utils/test_scatter.py +++ b/test/utils/test_scatter.py @@ -1,3 +1,5 @@ +from itertools import product + import pytest import torch @@ -101,28 +103,39 @@ def test_scatter_argmax(device): # * Prefer `torch_sparse` implementation with gradients import argparse - import torch_scatter + from torch_geometric.typing import 
WITH_TORCH_SCATTER, torch_scatter parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda') parser.add_argument('--backward', action='http://github.com/store_true') + parser.add_argument('--aggr', type=str, default='all') args = parser.parse_args() - num_nodes, num_edges = 1_000, 50_000 - x = torch.randn(num_edges, 64, device=args.device) - index = torch.randint(num_nodes, (num_edges, ), device=args.device) + num_nodes_list = [4_000, 8_000, 16_000, 32_000, 64_000] + + if args.aggr == 'all': + aggrs = ['sum', 'mean', 'min', 'max', 'mul'] + else: + aggrs = args.aggr.split(',') def pytorch_scatter(x, index, dim_size, reduce): if reduce == 'min' or reduce == 'max': reduce = f'a{aggr}' # `amin` or `amax` elif reduce == 'mul': reduce = 'prod' - out = x.new_zeros((dim_size, x.size(-1))) + out = x.new_zeros(dim_size, x.size(-1)) include_self = reduce in ['sum', 'mean'] index = index.view(-1, 1).expand(-1, x.size(-1)) out.scatter_reduce_(0, index, x, reduce, include_self=include_self) return out + def pytorch_index_add(x, index, dim_size, reduce): + if reduce != 'sum': + raise NotImplementedError + out = x.new_zeros(dim_size, x.size(-1)) + out.index_add_(0, index, x) + return out + def own_scatter(x, index, dim_size, reduce): return torch_scatter.scatter(x, index, dim=0, dim_size=num_nodes, reduce=reduce) @@ -130,12 +143,30 @@ def own_scatter(x, index, dim_size, reduce): def optimized_scatter(x, index, dim_size, reduce): return scatter(x, index, dim=0, dim_size=dim_size, reduce=reduce) - aggrs = ['sum', 'mean', 'min', 'max', 'mul'] - for aggr in aggrs: - print(f'Aggregator: {aggr}') + for aggr, num_nodes in product(aggrs, num_nodes_list): + num_edges = num_nodes * 50 + print(f'aggr: {aggr}, #nodes: {num_nodes}, #edges: {num_edges}') + + x = torch.randn(num_edges, 64, device=args.device) + index = torch.randint(num_nodes, (num_edges, ), device=args.device) + + funcs = [pytorch_scatter] + func_names = ['PyTorch scatter_reduce'] + + if aggr == 'sum': + funcs.append(pytorch_index_add) + func_names.append('PyTorch index_add') + + if WITH_TORCH_SCATTER: + funcs.append(own_scatter) + func_names.append('torch_scatter') + + funcs.append(optimized_scatter) + func_names.append('Optimized PyG Scatter') + benchmark( - funcs=[pytorch_scatter, own_scatter, optimized_scatter], - func_names=['PyTorch', 'torch_scatter', 'Optimized'], + funcs=funcs, + func_names=func_names, args=(x, index, num_nodes, aggr), num_steps=100 if args.device == 'cpu' else 1000, num_warmups=50 if args.device == 'cpu' else 500, From e8260da7bd86add4a0c00f2812b163cb300d8a73 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 25 Jun 2023 12:11:20 +0200 Subject: [PATCH 1317/2432] Fix link in `NodePropertySplit` (#7639) --- torch_geometric/transforms/node_property_split.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/transforms/node_property_split.py b/torch_geometric/transforms/node_property_split.py index 1d540e36a7a1..9a9f9159805d 100644 --- a/torch_geometric/transforms/node_property_split.py +++ b/torch_geometric/transforms/node_property_split.py @@ -14,7 +14,7 @@ class NodePropertySplit(BaseTransform): r"""Creates a node-level split with distributional shift based on a given node property, as proposed in the `"Evaluating Robustness and Uncertainty of Graph Models Under Structural Distributional Shifts" - `__ paper + `__ paper (functional name: :obj:`node_property_split`). 
It splits the nodes in a given graph into five non-intersecting parts From b60fa62eb80060849ae98410f18d720a4043bf5e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 26 Jun 2023 15:44:29 +0200 Subject: [PATCH 1318/2432] `torch.nested_tensor` support in `Data` and `Batch` (#7643) Fixes https://github.com/pyg-team/pytorch_geometric/discussions/7622 --- CHANGELOG.md | 1 + test/data/test_batch.py | 17 +++++++++++++++++ torch_geometric/data/collate.py | 9 ++++++++- torch_geometric/data/data.py | 2 ++ torch_geometric/data/storage.py | 10 ++++++++-- 5 files changed, 36 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b51ec3094d5..48f0052c83dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643)) - Added `interval` argument to `Cartesian` and `LocalCartesian` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614)) - Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) - Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) diff --git a/test/data/test_batch.py b/test/data/test_batch.py index ca5f46324ede..86f1d523b74c 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -3,6 +3,7 @@ import numpy as np import pytest import torch +from torch.nested import nested_tensor import torch_geometric from torch_geometric.data import Batch, Data, HeteroData @@ -517,3 +518,19 @@ def test_torch_sparse_batch(layout): out = to_edge_index(batch.adj.to_sparse(layout=torch.sparse_csr)) assert torch.equal(out[0], torch.cat([edge_index, edge_index + 3], 1)) assert torch.equal(out[1], torch.cat([edge_attr, edge_attr], 0)) + + +def test_torch_nested_batch(): + x1 = nested_tensor([torch.randn(3), torch.randn(4)]) + data1 = Data(x=x1) + assert str(data1) == 'Data(x=[2, 4])' + + x2 = nested_tensor([torch.randn(3), torch.randn(4), torch.randn(5)]) + data2 = Data(x=x2) + assert str(data2) == 'Data(x=[3, 5])' + + batch = Batch.from_data_list([data1, data2]) + assert str(batch) == 'DataBatch(x=[5, 5], batch=[5], ptr=[3])' + + x = nested_tensor(list(x1.unbind() + x2.unbind())).to_padded_tensor(0.0) + assert torch.equal(batch.x.to_padded_tensor(0.0), x) diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index a9896916dc91..cc5c1c6444f7 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -159,7 +159,14 @@ def _collate( else: out = None - value = torch.cat(values, dim=cat_dim or 0, out=out) + if elem.is_nested: + tensors = [] + for nested_tensor in values: + tensors.extend(nested_tensor.unbind()) + value = torch.nested.nested_tensor(tensors) + else: + value = torch.cat(values, dim=cat_dim or 0, out=out) + return value, slices, incs elif is_sparse(elem) and increment: diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 0ed967dd1a07..a9fa1f237925 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -955,6 +955,8 @@ def size_repr(key: Any, value: Any, indent: int = 0) -> str: pad = ' ' * indent if isinstance(value, Tensor) and value.dim() == 0: out = value.item() + elif isinstance(value, Tensor) and value.is_nested: + out = 
str(list(value.to_padded_tensor(padding=0.0).size())) elif isinstance(value, Tensor): out = str(list(value.size())) elif isinstance(value, np.ndarray): diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index f43cf1254354..b46b0484a82a 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -296,11 +296,17 @@ def num_nodes(self) -> Optional[int]: if 'num_nodes' in self: return self['num_nodes'] for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and key in N_KEYS: + if isinstance(value, Tensor) and key in N_KEYS: + cat_dim = self._parent().__cat_dim__(key, value, self) + return value.size(cat_dim) + if isinstance(value, np.ndarray) and key in N_KEYS: cat_dim = self._parent().__cat_dim__(key, value, self) return value.shape[cat_dim] for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and 'node' in key: + if isinstance(value, Tensor) and 'node' in key: + cat_dim = self._parent().__cat_dim__(key, value, self) + return value.size(cat_dim) + if isinstance(value, np.ndarray) and 'node' in key: cat_dim = self._parent().__cat_dim__(key, value, self) return value.shape[cat_dim] if 'adj' in self and isinstance(self.adj, SparseTensor): From fc32568934e75c92fc96e45e3b48b3482812d3f4 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 27 Jun 2023 08:25:29 +0200 Subject: [PATCH 1319/2432] Test `__inc__` with nested tensors (#7647) --- CHANGELOG.md | 2 +- test/data/test_batch.py | 21 ++++++++++++++------- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 48f0052c83dd..97cf602cd3ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643)) +- Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643), [#7647](https://github.com/pyg-team/pytorch_geometric/pull/7647)) - Added `interval` argument to `Cartesian` and `LocalCartesian` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614)) - Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) - Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 86f1d523b74c..3c83a8d4bf08 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -521,16 +521,23 @@ def test_torch_sparse_batch(layout): def test_torch_nested_batch(): + class MyData(Data): + def __inc__(self, key, value, *args, **kwargs) -> int: + return 2 + x1 = nested_tensor([torch.randn(3), torch.randn(4)]) - data1 = Data(x=x1) - assert str(data1) == 'Data(x=[2, 4])' + data1 = MyData(x=x1) + assert str(data1) == 'MyData(x=[2, 4])' x2 = nested_tensor([torch.randn(3), torch.randn(4), torch.randn(5)]) - data2 = Data(x=x2) - assert str(data2) == 'Data(x=[3, 5])' + data2 = MyData(x=x2) + assert str(data2) == 'MyData(x=[3, 5])' batch = Batch.from_data_list([data1, data2]) - assert str(batch) == 'DataBatch(x=[5, 5], batch=[5], ptr=[3])' + assert str(batch) == 'MyDataBatch(x=[5, 5], batch=[5], ptr=[3])' - x = nested_tensor(list(x1.unbind() + x2.unbind())).to_padded_tensor(0.0) - assert torch.equal(batch.x.to_padded_tensor(0.0), x) + expected = nested_tensor(list(x1.unbind() + (x2 + 2).unbind())) + assert torch.equal( + batch.x.to_padded_tensor(0.0), + expected.to_padded_tensor(0.0), + ) From 8fd53f6b654396d5d5c848933993ad3e1fb32bf6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 27 Jun 2023 08:42:43 +0200 Subject: [PATCH 1320/2432] Allow usage of `BasicGNN` in `DeepGraphInfomax` (#7648) Fixes https://github.com/pyg-team/pytorch_geometric/discussions/7640 --- CHANGELOG.md | 1 + examples/infomax_transductive.py | 15 ++-- test/nn/models/test_deep_graph_infomax.py | 68 +++++++++++++++---- .../nn/models/deep_graph_infomax.py | 11 ++- 4 files changed, 73 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 97cf602cd3ab..d13eb7fa3e09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,6 +69,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Allowed the usage of `BasicGNN` models in `DeepGraphInfomax` ([#7648](https://github.com/pyg-team/pytorch_geometric/pull/7648)) - Breaking Change: Made `Data.keys` a method rather than a property ([#7629](https://github.com/pyg-team/pytorch_geometric/pull/7629)) - Added a `num_edges` parameter to the forward method of `HypergraphConv` ([#7560](https://github.com/pyg-team/pytorch_geometric/pull/7560)) - Fixed `get_mesh_laplacian` for `normalization="sym"` ([#7544](https://github.com/pyg-team/pytorch_geometric/pull/7544)) diff --git a/examples/infomax_transductive.py b/examples/infomax_transductive.py index e268d51e9e0d..55670e3485a8 100644 --- a/examples/infomax_transductive.py +++ b/examples/infomax_transductive.py @@ -1,7 +1,6 @@ import os.path as osp import torch -import torch.nn as nn from torch_geometric.datasets import Planetoid from torch_geometric.nn import DeepGraphInfomax, GCNConv @@ -11,11 +10,11 @@ dataset = Planetoid(path, dataset) -class Encoder(nn.Module): +class Encoder(torch.nn.Module): def __init__(self, in_channels, hidden_channels): super().__init__() self.conv = GCNConv(in_channels, hidden_channels, cached=True) - self.prelu = nn.PReLU(hidden_channels) + self.prelu = torch.nn.PReLU(hidden_channels) def forward(self, x, edge_index): x = self.conv(x, edge_index) @@ -24,14 +23,16 @@ def forward(self, x, edge_index): def corruption(x, edge_index): - return x[torch.randperm(x.size(0))], edge_index + return x[torch.randperm(x.size(0), device=x.device)], edge_index device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = DeepGraphInfomax( - hidden_channels=512, encoder=Encoder(dataset.num_features, 512), - summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)), - corruption=corruption).to(device) + hidden_channels=512, + encoder=Encoder(dataset.num_features, 512), + summary=lambda z, *args, **kwargs: z.mean(dim=0).sigmoid(), + corruption=corruption, +).to(device) data = dataset[0].to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.001) diff --git a/test/nn/models/test_deep_graph_infomax.py b/test/nn/models/test_deep_graph_infomax.py index a3e5fb49fd65..037c43bb903e 100644 --- a/test/nn/models/test_deep_graph_infomax.py +++ b/test/nn/models/test_deep_graph_infomax.py @@ -1,34 +1,74 @@ import torch -from torch_geometric.nn import DeepGraphInfomax -from torch_geometric.testing import is_full_test +from torch_geometric.nn import GCN, DeepGraphInfomax +from torch_geometric.testing import is_full_test, withCUDA -def test_deep_graph_infomax(): +@withCUDA +def test_infomax(device): def corruption(z): return z + 1 - model = DeepGraphInfomax(hidden_channels=16, encoder=lambda x: x, - summary=lambda z, *args: z.mean(dim=0), - corruption=lambda x: x + 1) - + model = DeepGraphInfomax( + hidden_channels=16, + encoder=lambda x: x, + summary=lambda z, *args: z.mean(dim=0), + corruption=lambda x: x + 1, + ).to(device) assert str(model) == 'DeepGraphInfomax(16)' - x = torch.ones(20, 16) + x = torch.ones(20, 16, device=device) pos_z, neg_z, summary = model(x) - assert pos_z.size() == (20, 16) and neg_z.size() == (20, 16) + assert pos_z.size() == (20, 16) + assert neg_z.size() == (20, 16) assert summary.size() == (16, ) + loss = model.loss(pos_z, neg_z, summary) + assert float(loss) >= 0 + if is_full_test(): jit = torch.jit.export(model) pos_z, neg_z, summary = jit(x) assert pos_z.size() == (20, 16) and neg_z.size() == (20, 16) assert summary.size() == (16, ) - loss = model.loss(pos_z, neg_z, summary) - assert 0 <= loss.item() + acc = 
model.test( + train_z=torch.ones(20, 16), + train_y=torch.randint(10, (20, )), + test_z=torch.ones(20, 16), + test_y=torch.randint(10, (20, )), + ) + assert 0 <= acc <= 1 + + +@withCUDA +def test_infomax_predefined_model(device): + def corruption(x, edge_index, edge_weight): + return ( + x[torch.randperm(x.size(0), device=x.device)], + edge_index, + edge_weight, + ) - acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )), - torch.ones(20, 16), torch.randint(10, (20, ))) - assert 0 <= acc and acc <= 1 + model = DeepGraphInfomax( + hidden_channels=16, + encoder=GCN(16, 16, num_layers=2), + summary=lambda z, *args, **kwargs: z.mean(dim=0).sigmoid(), + corruption=corruption, + ).to(device) + + x = torch.randn(4, 16, device=device) + edge_index = torch.tensor( + [[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]], + device=device, + ) + edge_weight = torch.rand(edge_index.size(1), device=device) + + pos_z, neg_z, summary = model(x, edge_index, edge_weight=edge_weight) + assert pos_z.size() == (4, 16) + assert neg_z.size() == (4, 16) + assert summary.size() == (16, ) + + loss = model.loss(pos_z, neg_z, summary) + assert float(loss) >= 0 diff --git a/torch_geometric/nn/models/deep_graph_infomax.py b/torch_geometric/nn/models/deep_graph_infomax.py index ba75f26dffec..36ff63fdc017 100644 --- a/torch_geometric/nn/models/deep_graph_infomax.py +++ b/torch_geometric/nn/models/deep_graph_infomax.py @@ -1,3 +1,4 @@ +import copy from typing import Callable, Tuple import torch @@ -49,10 +50,18 @@ def forward(self, *args, **kwargs) -> Tuple[Tensor, Tensor, Tensor]: """Returns the latent space for the input arguments, their corruptions and their summary representation.""" pos_z = self.encoder(*args, **kwargs) + cor = self.corruption(*args, **kwargs) cor = cor if isinstance(cor, tuple) else (cor, ) - neg_z = self.encoder(*cor) + cor_args = cor[:len(args)] + cor_kwargs = copy.copy(kwargs) + for key, value in zip(kwargs.keys(), cor[len(args):]): + cor_kwargs[key] = value + + neg_z = self.encoder(*cor_args, **cor_kwargs) + summary = self.summary(pos_z, *args, **kwargs) + return pos_z, neg_z, summary def discriminate(self, z: Tensor, summary: Tensor, From a3604acdb066c7d1766c4d07046a4fb5aaf86698 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 27 Jun 2023 10:28:39 +0200 Subject: [PATCH 1321/2432] Add `Data.sort()` and better documentation on sort requirements in `Aggregation` (#7649) --- CHANGELOG.md | 1 + test/data/test_data.py | 30 ++++++++++++++++ torch_geometric/data/data.py | 41 +++++++++++++++++----- torch_geometric/data/storage.py | 20 ++++++++++- torch_geometric/nn/aggr/base.py | 6 +++- torch_geometric/nn/aggr/gmt.py | 9 +++++ torch_geometric/nn/aggr/gru.py | 10 ++++++ torch_geometric/nn/aggr/lstm.py | 10 ++++++ torch_geometric/nn/aggr/mlp.py | 10 ++++++ torch_geometric/nn/aggr/set2set.py | 1 - torch_geometric/nn/aggr/set_transformer.py | 9 +++++ torch_geometric/nn/aggr/sort.py | 9 +++++ torch_geometric/utils/sort_edge_index.py | 3 +- 13 files changed, 147 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d13eb7fa3e09..be9a97918bc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `Data.sort()` and `HeteroData.sort()` functionalities ([#7649](https://github.com/pyg-team/pytorch_geometric/pull/7649)) - Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643), [#7647](https://github.com/pyg-team/pytorch_geometric/pull/7647)) - Added `interval` argument to `Cartesian` and `LocalCartesian` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614)) - Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) diff --git a/test/data/test_data.py b/test/data/test_data.py index 144c94fa1421..4b4fd3ee1ac3 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -284,6 +284,36 @@ def test_copy_data(): assert data.x.tolist() == out.x.tolist() +def test_data_sort(): + x = torch.randn(4, 16) + edge_index = torch.tensor([[0, 0, 0, 2, 1, 3], [1, 2, 3, 0, 0, 0]]) + edge_attr = torch.randn(6, 8) + + data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) + assert not data.is_sorted(sort_by_row=True) + assert not data.is_sorted(sort_by_row=False) + + out = data.sort(sort_by_row=True) + assert out.is_sorted(sort_by_row=True) + assert not out.is_sorted(sort_by_row=False) + assert torch.equal(out.x, data.x) + assert out.edge_index.tolist() == [[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]] + assert torch.equal( + out.edge_attr, + data.edge_attr[torch.tensor([0, 1, 2, 4, 3, 5])], + ) + + out = data.sort(sort_by_row=False) + assert not out.is_sorted(sort_by_row=True) + assert out.is_sorted(sort_by_row=False) + assert torch.equal(out.x, data.x) + assert out.edge_index.tolist() == [[1, 2, 3, 0, 0, 0], [0, 0, 0, 1, 2, 3]] + assert torch.equal( + out.edge_attr, + data.edge_attr[torch.tensor([4, 3, 5, 0, 1, 2])], + ) + + def test_debug_data(): torch_geometric.set_debug(True) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index a9fa1f237925..35bdaf37e493 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -194,11 +194,6 @@ def edge_attrs(self) -> List[str]: r"""Returns all edge-level tensor attribute names.""" return list(set(chain(*[s.edge_attrs() for s in self.edge_stores]))) - def is_coalesced(self) -> bool: - r"""Returns :obj:`True` if edge indices :obj:`edge_index` are sorted - and do not contain duplicate entries.""" - return all([store.is_coalesced() for store in self.edge_stores]) - def generate_ids(self): r"""Generates and sets :obj:`n_id` and :obj:`e_id` attributes to assign each node and edge to a continuously ascending and unique ID.""" @@ -207,12 +202,42 @@ def generate_ids(self): for store in self.edge_stores: store.e_id = torch.arange(store.num_edges) - def coalesce(self): + def is_sorted(self, sort_by_row: bool = True) -> bool: + r"""Returns :obj:`True` if edge indices :obj:`edge_index` are sorted. + + Args: + sort_by_row (bool, optional): If set to :obj:`False`, will require + column-wise order/by destination node of :obj:`edge_index`. + (default: :obj:`True`) + """ + return all( + [store.is_sorted(sort_by_row) for store in self.edge_stores]) + + def sort(self, sort_by_row: bool = True) -> 'Data': + r"""Sorts edge indices :obj:`edge_index`. + + Args: + sort_by_row (bool, optional): If set to :obj:`False`, will sort + :obj:`edge_index` column-wise order/by destination node. 
+ (default: :obj:`True`) + """ + out = copy.copy(self) + for store in out.edge_stores: + store.sort(sort_by_row) + return out + + def is_coalesced(self) -> bool: + r"""Returns :obj:`True` if edge indices :obj:`edge_index` are sorted + and do not contain duplicate entries.""" + return all([store.is_coalesced() for store in self.edge_stores]) + + def coalesce(self) -> 'Data': r"""Sorts and removes duplicated entries from edge indices :obj:`edge_index`.""" - for store in self.edge_stores: + out = copy.copy(self) + for store in out.edge_stores: store.coalesce() - return self + return out def has_isolated_nodes(self) -> bool: r"""Returns :obj:`True` if the graph contains isolated nodes.""" diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index b46b0484a82a..716d00d92595 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -28,6 +28,7 @@ contains_isolated_nodes, is_torch_sparse_tensor, is_undirected, + sort_edge_index, ) N_KEYS = {'x', 'feat', 'pos', 'batch', 'node_type', 'n_id'} @@ -496,6 +497,23 @@ def is_edge_attr(self, key: str) -> bool: def edge_attrs(self) -> List[str]: return [key for key in self.keys() if self.is_edge_attr(key)] + def is_sorted(self, sort_by_row: bool = True) -> bool: + if 'edge_index' in self: + index = self.edge_index[0] if sort_by_row else self.edge_index[1] + return bool(torch.all(index[:-1] <= index[1:])) + return True + + def sort(self, sort_by_row: bool = True) -> 'EdgeStorage': + if 'edge_index' in self: + edge_attrs = self.edge_attrs() + edge_attrs.remove('edge_index') + edge_feats = [self[edge_attr] for edge_attr in edge_attrs] + self.edge_index, edge_feats = sort_edge_index( + self.edge_index, edge_feats, sort_by_row=sort_by_row) + for key, edge_feat in zip(edge_attrs, edge_feats): + self[key] = edge_feat + return self + def is_coalesced(self) -> bool: for value in self.values('adj', 'adj_t'): return value.is_coalesced() @@ -510,7 +528,7 @@ def is_coalesced(self) -> bool: return True - def coalesce(self, reduce: str = 'sum'): + def coalesce(self, reduce: str = 'sum') -> 'EdgeStorage': for key, value in self.items('adj', 'adj_t'): self[key] = value.coalesce(reduce) diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 593cce8a5ff8..58be08b3dbb9 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -140,7 +140,11 @@ def assert_index_present(self, index: Optional[Tensor]): def assert_sorted_index(self, index: Optional[Tensor]): if index is not None and not torch.all(index[:-1] <= index[1:]): raise ValueError("Can not perform aggregation since the 'index' " - "tensor is not sorted") + "tensor is not sorted. Specifically, if you use " + "this aggregation as part of 'MessagePassing`, " + "ensure that 'edge_index' is sorted by " + "destination nodes, e.g., by calling " + "`data.sort(sort_by_row=False)`") def assert_two_dimensional_input(self, x: Tensor, dim: int): if x.dim() != 2: diff --git a/torch_geometric/nn/aggr/gmt.py b/torch_geometric/nn/aggr/gmt.py index 3f81785b141c..b18bb8d6d5c9 100644 --- a/torch_geometric/nn/aggr/gmt.py +++ b/torch_geometric/nn/aggr/gmt.py @@ -22,6 +22,15 @@ class GraphMultisetTransformer(Aggregation): and finally pools the representative elements via attention-based pooling into a single cluster. + .. note:: + + :class:`GraphMultisetTransformer` requires sorted indices :obj:`index` + as input. 
Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`~torch_geometric.data.Data.sort`. + Args: channels (int): Size of each input sample. k (int): Number of :math:`k` representative nodes after pooling. diff --git a/torch_geometric/nn/aggr/gru.py b/torch_geometric/nn/aggr/gru.py index c731d60252b4..29abbbb2e983 100644 --- a/torch_geometric/nn/aggr/gru.py +++ b/torch_geometric/nn/aggr/gru.py @@ -12,7 +12,17 @@ class GRUAggregation(Aggregation): interpreted as a sequence, as described in the `"Graph Neural Networks with Adaptive Readouts" `_ paper. + .. note:: + + :class:`MLPAggregation` requires sorted indices :obj:`index` as input. + Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`~torch_geometric.data.Data.sort`. + .. warning:: + :class:`GRUAggregation` is not a permutation-invariant operator. Args: diff --git a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py index 25da0b0309a7..6cab8b55faa0 100644 --- a/torch_geometric/nn/aggr/lstm.py +++ b/torch_geometric/nn/aggr/lstm.py @@ -12,7 +12,17 @@ class LSTMAggregation(Aggregation): interpreted as a sequence, as described in the `"Inductive Representation Learning on Large Graphs" `_ paper. + .. note:: + + :class:`LSTMAggregation` requires sorted indices :obj:`index` as input. + Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`~torch_geometric.data.Data.sort`. + .. warning:: + :class:`LSTMAggregation` is not a permutation-invariant operator. Args: diff --git a/torch_geometric/nn/aggr/mlp.py b/torch_geometric/nn/aggr/mlp.py index bfbd34c46103..8410b0e707f1 100644 --- a/torch_geometric/nn/aggr/mlp.py +++ b/torch_geometric/nn/aggr/mlp.py @@ -11,7 +11,17 @@ class MLPAggregation(Aggregation): a Multi-Layer Perceptron (MLP), as described in the `"Graph Neural Networks with Adaptive Readouts" `_ paper. + .. note:: + + :class:`GRUAggregation` requires sorted indices :obj:`index` as input. + Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`~torch_geometric.data.Data.sort`. + .. warning:: + :class:`MLPAggregation` is not a permutation-invariant operator. 
Args: diff --git a/torch_geometric/nn/aggr/set2set.py b/torch_geometric/nn/aggr/set2set.py index 50bc7c4b27b7..0e281e9664f2 100644 --- a/torch_geometric/nn/aggr/set2set.py +++ b/torch_geometric/nn/aggr/set2set.py @@ -44,7 +44,6 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, dim: int = -2) -> Tensor: - # TODO Currently, `to_dense_batch` can only operate on `index`: self.assert_index_present(index) self.assert_two_dimensional_input(x, dim) diff --git a/torch_geometric/nn/aggr/set_transformer.py b/torch_geometric/nn/aggr/set_transformer.py index 01b8379662f9..72afdd07d807 100644 --- a/torch_geometric/nn/aggr/set_transformer.py +++ b/torch_geometric/nn/aggr/set_transformer.py @@ -17,6 +17,15 @@ class SetTransformerAggregation(Aggregation): the `"Graph Neural Networks with Adaptive Readouts" `_ paper. + .. note:: + + :class:`SetTransformerAggregation` requires sorted indices :obj:`index` + as input. Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`~torch_geometric.data.Data.sort`. + Args: channels (int): Size of each input sample. num_seed_points (int, optional): Number of seed points. diff --git a/torch_geometric/nn/aggr/sort.py b/torch_geometric/nn/aggr/sort.py index ba7c216fff71..79ee9a5ab9e4 100644 --- a/torch_geometric/nn/aggr/sort.py +++ b/torch_geometric/nn/aggr/sort.py @@ -14,6 +14,15 @@ class SortAggregation(Aggregation): where node features are sorted in descending order based on their last feature channel. The first :math:`k` nodes form the output of the layer. + .. note:: + + :class:`SortAggregation` requires sorted indices :obj:`index` as input. + Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`~torch_geometric.data.Data.sort`. + Args: k (int): The number of nodes to hold for each graph. """ diff --git a/torch_geometric/utils/sort_edge_index.py b/torch_geometric/utils/sort_edge_index.py index 3b7a6fe27361..d09d0e2306cf 100644 --- a/torch_geometric/utils/sort_edge_index.py +++ b/torch_geometric/utils/sort_edge_index.py @@ -45,7 +45,8 @@ def sort_edge_index( # noqa num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) sort_by_row (bool, optional): If set to :obj:`False`, will sort - :obj:`edge_index` column-wise. + :obj:`edge_index` column-wise/by destination node. 
+ (default: :obj:`True`) :rtype: :class:`LongTensor` if :attr:`edge_attr` is not passed, else (:class:`LongTensor`, :obj:`Optional[Tensor]` or :obj:`List[Tensor]]`) From 2ec1c4bbd9247f054855abd77acd6d63ed97bab3 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 27 Jun 2023 10:34:29 +0200 Subject: [PATCH 1322/2432] Documentation typos (#7650) --- torch_geometric/data/data.py | 9 +++++---- torch_geometric/nn/aggr/gmt.py | 2 +- torch_geometric/nn/aggr/gru.py | 2 +- torch_geometric/nn/aggr/lstm.py | 2 +- torch_geometric/nn/aggr/mlp.py | 2 +- torch_geometric/nn/aggr/set_transformer.py | 2 +- torch_geometric/nn/aggr/sort.py | 2 +- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 35bdaf37e493..9da318d6d4ac 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -207,18 +207,19 @@ def is_sorted(self, sort_by_row: bool = True) -> bool: Args: sort_by_row (bool, optional): If set to :obj:`False`, will require - column-wise order/by destination node of :obj:`edge_index`. - (default: :obj:`True`) + column-wise order/by destination node order of + :obj:`edge_index`. (default: :obj:`True`) """ return all( [store.is_sorted(sort_by_row) for store in self.edge_stores]) def sort(self, sort_by_row: bool = True) -> 'Data': - r"""Sorts edge indices :obj:`edge_index`. + r"""Sorts edge indices :obj:`edge_index` and their corresponding edge + features. Args: sort_by_row (bool, optional): If set to :obj:`False`, will sort - :obj:`edge_index` column-wise order/by destination node. + :obj:`edge_index` in column-wise order/by destination node. (default: :obj:`True`) """ out = copy.copy(self) diff --git a/torch_geometric/nn/aggr/gmt.py b/torch_geometric/nn/aggr/gmt.py index b18bb8d6d5c9..a455d854e6c8 100644 --- a/torch_geometric/nn/aggr/gmt.py +++ b/torch_geometric/nn/aggr/gmt.py @@ -29,7 +29,7 @@ class GraphMultisetTransformer(Aggregation): :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that :obj:`edge_index` is sorted by destination nodes, either by manually sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` - or by calling :meth:`~torch_geometric.data.Data.sort`. + or by calling :meth:`torch_geometric.data.Data.sort`. Args: channels (int): Size of each input sample. diff --git a/torch_geometric/nn/aggr/gru.py b/torch_geometric/nn/aggr/gru.py index 29abbbb2e983..79267897829d 100644 --- a/torch_geometric/nn/aggr/gru.py +++ b/torch_geometric/nn/aggr/gru.py @@ -19,7 +19,7 @@ class GRUAggregation(Aggregation): :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that :obj:`edge_index` is sorted by destination nodes, either by manually sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` - or by calling :meth:`~torch_geometric.data.Data.sort`. + or by calling :meth:`torch_geometric.data.Data.sort`. .. warning:: diff --git a/torch_geometric/nn/aggr/lstm.py b/torch_geometric/nn/aggr/lstm.py index 6cab8b55faa0..889a957b796a 100644 --- a/torch_geometric/nn/aggr/lstm.py +++ b/torch_geometric/nn/aggr/lstm.py @@ -19,7 +19,7 @@ class LSTMAggregation(Aggregation): :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that :obj:`edge_index` is sorted by destination nodes, either by manually sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` - or by calling :meth:`~torch_geometric.data.Data.sort`. + or by calling :meth:`torch_geometric.data.Data.sort`. .. 
warning:: diff --git a/torch_geometric/nn/aggr/mlp.py b/torch_geometric/nn/aggr/mlp.py index 8410b0e707f1..99a72986783d 100644 --- a/torch_geometric/nn/aggr/mlp.py +++ b/torch_geometric/nn/aggr/mlp.py @@ -18,7 +18,7 @@ class MLPAggregation(Aggregation): :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that :obj:`edge_index` is sorted by destination nodes, either by manually sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` - or by calling :meth:`~torch_geometric.data.Data.sort`. + or by calling :meth:`torch_geometric.data.Data.sort`. .. warning:: diff --git a/torch_geometric/nn/aggr/set_transformer.py b/torch_geometric/nn/aggr/set_transformer.py index 72afdd07d807..20e6ba17cadb 100644 --- a/torch_geometric/nn/aggr/set_transformer.py +++ b/torch_geometric/nn/aggr/set_transformer.py @@ -24,7 +24,7 @@ class SetTransformerAggregation(Aggregation): :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that :obj:`edge_index` is sorted by destination nodes, either by manually sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` - or by calling :meth:`~torch_geometric.data.Data.sort`. + or by calling :meth:`torch_geometric.data.Data.sort`. Args: channels (int): Size of each input sample. diff --git a/torch_geometric/nn/aggr/sort.py b/torch_geometric/nn/aggr/sort.py index 79ee9a5ab9e4..12f21f19431a 100644 --- a/torch_geometric/nn/aggr/sort.py +++ b/torch_geometric/nn/aggr/sort.py @@ -21,7 +21,7 @@ class SortAggregation(Aggregation): :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that :obj:`edge_index` is sorted by destination nodes, either by manually sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` - or by calling :meth:`~torch_geometric.data.Data.sort`. + or by calling :meth:`torch_geometric.data.Data.sort`. Args: k (int): The number of nodes to hold for each graph. From 10b737322cfb39376a3d535df0c1a11c91f5adaa Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 27 Jun 2023 14:02:21 +0200 Subject: [PATCH 1323/2432] Only add true negatives in `add_random_edge` augmentation (#7654) Fixes #7653 --- CHANGELOG.md | 1 + test/utils/test_augmentation.py | 33 ++++++------ torch_geometric/utils/__init__.py | 8 +-- torch_geometric/utils/augmentation.py | 59 ++++++++++++---------- torch_geometric/utils/negative_sampling.py | 12 +++-- 5 files changed, 60 insertions(+), 53 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be9a97918bc7..f9b973be76a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Changed `add_random_edge` to only add true negative edges ([#7654](https://github.com/pyg-team/pytorch_geometric/pull/7654)) - Allowed the usage of `BasicGNN` models in `DeepGraphInfomax` ([#7648](https://github.com/pyg-team/pytorch_geometric/pull/7648)) - Breaking Change: Made `Data.keys` a method rather than a property ([#7629](https://github.com/pyg-team/pytorch_geometric/pull/7629)) - Added a `num_edges` parameter to the forward method of `HypergraphConv` ([#7560](https://github.com/pyg-team/pytorch_geometric/pull/7560)) diff --git a/test/utils/test_augmentation.py b/test/utils/test_augmentation.py index 2c6ad8f2f200..68c4e343cdc6 100644 --- a/test/utils/test_augmentation.py +++ b/test/utils/test_augmentation.py @@ -1,6 +1,7 @@ import pytest import torch +from torch_geometric import seed_everything from torch_geometric.utils import ( add_random_edge, is_undirected, @@ -77,28 +78,26 @@ def test_add_random_edge(): assert out[0].tolist() == edge_index.tolist() assert out[1].tolist() == [[], []] - torch.manual_seed(5) + seed_everything(5) out = add_random_edge(edge_index, p=0.5) - assert out[0].tolist() == [[0, 1, 1, 2, 2, 3, 3, 2, 3], - [1, 0, 2, 1, 3, 2, 1, 2, 2]] - - assert out[1].tolist() == [[3, 2, 3], [1, 2, 2]] + assert out[0].tolist() == [[0, 1, 1, 2, 2, 3, 3, 1, 2], + [1, 0, 2, 1, 3, 2, 0, 3, 0]] + assert out[1].tolist() == [[3, 1, 2], [0, 3, 0]] - torch.manual_seed(6) + seed_everything(6) out = add_random_edge(edge_index, p=0.5, force_undirected=True) - assert out[0].tolist() == [[0, 1, 1, 2, 2, 3, 1, 2], - [1, 0, 2, 1, 3, 2, 2, 1]] - assert out[1].tolist() == [[1, 2], [2, 1]] + assert out[0].tolist() == [[0, 1, 1, 2, 2, 3, 1, 3], + [1, 0, 2, 1, 3, 2, 3, 1]] + assert out[1].tolist() == [[1, 3], [3, 1]] assert is_undirected(out[0]) assert is_undirected(out[1]) - # test with bipartite graph - torch.manual_seed(7) + # Test for bipartite graph: + seed_everything(7) edge_index = torch.tensor([[0, 1, 2, 3, 4, 5], [2, 3, 1, 4, 2, 1]]) - with pytest.raises(RuntimeError, - match="not supported for heterogeneous graphs"): - out = add_random_edge(edge_index, p=0.5, force_undirected=True, - num_nodes=(6, 5)) + with pytest.raises(RuntimeError, match="not supported for bipartite"): + add_random_edge(edge_index, force_undirected=True, num_nodes=(6, 5)) out = add_random_edge(edge_index, p=0.5, num_nodes=(6, 5)) - out[0].tolist() == [[0, 1, 2, 3, 4, 5, 3, 4, 1], - [2, 3, 1, 4, 2, 1, 1, 3, 2]] + assert out[0].tolist() == [[0, 1, 2, 3, 4, 5, 2, 0, 2], + [2, 3, 1, 4, 2, 1, 0, 4, 2]] + assert out[1].tolist() == [[2, 0, 2], [0, 4, 2]] diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 552a83c363cc..951559adf2bd 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -6,7 +6,6 @@ from .degree import degree from .softmax import softmax from .dropout import dropout_adj, dropout_node, dropout_edge, dropout_path -from .augmentation import shuffle_node, mask_feature, add_random_edge from .sort_edge_index import sort_edge_index from .coalesce import coalesce from .undirected import is_undirected, to_undirected @@ -47,6 +46,7 @@ from .negative_sampling import (negative_sampling, batched_negative_sampling, structured_negative_sampling, structured_negative_sampling_feasible) +from .augmentation import shuffle_node, mask_feature, add_random_edge from .tree_decomposition import tree_decomposition from .embedding import get_embeddings from .trim_to_layer import trim_to_layer @@ -62,9 +62,6 @@ 'dropout_edge', 'dropout_path', 
'dropout_adj', - 'shuffle_node', - 'mask_feature', - 'add_random_edge', 'sort_edge_index', 'coalesce', 'is_undirected', @@ -130,6 +127,9 @@ 'batched_negative_sampling', 'structured_negative_sampling', 'structured_negative_sampling_feasible', + 'shuffle_node', + 'mask_feature', + 'add_random_edge', 'tree_decomposition', 'get_embeddings', 'trim_to_layer', diff --git a/torch_geometric/utils/augmentation.py b/torch_geometric/utils/augmentation.py index 82b07a6758fc..830145319dce 100644 --- a/torch_geometric/utils/augmentation.py +++ b/torch_geometric/utils/augmentation.py @@ -3,12 +3,14 @@ import torch from torch import Tensor -from torch_geometric.utils import scatter -from torch_geometric.utils.num_nodes import maybe_num_nodes +from torch_geometric.utils import negative_sampling, scatter -def shuffle_node(x: Tensor, batch: Optional[Tensor] = None, - training: bool = True) -> Tuple[Tensor, Tensor]: +def shuffle_node( + x: Tensor, + batch: Optional[Tensor] = None, + training: bool = True, +) -> Tuple[Tensor, Tensor]: r"""Randomly shuffle the feature matrix :obj:`x` along the first dimmension. @@ -67,9 +69,13 @@ def shuffle_node(x: Tensor, batch: Optional[Tensor] = None, return x[perm], perm -def mask_feature(x: Tensor, p: float = 0.5, mode: str = 'col', - fill_value: float = 0., - training: bool = True) -> Tuple[Tensor, Tensor]: +def mask_feature( + x: Tensor, + p: float = 0.5, + mode: str = 'col', + fill_value: float = 0., + training: bool = True, +) -> Tuple[Tensor, Tensor]: r"""Randomly masks feature from the feature matrix :obj:`x` with probability :obj:`p` using samples from a Bernoulli distribution. @@ -149,9 +155,13 @@ def mask_feature(x: Tensor, p: float = 0.5, mode: str = 'col', return x, mask -def add_random_edge(edge_index, p: float, force_undirected: bool = False, - num_nodes: Optional[Union[Tuple[int], int]] = None, - training: bool = True) -> Tuple[Tensor, Tensor]: +def add_random_edge( + edge_index, + p: float = 0.5, + force_undirected: bool = False, + num_nodes: Optional[Union[int, Tuple[int, int]]] = None, + training: bool = True, +) -> Tuple[Tensor, Tensor]: r"""Randomly adds edges to :obj:`edge_index`. The method returns (1) the retained :obj:`edge_index`, (2) the added @@ -160,6 +170,7 @@ def add_random_edge(edge_index, p: float, force_undirected: bool = False, Args: edge_index (LongTensor): The edge indices. p (float): Ratio of added edges to the existing edges. + (default: :obj:`0.5`) force_undirected (bool, optional): If set to :obj:`True`, added edges will be undirected. (default: :obj:`False`) @@ -208,30 +219,24 @@ def add_random_edge(edge_index, p: float, force_undirected: bool = False, [1, 3, 2]]) """ if p < 0. 
or p > 1.: - raise ValueError(f'Ratio of added edges has to be between 0 and 1 ' - f'(got {p}') + raise ValueError(f"Ratio of added edges has to be between 0 and 1 " + f"(got '{p}')") if force_undirected and isinstance(num_nodes, (tuple, list)): - raise RuntimeError('`force_undirected` is not supported for' - ' heterogeneous graphs') + raise RuntimeError("'force_undirected' is not supported for " + "bipartite graphs") device = edge_index.device if not training or p == 0.0: edge_index_to_add = torch.tensor([[], []], device=device) return edge_index, edge_index_to_add - if not isinstance(num_nodes, (tuple, list)): - num_nodes = (num_nodes, num_nodes) - num_src_nodes = maybe_num_nodes(edge_index, num_nodes[0]) - num_dst_nodes = maybe_num_nodes(edge_index, num_nodes[1]) - - num_edges_to_add = round(edge_index.size(1) * p) - row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, )) - col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, )) + edge_index_to_add = negative_sampling( + edge_index=edge_index, + num_nodes=num_nodes, + num_neg_samples=round(edge_index.size(1) * p), + force_undirected=force_undirected, + ) - if force_undirected: - mask = row < col - row, col = row[mask], col[mask] - row, col = torch.cat([row, col]), torch.cat([col, row]) - edge_index_to_add = torch.stack([row, col], dim=0).to(device) edge_index = torch.cat([edge_index, edge_index_to_add], dim=1) + return edge_index, edge_index_to_add diff --git a/torch_geometric/utils/negative_sampling.py b/torch_geometric/utils/negative_sampling.py index d02826a219ba..b7e0d0f0981d 100644 --- a/torch_geometric/utils/negative_sampling.py +++ b/torch_geometric/utils/negative_sampling.py @@ -9,11 +9,13 @@ from torch_geometric.utils.num_nodes import maybe_num_nodes -def negative_sampling(edge_index: Tensor, - num_nodes: Optional[Union[int, Tuple[int, int]]] = None, - num_neg_samples: Optional[int] = None, - method: str = "sparse", - force_undirected: bool = False) -> Tensor: +def negative_sampling( + edge_index: Tensor, + num_nodes: Optional[Union[int, Tuple[int, int]]] = None, + num_neg_samples: Optional[int] = None, + method: str = "sparse", + force_undirected: bool = False, +) -> Tensor: r"""Samples random negative edges of a graph given by :attr:`edge_index`. 
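# Usage sketch (not part of the patch) for the `add_random_edge` change above
# (#7654): since candidate edges are now drawn via `negative_sampling`, every
# returned "added" edge is a true negative, i.e. it can never duplicate an
# edge that already exists in `edge_index`. Exact tensor values depend on the
# random seed, so only the invariant is checked here.
import torch

from torch_geometric import seed_everything
from torch_geometric.utils import add_random_edge

seed_everything(5)
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])

out, added = add_random_edge(edge_index, p=0.5)

existing = {tuple(e) for e in edge_index.t().tolist()}
assert all(tuple(e) not in existing for e in added.t().tolist())
assert out.size(1) == edge_index.size(1) + added.size(1)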
Args: From 1b4cb52c6842aa2555be28f5d768c4e2da6668a2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 27 Jun 2023 15:42:10 +0200 Subject: [PATCH 1324/2432] Fix paper links in GNN cheatsheet (#7655) Fixes #7651 --- docs/source/cheatsheet/gnn_cheatsheet.rst | 8 ++++---- torch_geometric/datasets/utils/cheatsheet.py | 2 +- torch_geometric/nn/conv/utils/cheatsheet.py | 5 +++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/source/cheatsheet/gnn_cheatsheet.rst b/docs/source/cheatsheet/gnn_cheatsheet.rst index 6d4469235bb7..646aa49205ba 100644 --- a/docs/source/cheatsheet/gnn_cheatsheet.rst +++ b/docs/source/cheatsheet/gnn_cheatsheet.rst @@ -31,7 +31,7 @@ Graph Neural Network Operators {% if not torch_geometric.nn.conv.utils.processes_heterogeneous_graphs(cls) and not torch_geometric.nn.conv.utils.processes_hypergraphs(cls) and not torch_geometric.nn.conv.utils.processes_point_clouds(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_weights(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_features(cls) %}✓{% endif %} @@ -57,7 +57,7 @@ Heterogeneous Graph Neural Network Operators - lazy {% for cls in torch_geometric.nn.conv.classes[1:] %} {% if torch_geometric.nn.conv.utils.processes_heterogeneous_graphs(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_weights(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_features(cls) %}✓{% endif %} @@ -83,7 +83,7 @@ Hypergraph Neural Network Operators - lazy {% for cls in torch_geometric.nn.conv.classes[1:] %} {% if torch_geometric.nn.conv.utils.processes_hypergraphs(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_weights(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_features(cls) %}✓{% endif %} @@ -105,7 +105,7 @@ Point Cloud Neural Network Operators - lazy {% for cls in torch_geometric.nn.conv.classes[1:] %} {% if torch_geometric.nn.conv.utils.processes_point_clouds(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_bipartite_graphs(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_lazy_initialization(cls) 
%}✓{% endif %} {% endif %} diff --git a/torch_geometric/datasets/utils/cheatsheet.py b/torch_geometric/datasets/utils/cheatsheet.py index ad5c084e17e4..821fcae2177c 100644 --- a/torch_geometric/datasets/utils/cheatsheet.py +++ b/torch_geometric/datasets/utils/cheatsheet.py @@ -4,7 +4,7 @@ from typing import Any, List, Optional -def paper_link(cls: str) -> str: +def paper_link(cls: str) -> Optional[str]: cls = importlib.import_module('torch_geometric.datasets').__dict__[cls] match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL) return None if match is None else match.group().replace('\n', ' ')[1:-1] diff --git a/torch_geometric/nn/conv/utils/cheatsheet.py b/torch_geometric/nn/conv/utils/cheatsheet.py index 03f2d0e3728c..4586a32d6067 100644 --- a/torch_geometric/nn/conv/utils/cheatsheet.py +++ b/torch_geometric/nn/conv/utils/cheatsheet.py @@ -1,15 +1,16 @@ import importlib import inspect import re +from typing import Optional -def paper_title(cls: str) -> str: +def paper_title(cls: str) -> Optional[str]: cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] match = re.search('`\".+?\"', inspect.getdoc(cls), flags=re.DOTALL) return None if match is None else match.group().replace('\n', ' ')[2:-1] -def paper_link(cls: str) -> str: +def paper_link(cls: str) -> Optional[str]: cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL) return None if match is None else match.group().replace('\n', ' ')[1:-1] From c0eb49f95198183e8cf15007bbd286c9ced7829a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 29 Jun 2023 06:10:56 +0200 Subject: [PATCH 1325/2432] Test PyG on PyTorch >= 1.11 (#7656) --- .github/actions/setup/action.yml | 5 +-- .github/workflows/prev_testing.yml | 18 ++++++++-- CHANGELOG.md | 1 + test/data/lightning/test_datamodule.py | 4 +++ test/data/test_batch.py | 4 ++- test/data/test_graph_store.py | 7 +++- test/datasets/test_bzr.py | 4 ++- test/datasets/test_elliptic.py | 3 +- test/datasets/test_enzymes.py | 4 +++ test/datasets/test_imdb_binary.py | 3 +- test/datasets/test_mutag.py | 5 +++ test/datasets/test_planetoid.py | 13 +++++-- test/datasets/test_snap_dataset.py | 5 ++- test/datasets/test_suite_sparse.py | 4 ++- test/graphgym/test_graphgym.py | 5 ++- test/loader/test_cluster.py | 3 +- test/loader/test_hgt_loader.py | 7 +++- test/loader/test_neighbor_loader.py | 40 +++++++++++++-------- test/loader/test_neighbor_sampler.py | 3 +- test/nn/conv/test_gat_conv.py | 10 +++--- test/nn/conv/test_gatv2_conv.py | 10 +++--- test/nn/conv/test_gps_conv.py | 10 +++--- test/nn/conv/test_heat_conv.py | 3 +- test/nn/conv/test_hgt_conv.py | 5 ++- test/nn/conv/test_message_passing.py | 11 +++--- test/nn/conv/test_rgcn_conv.py | 4 ++- test/nn/dense/test_linear.py | 1 + test/nn/models/test_basic_gnn.py | 3 ++ test/nn/models/test_dimenet.py | 1 + test/nn/models/test_graph_unet.py | 3 +- test/nn/pool/connect/test_filter_edges.py | 3 +- test/nn/pool/test_asap.py | 8 ++++- test/nn/pool/test_pan_pool.py | 3 +- test/nn/pool/test_sag_pool.py | 3 +- test/nn/pool/test_topk_pool.py | 3 +- test/nn/test_to_hetero_module.py | 2 ++ test/nn/test_to_hetero_transformer.py | 2 ++ test/profile/test_profile.py | 10 +++++- test/profile/test_profiler.py | 3 +- test/transforms/test_feature_propagation.py | 2 ++ test/transforms/test_random_link_split.py | 7 +++- test/transforms/test_rooted_subgraph.py | 2 ++ test/transforms/test_sign.py | 2 ++ test/transforms/test_to_sparse_tensor.py | 21 +++++++---- 
test/transforms/test_to_superpixels.py | 3 +- test/utils/test_nested.py | 4 +++ test/utils/test_scatter.py | 1 + test/utils/test_sparse.py | 13 ++++--- test/utils/test_spmm.py | 1 + torch_geometric/data/collate.py | 23 ++++++------ torch_geometric/data/data.py | 2 +- torch_geometric/nn/pool/connect/base.py | 6 +++- torch_geometric/nn/pool/select/base.py | 7 +++- torch_geometric/testing/__init__.py | 2 ++ torch_geometric/testing/decorators.py | 22 ++++++++++++ torch_geometric/typing.py | 27 ++++++++++++++ torch_geometric/utils/scatter.py | 18 +++++----- torch_geometric/utils/sparse.py | 36 +++++++++++++++---- torch_geometric/utils/spmm.py | 3 +- 59 files changed, 335 insertions(+), 103 deletions(-) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index ff73c90022b6..c7935465aa47 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -49,12 +49,13 @@ runs: - name: Install pyg-lib # pyg-lib is currently only available on Linux. if: ${{ inputs.torch-version != 'nightly' && runner.os == 'Linux' }} run: | - pip install pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html + pip install --no-index pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash - name: Install extension packages if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' }} run: | pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} - pip install torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html + pip install scipy + pip install --no-index torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash diff --git a/.github/workflows/prev_testing.yml b/.github/workflows/prev_testing.yml index 9d73dc6636ce..499113982654 100644 --- a/.github/workflows/prev_testing.yml +++ b/.github/workflows/prev_testing.yml @@ -1,4 +1,4 @@ -name: Testing PyTorch 1.13 +name: Testing previous PyTorch versions on: # yamllint disable-line rule:truthy push: @@ -11,6 +11,18 @@ jobs: prev_pytest: runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + torch-version: [1.11.0, 1.12.0, 1.13.0] + include: + - torch-version: 1.11.0 + torchvision-version: 0.12.0 + - torch-version: 1.12.0 + torchvision-version: 0.13.0 + - torch-version: 1.13.0 + torchvision-version: 0.14.0 + steps: - name: Checkout repository uses: actions/checkout@v3 @@ -36,8 +48,8 @@ jobs: if: steps.changed-files-specific.outputs.only_changed != 'true' uses: ./.github/actions/setup with: - torch-version: 1.13.0 - torchvision-version: 0.14.0 + torch-version: ${{ matrix.torch-version }} + torchvision-version: ${{ matrix.torchvision-version }} - name: Install main package if: steps.changed-files-specific.outputs.only_changed != 'true' diff --git a/CHANGELOG.md b/CHANGELOG.md index f9b973be76a3..08a3673dbd7a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added back support for PyTorch >= 1.11.0 ([#7656](https://github.com/pyg-team/pytorch_geometric/pull/7656)) - Added `Data.sort()` and `HeteroData.sort()` functionalities ([#7649](https://github.com/pyg-team/pytorch_geometric/pull/7649)) - Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643), [#7647](https://github.com/pyg-team/pytorch_geometric/pull/7647)) - Added `interval` argument to `Cartesian` and `LocalCartesian` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614)) diff --git a/test/data/lightning/test_datamodule.py b/test/data/lightning/test_datamodule.py index 55b7803ab52d..46457184ba8f 100644 --- a/test/data/lightning/test_datamodule.py +++ b/test/data/lightning/test_datamodule.py @@ -19,6 +19,7 @@ get_random_edge_index, onlyCUDA, onlyFullTest, + onlyOnline, withPackage, ) @@ -73,6 +74,7 @@ def configure_optimizers(self): @onlyCUDA +@onlyOnline @onlyFullTest @withPackage('pytorch_lightning>=2.0.0') @withPackage('torchmetrics>=0.11.0') @@ -176,6 +178,7 @@ def configure_optimizers(self): @onlyCUDA +@onlyOnline @onlyFullTest @withPackage('pyg_lib') @withPackage('pytorch_lightning>=2.0.0') @@ -423,6 +426,7 @@ def test_lightning_hetero_link_data_custom_store(): assert 'edge_label_index' in batch['author', 'paper'] +@onlyOnline @withPackage('pyg_lib') @withPackage('pytorch_lightning') def test_eval_loader_kwargs(get_dataset): diff --git a/test/data/test_batch.py b/test/data/test_batch.py index 3c83a8d4bf08..e382ca9506c1 100644 --- a/test/data/test_batch.py +++ b/test/data/test_batch.py @@ -3,7 +3,6 @@ import numpy as np import pytest import torch -from torch.nested import nested_tensor import torch_geometric from torch_geometric.data import Batch, Data, HeteroData @@ -520,7 +519,10 @@ def test_torch_sparse_batch(layout): assert torch.equal(out[1], torch.cat([edge_attr, edge_attr], 0)) +@withPackage('torch>=1.13.0') def test_torch_nested_batch(): + from torch.nested import nested_tensor + class MyData(Data): def __inc__(self, key, value, *args, **kwargs) -> int: return 2 diff --git a/test/data/test_graph_store.py b/test/data/test_graph_store.py index 8628bf699c6e..3c7e9882d3b2 100644 --- a/test/data/test_graph_store.py +++ b/test/data/test_graph_store.py @@ -2,7 +2,11 @@ import torch from torch_geometric.data.graph_store import EdgeAttr, EdgeLayout -from torch_geometric.testing import MyGraphStore, get_random_edge_index +from torch_geometric.testing import ( + MyGraphStore, + get_random_edge_index, + withPackage, +) from torch_geometric.utils import ( to_torch_coo_tensor, to_torch_csc_tensor, @@ -40,6 +44,7 @@ def test_graph_store(): graph_store['edge_type_2', 'coo'] +@withPackage('torch>=1.12.0') def test_graph_store_conversion(): graph_store = MyGraphStore() diff --git a/test/datasets/test_bzr.py b/test/datasets/test_bzr.py index c7c662f007f1..57bad05f3783 100644 --- a/test/datasets/test_bzr.py +++ b/test/datasets/test_bzr.py @@ -1,6 +1,7 @@ -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, onlyOnline +@onlyOnline @onlyFullTest def test_bzr(get_dataset): dataset = get_dataset(name='BZR') @@ -13,6 +14,7 @@ def test_bzr(get_dataset): assert len(dataset[0]) == 3 +@onlyOnline @onlyFullTest def test_bzr_with_node_attr(get_dataset): dataset = get_dataset(name='BZR', use_node_attr=True) diff --git a/test/datasets/test_elliptic.py b/test/datasets/test_elliptic.py 
index ea770d7a922c..b7b17b15ac53 100644 --- a/test/datasets/test_elliptic.py +++ b/test/datasets/test_elliptic.py @@ -1,6 +1,7 @@ -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, onlyOnline +@onlyOnline @onlyFullTest def test_elliptic_bitcoin_dataset(get_dataset): dataset = get_dataset(name='EllipticBitcoinDataset') diff --git a/test/datasets/test_enzymes.py b/test/datasets/test_enzymes.py index 45c16383f1ba..4b7cc07665cb 100644 --- a/test/datasets/test_enzymes.py +++ b/test/datasets/test_enzymes.py @@ -2,9 +2,11 @@ import torch from torch_geometric.loader import DataListLoader, DataLoader, DenseDataLoader +from torch_geometric.testing import onlyOnline from torch_geometric.transforms import ToDense +@onlyOnline def test_enzymes(get_dataset): dataset = get_dataset(name='ENZYMES') assert len(dataset) == 600 @@ -54,6 +56,7 @@ def test_enzymes(get_dataset): assert list(data.y.size()) == [600, 1] +@onlyOnline def test_enzymes_with_node_attr(get_dataset): dataset = get_dataset(name='ENZYMES', use_node_attr=True) assert dataset.num_node_features == 21 @@ -61,6 +64,7 @@ def test_enzymes_with_node_attr(get_dataset): assert dataset.num_edge_features == 0 +@onlyOnline def test_cleaned_enzymes(get_dataset): dataset = get_dataset(name='ENZYMES', cleaned=True) assert len(dataset) == 595 diff --git a/test/datasets/test_imdb_binary.py b/test/datasets/test_imdb_binary.py index 56976a13e4a8..dd2fcd243118 100644 --- a/test/datasets/test_imdb_binary.py +++ b/test/datasets/test_imdb_binary.py @@ -1,6 +1,7 @@ -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, onlyOnline +@onlyOnline @onlyFullTest def test_imdb_binary(get_dataset): dataset = get_dataset(name='IMDB-BINARY') diff --git a/test/datasets/test_mutag.py b/test/datasets/test_mutag.py index d7d556101f20..d5987f276564 100644 --- a/test/datasets/test_mutag.py +++ b/test/datasets/test_mutag.py @@ -1,3 +1,7 @@ +from torch_geometric.testing import onlyOnline + + +@onlyOnline def test_mutag(get_dataset): dataset = get_dataset(name='MUTAG') assert len(dataset) == 188 @@ -9,6 +13,7 @@ def test_mutag(get_dataset): assert dataset[0].edge_attr.size(1) == 4 +@onlyOnline def test_mutag_with_node_attr(get_dataset): dataset = get_dataset(name='MUTAG', use_node_attr=True) assert dataset.num_features == 7 diff --git a/test/datasets/test_planetoid.py b/test/datasets/test_planetoid.py index c6e2f1249c22..05dd9ebbc8a4 100644 --- a/test/datasets/test_planetoid.py +++ b/test/datasets/test_planetoid.py @@ -1,6 +1,8 @@ from torch_geometric.loader import DataLoader +from torch_geometric.testing import onlyOnline +@onlyOnline def test_citeseer(get_dataset): dataset = get_dataset(name='CiteSeer') loader = DataLoader(dataset, batch_size=len(dataset)) @@ -28,6 +30,7 @@ def test_citeseer(get_dataset): assert batch.is_undirected() +@onlyOnline def test_citeseer_with_full_split(get_dataset): dataset = get_dataset(name='CiteSeer', split='full') data = dataset[0] @@ -37,9 +40,15 @@ def test_citeseer_with_full_split(get_dataset): assert (data.train_mask & data.val_mask & data.test_mask).sum() == 0 +@onlyOnline def test_citeseer_with_random_split(get_dataset): - dataset = get_dataset(name='CiteSeer', split='random', - num_train_per_class=11, num_val=29, num_test=41) + dataset = get_dataset( + name='CiteSeer', + split='random', + num_train_per_class=11, + num_val=29, + num_test=41, + ) data = dataset[0] assert data.train_mask.sum() == dataset.num_classes * 11 assert data.val_mask.sum() == 
29 diff --git a/test/datasets/test_snap_dataset.py b/test/datasets/test_snap_dataset.py index f360ae660843..f2c9fad87d8c 100644 --- a/test/datasets/test_snap_dataset.py +++ b/test/datasets/test_snap_dataset.py @@ -1,6 +1,7 @@ -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, onlyOnline +@onlyOnline @onlyFullTest def test_ego_facebook_snap_dataset(get_dataset): dataset = get_dataset(name='ego-facebook') @@ -8,6 +9,7 @@ def test_ego_facebook_snap_dataset(get_dataset): assert len(dataset) == 10 +@onlyOnline @onlyFullTest def test_soc_slashdot_snap_dataset(get_dataset): dataset = get_dataset(name='soc-Slashdot0811') @@ -15,6 +17,7 @@ def test_soc_slashdot_snap_dataset(get_dataset): assert len(dataset) == 1 +@onlyOnline @onlyFullTest def test_wiki_vote_snap_dataset(get_dataset): dataset = get_dataset(name='wiki-vote') diff --git a/test/datasets/test_suite_sparse.py b/test/datasets/test_suite_sparse.py index 95b89ea4c129..07ab82d78d71 100644 --- a/test/datasets/test_suite_sparse.py +++ b/test/datasets/test_suite_sparse.py @@ -1,6 +1,7 @@ -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, onlyOnline +@onlyOnline @onlyFullTest def test_suite_sparse_dataset(get_dataset): dataset = get_dataset(group='DIMACS10', name='citationCiteseer') @@ -9,6 +10,7 @@ def test_suite_sparse_dataset(get_dataset): assert len(dataset) == 1 +@onlyOnline @onlyFullTest def test_illc1850_suite_sparse_dataset(get_dataset): dataset = get_dataset(group='HB', name='illc1850') diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py index 7b33495e0cdd..5c68be113ce4 100644 --- a/test/graphgym/test_graphgym.py +++ b/test/graphgym/test_graphgym.py @@ -26,7 +26,7 @@ auto_select_device, params_count, ) -from torch_geometric.testing import withPackage +from torch_geometric.testing import onlyOnline, withPackage num_trivial_metric_calls = 0 @@ -41,6 +41,7 @@ def trivial_metric(true, pred, task_type): return 1 +@onlyOnline @withPackage('yacs') @withPackage('pytorch_lightning') @pytest.mark.parametrize('auto_resume', [True, False]) @@ -109,6 +110,7 @@ def test_run_single_graphgym(tmp_path, capfd, auto_resume, skip_train_eval, assert "val: {'epoch': 5," in out +@onlyOnline @withPackage('yacs') @withPackage('pytorch_lightning') def test_graphgym_module(tmp_path): @@ -163,6 +165,7 @@ def test_graphgym_module(tmp_path): assert isinstance(outputs["loss"], torch.Tensor) +@onlyOnline @withPackage('yacs') @withPackage('pytorch_lightning') def test_train(tmp_path, capfd): diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index 292f2a551380..2e10f5202614 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -3,7 +3,7 @@ from torch_geometric.data import Data from torch_geometric.loader import ClusterData, ClusterLoader -from torch_geometric.testing import onlyFullTest +from torch_geometric.testing import onlyFullTest, onlyOnline from torch_geometric.utils import sort_edge_index try: @@ -145,6 +145,7 @@ def test_keep_inter_cluster_edges(): assert data.edge_index.size(1) == data.edge_attr.size(0) +@onlyOnline @onlyFullTest @pytest.mark.skipif(not WITH_METIS, reason='Not compiled with METIS support') def test_cluster_gcn_correctness(get_dataset): diff --git a/test/loader/test_hgt_loader.py b/test/loader/test_hgt_loader.py index f34879c9a0f3..52b9d3c5477e 100644 --- a/test/loader/test_hgt_loader.py +++ b/test/loader/test_hgt_loader.py @@ -4,7 +4,11 @@ from torch_geometric.data import 
HeteroData from torch_geometric.loader import HGTLoader from torch_geometric.nn import GraphConv, to_hetero -from torch_geometric.testing import get_random_edge_index, withPackage +from torch_geometric.testing import ( + get_random_edge_index, + onlyOnline, + withPackage, +) from torch_geometric.typing import SparseTensor from torch_geometric.utils import k_hop_subgraph @@ -132,6 +136,7 @@ def test_hgt_loader(): assert torch.cat([row, col]).unique().numel() >= 59 +@onlyOnline @withPackage('torch_sparse') def test_hgt_loader_on_cora(get_dataset): dataset = get_dataset(name='Cora') diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 34134e7f2ae7..ba15253fecaa 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -6,6 +6,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv, to_hetero @@ -16,6 +17,7 @@ get_random_edge_index, onlyLinux, onlyNeighborSampler, + onlyOnline, withCUDA, withPackage, ) @@ -258,6 +260,7 @@ def test_hetero_neighbor_loader_basic(subgraph_type, dtype): assert not batch.has_isolated_nodes() +@onlyOnline @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', list(SubgraphType)) def test_homo_neighbor_loader_on_cora(get_dataset, subgraph_type): @@ -303,6 +306,7 @@ def forward(self, x, edge_index, edge_weight): assert torch.allclose(out1, out2, atol=1e-6) +@onlyOnline @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', list(SubgraphType)) def test_hetero_neighbor_loader_on_cora(get_dataset, subgraph_type): @@ -349,6 +353,7 @@ def forward(self, x, edge_index): assert torch.allclose(out1, out2, atol=1e-6) +@onlyOnline @withPackage('pyg_lib') def test_temporal_hetero_neighbor_loader_on_cora(get_dataset): dataset = get_dataset(name='Cora') @@ -402,13 +407,14 @@ def test_custom_neighbor_loader(): layout='csr', size=(100, 200)) # CSC: - edge_index = get_random_edge_index(200, 100, 1000) - data['author', 'to', 'paper'].edge_index = edge_index - adj = to_torch_csc_tensor(edge_index, size=(200, 100)) - csc = (adj.row_indices(), adj.ccol_indices()) - graph_store.put_edge_index(edge_index=csc, - edge_type=('author', 'to', 'paper'), - layout='csc', size=(200, 100)) + if torch_geometric.typing.WITH_PT112: + edge_index = get_random_edge_index(200, 100, 1000) + data['author', 'to', 'paper'].edge_index = edge_index + adj = to_torch_csc_tensor(edge_index, size=(200, 100)) + csc = (adj.row_indices(), adj.ccol_indices()) + graph_store.put_edge_index(edge_index=csc, + edge_type=('author', 'to', 'paper'), + layout='csc', size=(200, 100)) # COO (sorted): edge_index = get_random_edge_index(200, 200, 100) @@ -446,10 +452,14 @@ def test_custom_neighbor_loader(): 'paper', 'to', 'paper'].edge_index.size()) assert (batch1['paper', 'to', 'author'].edge_index.size() == batch1[ 'paper', 'to', 'author'].edge_index.size()) - assert (batch1['author', 'to', 'paper'].edge_index.size() == batch1[ - 'author', 'to', 'paper'].edge_index.size()) + if torch_geometric.typing.WITH_PT112: + assert (batch1['author', 'to', 'paper'].edge_index.size() == + batch1['author', 'to', 'paper'].edge_index.size()) + assert (batch1['author', 'to', 'author'].edge_index.size() == batch1[ + 'author', 'to', 'author'].edge_index.size()) +@onlyOnline @withPackage('pyg_lib') def test_temporal_custom_neighbor_loader_on_cora(get_dataset): # Initialize dataset (once): @@ -516,8 +526,8 @@ def 
test_temporal_custom_neighbor_loader_on_cora(get_dataset): @withPackage('torch_sparse') def test_pyg_lib_and_torch_sparse_homo_equality(): edge_index = get_random_edge_index(20, 20, 100) - adj = to_torch_csc_tensor(edge_index, size=(20, 20)) - colptr, row = adj.ccol_indices(), adj.row_indices() + adj = to_torch_csr_tensor(edge_index.flip([0]), size=(20, 20)) + colptr, row = adj.crow_indices(), adj.col_indices() seed = torch.arange(10) @@ -538,12 +548,12 @@ def test_pyg_lib_and_torch_sparse_homo_equality(): @withPackage('torch_sparse') def test_pyg_lib_and_torch_sparse_hetero_equality(): edge_index = get_random_edge_index(20, 10, 50) - adj = to_torch_csc_tensor(edge_index, size=(20, 10)) - colptr1, row1 = adj.ccol_indices(), adj.row_indices() + adj = to_torch_csr_tensor(edge_index.flip([0]), size=(10, 20)) + colptr1, row1 = adj.crow_indices(), adj.col_indices() edge_index = get_random_edge_index(10, 20, 50) - adj = to_torch_csc_tensor(edge_index, size=(10, 20)) - colptr2, row2 = adj.ccol_indices(), adj.row_indices() + adj = to_torch_csr_tensor(edge_index.flip([0]), size=(20, 10)) + colptr2, row2 = adj.crow_indices(), adj.col_indices() node_types = ['paper', 'author'] edge_types = [('paper', 'to', 'author'), ('author', 'to', 'paper')] diff --git a/test/loader/test_neighbor_sampler.py b/test/loader/test_neighbor_sampler.py index 53bb849d5629..37d95ebc14f2 100644 --- a/test/loader/test_neighbor_sampler.py +++ b/test/loader/test_neighbor_sampler.py @@ -3,7 +3,7 @@ from torch_geometric.loader import NeighborSampler from torch_geometric.nn.conv import GATConv, SAGEConv -from torch_geometric.testing import withPackage +from torch_geometric.testing import onlyOnline, withPackage from torch_geometric.typing import SparseTensor from torch_geometric.utils import erdos_renyi_graph @@ -47,6 +47,7 @@ def test_neighbor_sampler_invalid_kwargs(): NeighborSampler(edge_index, sizes=[-1], collate_fn=None, dataset=None) +@onlyOnline @withPackage('torch_sparse') def test_neighbor_sampler_on_cora(get_dataset): dataset = get_dataset(name='Cora') diff --git a/test/nn/conv/test_gat_conv.py b/test/nn/conv/test_gat_conv.py index f17240fd0532..008d250d653c 100644 --- a/test/nn/conv/test_gat_conv.py +++ b/test/nn/conv/test_gat_conv.py @@ -43,10 +43,12 @@ def test_gat_conv(): assert result[1][1].size() == (7, 2) assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - result = conv(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1][0].size() == torch.Size([4, 4, 2]) - assert result[1][0]._nnz() == 7 + if torch_geometric.typing.WITH_PT113: + # PyTorch < 1.13 does not support multi-dimensional CSR values :( + result = conv(x1, adj1.t(), return_attention_weights=True) + assert torch.allclose(result[0], out, atol=1e-6) + assert result[1][0].size() == torch.Size([4, 4, 2]) + assert result[1][0]._nnz() == 7 if torch_geometric.typing.WITH_TORCH_SPARSE: result = conv(x1, adj2.t(), return_attention_weights=True) diff --git a/test/nn/conv/test_gatv2_conv.py b/test/nn/conv/test_gatv2_conv.py index 56e330b84db1..52934d623782 100644 --- a/test/nn/conv/test_gatv2_conv.py +++ b/test/nn/conv/test_gatv2_conv.py @@ -42,10 +42,12 @@ def test_gatv2_conv(): assert result[1][1].min() >= 0 and result[1][1].max() <= 1 assert conv._alpha is None - result = conv(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1][0].size() == torch.Size([4, 4, 2]) - assert result[1][0]._nnz() == 7 + if 
torch_geometric.typing.WITH_PT113: + # PyTorch < 1.13 does not support multi-dimensional CSR values :( + result = conv(x1, adj1.t(), return_attention_weights=True) + assert torch.allclose(result[0], out, atol=1e-6) + assert result[1][0].size() == torch.Size([4, 4, 2]) + assert result[1][0]._nnz() == 7 if torch_geometric.typing.WITH_TORCH_SPARSE: result = conv(x1, adj2.t(), return_attention_weights=True) diff --git a/test/nn/conv/test_gps_conv.py b/test/nn/conv/test_gps_conv.py index 1e063f2e4e8c..a1acd496d360 100644 --- a/test/nn/conv/test_gps_conv.py +++ b/test/nn/conv/test_gps_conv.py @@ -6,15 +6,15 @@ from torch_geometric.typing import SparseTensor from torch_geometric.utils import to_torch_csc_tensor -x = torch.randn(4, 16) -edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) -batch = torch.tensor([0, 0, 1, 1]) -adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) - @pytest.mark.parametrize('attn_type', ['multihead', 'performer']) @pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm']) def test_gps_conv(norm, attn_type): + x = torch.randn(4, 16) + edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) + batch = torch.tensor([0, 0, 1, 1]) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + conv = GPSConv(16, conv=SAGEConv(16, 16), heads=4, norm=norm, attn_type=attn_type) conv.reset_parameters() diff --git a/test/nn/conv/test_heat_conv.py b/test/nn/conv/test_heat_conv.py index 73e94f2cfd72..cf99abc19111 100644 --- a/test/nn/conv/test_heat_conv.py +++ b/test/nn/conv/test_heat_conv.py @@ -3,10 +3,11 @@ import torch_geometric.typing from torch_geometric.nn import HEATConv -from torch_geometric.testing import is_full_test +from torch_geometric.testing import is_full_test, withPackage from torch_geometric.typing import SparseTensor +@withPackage('torch>=1.12.0') # TODO Investigate error @pytest.mark.parametrize('concat', [True, False]) def test_heat_conv(concat): x = torch.randn(4, 8) diff --git a/test/nn/conv/test_hgt_conv.py b/test/nn/conv/test_hgt_conv.py index a41a755a9e57..890e1b162d02 100644 --- a/test/nn/conv/test_hgt_conv.py +++ b/test/nn/conv/test_hgt_conv.py @@ -4,11 +4,12 @@ from torch_geometric.data import HeteroData from torch_geometric.nn import HGTConv from torch_geometric.profile import benchmark -from torch_geometric.testing import get_random_edge_index +from torch_geometric.testing import get_random_edge_index, withPackage from torch_geometric.typing import SparseTensor from torch_geometric.utils import coalesce, to_torch_csc_tensor +@withPackage('torch>=1.12.0') # TODO Investigate error def test_hgt_conv_same_dimensions(): x_dict = { 'author': torch.randn(4, 16), @@ -59,6 +60,7 @@ def test_hgt_conv_same_dimensions(): # allows indexing `ParameterDict` mappings :( +@withPackage('torch>=1.12.0') # TODO Investigate error def test_hgt_conv_different_dimensions(): x_dict = { 'author': torch.randn(4, 16), @@ -109,6 +111,7 @@ def test_hgt_conv_different_dimensions(): assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) +@withPackage('torch>=1.12.0') # TODO Investigate error def test_hgt_conv_lazy(): x_dict = { 'author': torch.randn(4, 16), diff --git a/test/nn/conv/test_message_passing.py b/test/nn/conv/test_message_passing.py index d77f7a7a68b2..04c99fc854cc 100644 --- a/test/nn/conv/test_message_passing.py +++ b/test/nn/conv/test_message_passing.py @@ -115,11 +115,12 @@ def test_my_conv_basic(): assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) # Test gradient computation for `torch.sparse` tensors: - conv.fuse = True - 
torch_adj_t = adj1.t().requires_grad_() - out = conv((x1, x2), torch_adj_t) - out.sum().backward() - assert torch_adj_t.grad is not None + if torch_geometric.typing.WITH_PT112: + conv.fuse = True + torch_adj_t = adj1.t().requires_grad_() + out = conv((x1, x2), torch_adj_t) + out.sum().backward() + assert torch_adj_t.grad is not None def test_my_conv_out_of_bounds(): diff --git a/test/nn/conv/test_rgcn_conv.py b/test/nn/conv/test_rgcn_conv.py index 1df7aaf65144..3087d57de94d 100644 --- a/test/nn/conv/test_rgcn_conv.py +++ b/test/nn/conv/test_rgcn_conv.py @@ -3,7 +3,7 @@ import torch_geometric.typing from torch_geometric.nn import FastRGCNConv, RGCNConv -from torch_geometric.testing import is_full_test, withCUDA +from torch_geometric.testing import is_full_test, withCUDA, withPackage from torch_geometric.typing import SparseTensor classes = [RGCNConv, FastRGCNConv] @@ -11,6 +11,7 @@ @withCUDA +@withPackage('torch>=1.12.0') # TODO Investigate error @pytest.mark.parametrize('conf', confs) def test_rgcn_conv_equality(conf, device): num_bases, num_blocks = conf @@ -47,6 +48,7 @@ def test_rgcn_conv_equality(conf, device): @withCUDA +@withPackage('torch>=1.12.0') # TODO Investigate error @pytest.mark.parametrize('cls', classes) @pytest.mark.parametrize('conf', confs) def test_rgcn_conv(cls, conf, device): diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index d8ddbd8e764f..d193276dc2a9 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -216,6 +216,7 @@ def test_lazy_hetero_dict_linear(device): @withCUDA @withPackage('pyg_lib') +@withPackage('torch>=1.12.0') # TODO Investigate error @pytest.mark.parametrize('type_vec', [ torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 1, 2, 0, 1, 2]), diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 4ca973419bbb..d7020d46a2d2 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -17,6 +17,7 @@ onlyFullTest, onlyLinux, onlyNeighborSampler, + onlyOnline, withCUDA, withPackage, ) @@ -147,6 +148,7 @@ def test_one_layer_gnn(out_dim, jk): assert model(x, edge_index).size() == (3, out_channels) +@onlyOnline @onlyNeighborSampler @pytest.mark.parametrize('jk', [None, 'last']) def test_basic_gnn_inference(get_dataset, jk): @@ -215,6 +217,7 @@ def test_packaging(): assert model(x, edge_index).size() == (3, 16) +@withPackage('torch>=1.12.0') @withPackage('onnx', 'onnxruntime') def test_onnx(tmp_path, capfd): import onnx diff --git a/test/nn/models/test_dimenet.py b/test/nn/models/test_dimenet.py index 953948981969..8375cbe7a7bb 100644 --- a/test/nn/models/test_dimenet.py +++ b/test/nn/models/test_dimenet.py @@ -25,6 +25,7 @@ def test_dimenet_modules(): assert rl(x).size() == (128, 128) # Isotonic layer. +@withPackage('sympy') @withPackage('torch_sparse') # TODO `triplet` requires `SparseTensor` for now. @pytest.mark.parametrize('Model', [DimeNet, DimeNetPlusPlus]) def test_dimenet(Model): diff --git a/test/nn/models/test_graph_unet.py b/test/nn/models/test_graph_unet.py index cb15a2f80637..220cf7cf4320 100644 --- a/test/nn/models/test_graph_unet.py +++ b/test/nn/models/test_graph_unet.py @@ -1,10 +1,11 @@ import torch from torch_geometric.nn import GraphUNet -from torch_geometric.testing import is_full_test, onlyLinux +from torch_geometric.testing import is_full_test, onlyLinux, withPackage @onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. 
+@withPackage('torch>=1.12.0') def test_graph_unet(): model = GraphUNet(16, 32, 8, depth=3) out = 'GraphUNet(16, 32, 8, depth=3, pool_ratios=[0.5, 0.5, 0.5])' diff --git a/test/nn/pool/connect/test_filter_edges.py b/test/nn/pool/connect/test_filter_edges.py index d2e9428539cf..6f07f2430bba 100644 --- a/test/nn/pool/connect/test_filter_edges.py +++ b/test/nn/pool/connect/test_filter_edges.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn.pool.connect import FilterEdges from torch_geometric.nn.pool.select import SelectOutput from torch_geometric.testing import is_full_test @@ -25,7 +26,7 @@ def test_filter_edges(): assert out1.edge_attr.tolist() == [3, 5] assert out1.batch.tolist() == [0, 1] - if is_full_test(): + if torch_geometric.typing.WITH_PT112 and is_full_test(): jit = torch.jit.script(connect) out2 = jit(select_output, edge_index, edge_attr, batch) torch.equal(out1.edge_index, out2.edge_index) diff --git a/test/nn/pool/test_asap.py b/test/nn/pool/test_asap.py index b77c91ef7f99..b861b98a35e7 100644 --- a/test/nn/pool/test_asap.py +++ b/test/nn/pool/test_asap.py @@ -4,7 +4,12 @@ import torch from torch_geometric.nn import ASAPooling, GCNConv, GraphConv -from torch_geometric.testing import is_full_test, onlyFullTest, onlyLinux +from torch_geometric.testing import ( + is_full_test, + onlyFullTest, + onlyLinux, + withPackage, +) @onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. @@ -40,6 +45,7 @@ def test_asap(): @onlyFullTest +@withPackage('torch>=1.12.0') def test_asap_jit_save(): pool = ASAPooling(in_channels=16) pool_jit = pool.jittable() diff --git a/test/nn/pool/test_pan_pool.py b/test/nn/pool/test_pan_pool.py index 1e8dfa9390b4..95818502d3d6 100644 --- a/test/nn/pool/test_pan_pool.py +++ b/test/nn/pool/test_pan_pool.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn import PANConv, PANPooling from torch_geometric.testing import is_full_test, withPackage @@ -24,7 +25,7 @@ def test_pan_pooling(): assert perm.size() == (2, ) assert score.size() == (2, ) - if is_full_test(): + if torch_geometric.typing.WITH_PT112 and is_full_test(): jit = torch.jit.script(pool) out = jit(x, M) assert torch.allclose(h, out[0]) diff --git a/test/nn/pool/test_sag_pool.py b/test/nn/pool/test_sag_pool.py index 98b668fa6cc0..006eb3f92ba9 100644 --- a/test/nn/pool/test_sag_pool.py +++ b/test/nn/pool/test_sag_pool.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn import ( GATConv, GCNConv, @@ -39,7 +40,7 @@ def test_sag_pooling(): assert out3[0].size() == (2, in_channels) assert out3[1].size() == (2, 2) - if is_full_test(): + if torch_geometric.typing.WITH_PT112 and is_full_test(): pool1.gnn = pool1.gnn.jittable() jit1 = torch.jit.script(pool1) assert torch.allclose(jit1(x, edge_index)[0], out1[0]) diff --git a/test/nn/pool/test_topk_pool.py b/test/nn/pool/test_topk_pool.py index 5e3fb20956b7..c9c182ead4f0 100644 --- a/test/nn/pool/test_topk_pool.py +++ b/test/nn/pool/test_topk_pool.py @@ -1,5 +1,6 @@ import torch +import torch_geometric.typing from torch_geometric.nn.pool import TopKPooling from torch_geometric.nn.pool.connect.filter_edges import filter_adj from torch_geometric.testing import is_full_test @@ -48,7 +49,7 @@ def test_topk_pooling(): assert out3[0].size() == (2, in_channels) assert out3[1].size() == (2, 2) - if is_full_test(): + if torch_geometric.typing.WITH_PT112 and is_full_test(): jit1 = torch.jit.script(pool1) assert torch.allclose(jit1(x, edge_index)[0], 
out1[0]) diff --git a/test/nn/test_to_hetero_module.py b/test/nn/test_to_hetero_module.py index bc65d4d729c8..5edf3703f28a 100644 --- a/test/nn/test_to_hetero_module.py +++ b/test/nn/test_to_hetero_module.py @@ -7,8 +7,10 @@ ToHeteroLinear, ToHeteroMessagePassing, ) +from torch_geometric.testing import withPackage +@withPackage('torch>=1.12.0') # TODO Investigate error @pytest.mark.parametrize('LinearCls', [torch.nn.Linear, Linear]) def test_to_hetero_linear(LinearCls): x_dict = {'1': torch.randn(5, 16), '2': torch.randn(4, 16)} diff --git a/test/nn/test_to_hetero_transformer.py b/test/nn/test_to_hetero_transformer.py index fc6ee1879796..768806bbd08a 100644 --- a/test/nn/test_to_hetero_transformer.py +++ b/test/nn/test_to_hetero_transformer.py @@ -16,6 +16,7 @@ SAGEConv, to_hetero, ) +from torch_geometric.testing import withPackage from torch_geometric.typing import SparseTensor from torch_geometric.utils import dropout_edge @@ -378,6 +379,7 @@ def forward(self, x, edge_index): return self.lin(x) + self.conv(x, edge_index) +@withPackage('torch>=1.12.0') # TODO Investigate error def test_to_hetero_and_rgcn_equal_output(): torch.manual_seed(1234) diff --git a/test/profile/test_profile.py b/test/profile/test_profile.py index d3a247786ae9..c4b5c38f3f65 100644 --- a/test/profile/test_profile.py +++ b/test/profile/test_profile.py @@ -12,7 +12,13 @@ timeit, ) from torch_geometric.profile.profile import torch_profile -from torch_geometric.testing import onlyCUDA, onlyLinux, withCUDA, withPackage +from torch_geometric.testing import ( + onlyCUDA, + onlyLinux, + onlyOnline, + withCUDA, + withPackage, +) @withCUDA @@ -35,6 +41,7 @@ def test_timeit(device): @onlyCUDA +@onlyOnline @withPackage('pytorch_memlab') def test_profileit(get_dataset): warnings.filterwarnings('ignore', '.*arguments of DataFrame.drop.*') @@ -80,6 +87,7 @@ def train(model, x, edge_index, y): @withCUDA +@onlyOnline def test_torch_profile(capfd, get_dataset, device): dataset = get_dataset(name='Cora') data = dataset[0].to(device) diff --git a/test/profile/test_profiler.py b/test/profile/test_profiler.py index 61acb3d4dc52..a6c9a1ae9114 100644 --- a/test/profile/test_profiler.py +++ b/test/profile/test_profiler.py @@ -2,10 +2,11 @@ from torch_geometric.nn import GraphSAGE from torch_geometric.profile.profiler import Profiler -from torch_geometric.testing import withCUDA +from torch_geometric.testing import withCUDA, withPackage @withCUDA +@withPackage('torch>=1.13.0') # TODO Investigate test errors def test_profiler(capfd, get_dataset, device): x = torch.randn(10, 16, device=device) edge_index = torch.tensor([ diff --git a/test/transforms/test_feature_propagation.py b/test/transforms/test_feature_propagation.py index dabd35869a2c..06beba62b59d 100644 --- a/test/transforms/test_feature_propagation.py +++ b/test/transforms/test_feature_propagation.py @@ -1,9 +1,11 @@ import torch from torch_geometric.data import Data +from torch_geometric.testing import withPackage from torch_geometric.transforms import FeaturePropagation, ToSparseTensor +@withPackage('torch>=1.12.0') def test_feature_propagation(): x = torch.randn(6, 4) x[0, 1] = float('nan') diff --git a/test/transforms/test_random_link_split.py b/test/transforms/test_random_link_split.py index 764f7b4b0cd6..7c755d6544e9 100644 --- a/test/transforms/test_random_link_split.py +++ b/test/transforms/test_random_link_split.py @@ -2,7 +2,11 @@ import torch from torch_geometric.data import Data, HeteroData -from torch_geometric.testing import get_random_edge_index, onlyFullTest +from 
torch_geometric.testing import ( + get_random_edge_index, + onlyFullTest, + onlyOnline, +) from torch_geometric.transforms import RandomLinkSplit from torch_geometric.utils import is_undirected, to_undirected @@ -290,6 +294,7 @@ def test_random_link_split_non_contiguous(): assert train_data['p', 'p'].edge_index.is_contiguous() +@onlyOnline @onlyFullTest def test_random_link_split_on_dataset(get_dataset): dataset = get_dataset(name='MUTAG') diff --git a/test/transforms/test_rooted_subgraph.py b/test/transforms/test_rooted_subgraph.py index 1cc025e1888a..ae7699a48637 100644 --- a/test/transforms/test_rooted_subgraph.py +++ b/test/transforms/test_rooted_subgraph.py @@ -6,6 +6,7 @@ from torch_geometric.transforms import RootedEgoNets, RootedRWSubgraph +@withPackage('torch>=1.12.0') def test_rooted_ego_nets(): x = torch.randn(3, 8) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) @@ -61,6 +62,7 @@ def test_rooted_rw_subgraph(): assert out.n_sub_batch.tolist() == [0, 0, 1, 1, 2, 2] +@withPackage('torch>=1.12.0') def test_rooted_subgraph_minibatch(): x = torch.randn(3, 8) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) diff --git a/test/transforms/test_sign.py b/test/transforms/test_sign.py index 94c371938238..dddcf6cd41ea 100644 --- a/test/transforms/test_sign.py +++ b/test/transforms/test_sign.py @@ -1,9 +1,11 @@ import torch from torch_geometric.data import Data +from torch_geometric.testing import withPackage from torch_geometric.transforms import SIGN +@withPackage('torch>=1.12.0') def test_sign(): x = torch.ones(5, 3) edge_index = torch.tensor([ diff --git a/test/transforms/test_to_sparse_tensor.py b/test/transforms/test_to_sparse_tensor.py index b2c85016f785..a904aaa60501 100644 --- a/test/transforms/test_to_sparse_tensor.py +++ b/test/transforms/test_to_sparse_tensor.py @@ -37,8 +37,11 @@ def test_to_sparse_tensor_basic(layout): assert adj_t.layout == layout or torch.sparse_csr if layout != torch.sparse_coo: adj_t = adj_t.to_sparse_coo() - assert adj_t.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - assert torch.equal(adj_t.values(), edge_weight[perm]) + assert adj_t.coalesce().indices().tolist() == [ + [0, 1, 1, 2], + [1, 0, 2, 1], + ] + assert torch.equal(adj_t.coalesce().values(), edge_weight[perm]) def test_to_sparse_tensor_and_keep_edge_index(): @@ -85,15 +88,21 @@ def test_hetero_to_sparse_tensor(layout): assert adj_t.layout == layout or torch.sparse_csr if layout != torch.sparse_coo: adj_t = adj_t.to_sparse_coo() - assert adj_t.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - assert adj_t.values().tolist() == [1., 1., 1., 1.] + assert adj_t.coalesce().indices().tolist() == [ + [0, 1, 1, 2], + [1, 0, 2, 1], + ] + assert adj_t.coalesce().values().tolist() == [1., 1., 1., 1.] adj_t = data['v', 'w'].adj_t assert adj_t.layout == layout or torch.sparse_csr if layout != torch.sparse_coo: adj_t = adj_t.to_sparse_coo() - assert adj_t.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - assert adj_t.values().tolist() == [1., 1., 1., 1.] + assert adj_t.coalesce().indices().tolist() == [ + [0, 1, 1, 2], + [1, 0, 2, 1], + ] + assert adj_t.coalesce().values().tolist() == [1., 1., 1., 1.] 
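# Side note (not part of the patch) on why the assertions above now go through
# `.coalesce()`: `Tensor.indices()` and `Tensor.values()` are only defined for
# coalesced sparse COO tensors, and the COO tensor produced by
# `to_sparse_coo()` is not guaranteed to carry the coalesced flag on every
# PyTorch version this commit re-enables. Coalescing first keeps the accessors
# valid everywhere without changing the (already unique) entries:
import torch

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
adj = torch.sparse_coo_tensor(edge_index, torch.ones(4), (3, 3))

coo = adj.coalesce()
assert coo.indices().tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
assert coo.values().tolist() == [1., 1., 1., 1.]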
def test_to_sparse_tensor_num_nodes_equals_num_edges(): diff --git a/test/transforms/test_to_superpixels.py b/test/transforms/test_to_superpixels.py index 002c9fd6cfa9..bd7e05127030 100644 --- a/test/transforms/test_to_superpixels.py +++ b/test/transforms/test_to_superpixels.py @@ -5,7 +5,7 @@ from torch_geometric.data import download_url, extract_gz from torch_geometric.data.makedirs import makedirs from torch_geometric.loader import DataLoader -from torch_geometric.testing import withPackage +from torch_geometric.testing import onlyOnline, withPackage from torch_geometric.transforms import ToSLIC resources = [ @@ -14,6 +14,7 @@ ] +@onlyOnline @withPackage('torchvision', 'skimage') def test_to_superpixels(tmp_path): import torchvision.transforms as T diff --git a/test/utils/test_nested.py b/test/utils/test_nested.py index 2a29c7f92358..85373c5aded0 100644 --- a/test/utils/test_nested.py +++ b/test/utils/test_nested.py @@ -1,9 +1,11 @@ import pytest import torch +from torch_geometric.testing import withPackage from torch_geometric.utils import from_nested_tensor, to_nested_tensor +@withPackage('torch>=1.13.0') def test_to_nested_tensor(): x = torch.randn(5, 4, 3) @@ -25,6 +27,7 @@ def test_to_nested_tensor(): assert torch.allclose(out[0], x) +@withPackage('torch>=1.13.0') def test_from_nested_tensor(): x = torch.randn(5, 4, 3) @@ -46,6 +49,7 @@ def test_from_nested_tensor(): assert torch.equal(nested.to_padded_tensor(padding=0)[1, :3], out[2:5]) +@withPackage('torch>=1.13.0') def test_to_and_from_nested_tensor_autograd(): x = torch.randn(5, 4, 3, requires_grad=True) grad = torch.randn_like(x) diff --git a/test/utils/test_scatter.py b/test/utils/test_scatter.py index 3f8e9b1b453c..44e132c1c11e 100644 --- a/test/utils/test_scatter.py +++ b/test/utils/test_scatter.py @@ -9,6 +9,7 @@ from torch_geometric.utils.scatter import scatter_argmax +@withPackage('torch>=1.12.0') def test_scatter_validate(): src = torch.randn(100, 32) index = torch.randint(0, 10, (100, ), dtype=torch.long) diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 10ef78c32966..9fdf3331dd98 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -124,24 +124,27 @@ def test_to_torch_csr_tensor(): adj = to_torch_csr_tensor(edge_index) assert adj.size() == (4, 4) assert adj.layout == torch.sparse_csr - assert torch.allclose(adj.to_sparse_coo().indices(), edge_index) + assert torch.allclose(adj.to_sparse_coo().coalesce().indices(), edge_index) edge_weight = torch.randn(edge_index.size(1)) adj = to_torch_csr_tensor(edge_index, edge_weight) assert adj.size() == (4, 4) assert adj.layout == torch.sparse_csr - assert torch.allclose(adj.to_sparse_coo().indices(), edge_index) - assert torch.allclose(adj.to_sparse_coo().values(), edge_weight) + coo = adj.to_sparse_coo().coalesce() + assert torch.allclose(coo.indices(), edge_index) + assert torch.allclose(coo.values(), edge_weight) if torch_geometric.typing.WITH_PT2: edge_attr = torch.randn(edge_index.size(1), 8) adj = to_torch_csr_tensor(edge_index, edge_attr) assert adj.size() == (4, 4, 8) assert adj.layout == torch.sparse_csr - assert torch.allclose(adj.to_sparse_coo().indices(), edge_index) - assert torch.allclose(adj.to_sparse_coo().values(), edge_attr) + coo = adj.to_sparse_coo().coalesce() + assert torch.allclose(coo.indices(), edge_index) + assert torch.allclose(coo.values(), edge_attr) +@withPackage('torch>=1.12.0') def test_to_torch_csc_tensor(): edge_index = torch.tensor([ [0, 1, 1, 2, 2, 3], diff --git a/test/utils/test_spmm.py 
b/test/utils/test_spmm.py index 6667603694c0..a856a3cb33a4 100644 --- a/test/utils/test_spmm.py +++ b/test/utils/test_spmm.py @@ -81,6 +81,7 @@ def test_spmm_layout(device, layout, reduce): spmm(src, other, reduce=reduce) +@withPackage('torch>=1.12.0') @pytest.mark.parametrize('reduce', ['sum', 'mean']) def test_spmm_jit(reduce): @torch.jit.script diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index cc5c1c6444f7..134efb7a1d95 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -142,30 +142,33 @@ def _collate( else: incs = None + if getattr(elem, 'is_nested', False): + tensors = [] + for nested_tensor in values: + tensors.extend(nested_tensor.unbind()) + value = torch.nested.nested_tensor(tensors) + + return value, slices, incs + + out = None if torch.utils.data.get_worker_info() is not None: # Write directly into shared memory to avoid an extra copy: numel = sum(value.numel() for value in values) if torch_geometric.typing.WITH_PT2: storage = elem.untyped_storage()._new_shared( numel * elem.element_size(), device=elem.device) - else: + elif torch_geometric.typing.WITH_PT112: storage = elem.storage()._new_shared(numel, device=elem.device) + else: + storage = elem.storage()._new_shared(numel) shape = list(elem.size()) if cat_dim is None or elem.dim() == 0: shape = [len(values)] + shape else: shape[cat_dim] = int(slices[-1]) out = elem.new(storage).resize_(*shape) - else: - out = None - if elem.is_nested: - tensors = [] - for nested_tensor in values: - tensors.extend(nested_tensor.unbind()) - value = torch.nested.nested_tensor(tensors) - else: - value = torch.cat(values, dim=cat_dim or 0, out=out) + value = torch.cat(values, dim=cat_dim or 0, out=out) return value, slices, incs diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 9da318d6d4ac..c6d36d23df3a 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -981,7 +981,7 @@ def size_repr(key: Any, value: Any, indent: int = 0) -> str: pad = ' ' * indent if isinstance(value, Tensor) and value.dim() == 0: out = value.item() - elif isinstance(value, Tensor) and value.is_nested: + elif isinstance(value, Tensor) and getattr(value, 'is_nested', False): out = str(list(value.to_padded_tensor(padding=0.0).size())) elif isinstance(value, Tensor): out = str(list(value.size())) diff --git a/torch_geometric/nn/pool/connect/base.py b/torch_geometric/nn/pool/connect/base.py index 23d0dd44346b..e3ddf3b06f1d 100644 --- a/torch_geometric/nn/pool/connect/base.py +++ b/torch_geometric/nn/pool/connect/base.py @@ -4,10 +4,10 @@ import torch from torch import Tensor +import torch_geometric.typing from torch_geometric.nn.pool.select import SelectOutput -@torch.jit.script @dataclass(init=False) class ConnectOutput: r"""The output of the :class:`Connect` method, which holds the coarsened @@ -49,6 +49,10 @@ def __init__( self.batch = batch +if torch_geometric.typing.WITH_PT113: + ConnectOutput = torch.jit.script(ConnectOutput) + + class Connect(torch.nn.Module): r"""An abstract base class for implementing custom edge connection operators as described in the `"Understanding Pooling in Graph Neural diff --git a/torch_geometric/nn/pool/select/base.py b/torch_geometric/nn/pool/select/base.py index fcde4823477c..d16bc94d0f1a 100644 --- a/torch_geometric/nn/pool/select/base.py +++ b/torch_geometric/nn/pool/select/base.py @@ -4,8 +4,9 @@ import torch from torch import Tensor +import torch_geometric.typing + -@torch.jit.script @dataclass(init=False) class 
SelectOutput: r"""The output of the :class:`Select` method, which holds an assignment @@ -63,6 +64,10 @@ def __init__( self.weight = weight +if torch_geometric.typing.WITH_PT113: + SelectOutput = torch.jit.script(SelectOutput) + + class Select(torch.nn.Module): r"""An abstract base class for implementing custom node selections as described in the `"Understanding Pooling in Graph Neural Networks" diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py index db2bc6b78713..83f9820416b3 100644 --- a/torch_geometric/testing/__init__.py +++ b/torch_geometric/testing/__init__.py @@ -4,6 +4,7 @@ onlyLinux, onlyPython, onlyCUDA, + onlyOnline, onlyGraphviz, onlyNeighborSampler, withPackage, @@ -21,6 +22,7 @@ 'onlyLinux', 'onlyPython', 'onlyCUDA', + 'onlyOnline', 'onlyGraphviz', 'onlyNeighborSampler', 'withPackage', diff --git a/torch_geometric/testing/decorators.py b/torch_geometric/testing/decorators.py index e6ea08938f28..e33a7ab9e4c9 100644 --- a/torch_geometric/testing/decorators.py +++ b/torch_geometric/testing/decorators.py @@ -59,6 +59,28 @@ def onlyCUDA(func: Callable) -> Callable: )(func) +def onlyOnline(func: Callable): + r"""A decorator to skip tests if there exists no connection to the + internet.""" + import http.client as httplib + + import pytest + + has_connection = True + connection = httplib.HTTPSConnection('8.8.8.8', timeout=5) + try: + connection.request('HEAD', '/') + except Exception: + has_connection = False + finally: + connection.close() + + return pytest.mark.skipif( + not has_connection, + reason="No internet connection", + )(func) + + def onlyGraphviz(func: Callable) -> Callable: r"""A decorator to specify that this function should only execute in case :obj:`graphviz` is installed.""" diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 434d26747e56..de1868746c4b 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -6,6 +6,12 @@ from torch import Tensor WITH_PT2 = int(torch.__version__.split('.')[0]) >= 2 +WITH_PT111 = WITH_PT2 or int(torch.__version__.split('.')[1]) >= 11 +WITH_PT112 = WITH_PT2 or int(torch.__version__.split('.')[1]) >= 12 +WITH_PT113 = WITH_PT2 or int(torch.__version__.split('.')[1]) >= 13 + +if not hasattr(torch, 'sparse_csc'): + torch.sparse_csc = -1 try: import pyg_lib # noqa @@ -177,6 +183,27 @@ def masked_select_nnz(src: SparseTensor, mask: Tensor, raise ImportError("'masked_select_nnz' requires 'torch-sparse'") +class MockTorchCSCTensor: + def __init__( + self, + edge_index: Tensor, + edge_attr: Optional[Tensor] = None, + size: Optional[Union[int, Tuple[int, int]]] = None, + ): + self.edge_index = edge_index + self.edge_attr = edge_attr + self.size = size + + def t(self) -> Tensor: # Only support accessing its transpose: + from torch_geometric.utils import to_torch_csr_tensor + size = self.size + return to_torch_csr_tensor( + self.edge_index.flip([0]), + self.edge_attr, + size[::-1] if isinstance(size, (tuple, list)) else size, + ) + + # Types for accessing data #################################################### # Node-types are denoted by a single string, e.g.: `data['paper']`: diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index 0366f4868805..7a87aa7420fb 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -7,11 +7,7 @@ import torch_geometric.typing from torch_geometric.typing import torch_scatter -major, minor, _ = torch.__version__.split('.', maxsplit=2) -major, minor = int(major), int(minor) 
-has_pytorch112 = major > 1 or (major == 1 and minor >= 12) - -if has_pytorch112: # pragma: no cover +if torch_geometric.typing.WITH_PT112: # pragma: no cover warnings.filterwarnings('ignore', '.*is in beta and the API may change.*') @@ -180,9 +176,15 @@ def scatter_argmax(src: Tensor, index: Tensor, dim: int = 0, if dim_size is None: dim_size = index.max() + 1 if index.numel() > 0 else 0 - res = src.new_empty(dim_size) - res.scatter_reduce_(0, index, src.detach(), reduce='amax', - include_self=False) + if torch_geometric.typing.WITH_PT112: + res = src.new_empty(dim_size) + res.scatter_reduce_(0, index, src.detach(), reduce='amax', + include_self=False) + elif torch_geometric.typing.WITH_PT111: + res = torch.scatter_reduce(src.detach(), 0, index, reduce='amax', + output_size=dim_size) + else: + raise ValueError("'scatter_argmax' requires PyTorch >= 1.11") out = index.new_full((dim_size, ), fill_value=dim_size - 1) nonzero = (src == res[index]).nonzero().view(-1) diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py index ada6d2e6a51a..770169855615 100644 --- a/torch_geometric/utils/sparse.py +++ b/torch_geometric/utils/sparse.py @@ -3,6 +3,7 @@ import torch from torch import Tensor +import torch_geometric.typing from torch_geometric.typing import SparseTensor from torch_geometric.utils import coalesce @@ -66,7 +67,8 @@ def is_torch_sparse_tensor(src: Any) -> bool: return True if src.layout == torch.sparse_csr: return True - if src.layout == torch.sparse_csc: + if (torch_geometric.typing.WITH_PT112 + and src.layout == torch.sparse_csc): return True return False @@ -176,8 +178,26 @@ def to_torch_csr_tensor( size=(4, 4), nnz=6, layout=torch.sparse_csr) """ - adj = to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) - return adj.to_sparse_csr() + if size is None: + size = int(edge_index.max()) + 1 + if not isinstance(size, (tuple, list)): + size = (size, size) + + if not is_coalesced: + edge_index, edge_attr = coalesce(edge_index, edge_attr, max(size)) + + if edge_attr is None: + edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) + + adj = torch.sparse_csr_tensor( + crow_indices=index2ptr(edge_index[0], size[0]), + col_indices=edge_index[1], + values=edge_attr, + size=tuple(size) + edge_attr.size()[1:], + device=edge_index.device, + ) + + return adj def to_torch_csc_tensor( @@ -216,6 +236,10 @@ def to_torch_csc_tensor( size=(4, 4), nnz=6, layout=torch.sparse_csc) """ + if not torch_geometric.typing.WITH_PT112: + return torch_geometric.typing.MockTorchCSCTensor( + edge_index, edge_attr, size) + if size is None: size = int(edge_index.max()) + 1 if not isinstance(size, (tuple, list)): @@ -271,7 +295,7 @@ def to_torch_sparse_tensor( return to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) if layout == torch.sparse_csr: return to_torch_csr_tensor(edge_index, edge_attr, size, is_coalesced) - if layout == torch.sparse_csc: + if torch_geometric.typing.WITH_PT112 and layout == torch.sparse_csc: return to_torch_csc_tensor(edge_index, edge_attr, size, is_coalesced) raise ValueError(f"Unexpected sparse tensor layout (got '{layout}')") @@ -310,7 +334,7 @@ def to_edge_index(adj: Union[Tensor, SparseTensor]) -> Tuple[Tensor, Tensor]: col = adj.col_indices().detach() return torch.stack([row, col], dim=0).long(), adj.values() - if adj.layout == torch.sparse_csc: + if torch_geometric.typing.WITH_PT112 and adj.layout == torch.sparse_csc: col = ptr2index(adj.ccol_indices().detach()) row = adj.row_indices().detach() return torch.stack([row, col], 
dim=0).long(), adj.values() @@ -359,7 +383,7 @@ def set_sparse_value(adj: Tensor, value: Tensor) -> Tensor: device=value.device, ) - if adj.layout == torch.sparse_csc: + if torch_geometric.typing.WITH_PT112 and adj.layout == torch.sparse_csc: return torch.sparse_csc_tensor( ccol_indices=adj.ccol_indices(), row_indices=adj.row_indices(), diff --git a/torch_geometric/utils/spmm.py b/torch_geometric/utils/spmm.py index 3d523c5d2761..0e63e5312536 100644 --- a/torch_geometric/utils/spmm.py +++ b/torch_geometric/utils/spmm.py @@ -117,7 +117,8 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: if src.layout == torch.sparse_csr: ptr = src.crow_indices() deg = ptr[1:] - ptr[:-1] - elif src.layout == torch.sparse_csc: + elif (torch_geometric.typing.WITH_PT112 + and src.layout == torch.sparse_csc): assert src.layout == torch.sparse_csc deg = scatter(torch.ones_like(src.values()), src.row_indices(), dim=0, dim_size=src.size(0), reduce='sum') From ae84a38f14591ba9b8ce64e704e04ea1271c3b78 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 28 Jun 2023 21:18:57 -0700 Subject: [PATCH 1326/2432] Suggest to users: RMM gives 50% speedboost to cudf pathway for `map_index` (#7659) repro: cd /opt/pyg; pip uninstall -y torch-geometric; rm -rf pytorch_geometric; git clone -b map_index_rmm https://github.com/pyg-team/pytorch_geometric.git; cd /opt/pyg/pytorch_geometric; pip install .; python3 test/utils/test_map.py w/ RMM: ``` root@79c6dc981596:/opt/pyg/pytorch_geometric# python3 test/utils/test_map.py Inclusive: +-----------+-----------+ | Name | Forward | |-----------+-----------| | map_index | 0.1987s | +-----------+-----------+ Exclusive: +-----------+-----------+ | Name | Forward | |-----------+-----------| | map_index | 0.2195s | ``` +-----------+-----------+ w/o: ``` root@79c6dc981596:/opt/pyg/pytorch_geometric# python3 test/utils/test_map.py Inclusive: +-----------+-----------+ | Name | Forward | |-----------+-----------| | map_index | 0.3201s | +-----------+-----------+ Exclusive: +-----------+-----------+ | Name | Forward | |-----------+-----------| | map_index | 0.3158s | +-----------+-----------+ ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- torch_geometric/utils/map.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/torch_geometric/utils/map.py b/torch_geometric/utils/map.py index 39db6520d7cf..cdb6c217c7b1 100644 --- a/torch_geometric/utils/map.py +++ b/torch_geometric/utils/map.py @@ -40,6 +40,19 @@ def map_index( >>> map_index(src, index) (tensor([1, 2, 2, 0]), tensor([True, True, False, True, True])) + + .. note:: + + If inputs are on GPU and :obj:`cudf` is available, consider using RMM + for significant speed boosts. + Proceed with caution as RMM may conflict with other allocators or + fragments. + + .. 
code-block:: python + + import rmm + rmm.reinitialize(pool_allocator=True) + torch.cuda.memory.change_current_allocator(rmm.rmm_torch_allocator) """ if src.is_floating_point(): raise ValueError(f"Expected 'src' to be an index (got '{src.dtype}')") From 222bd814ac6970534c908a4b37b2e6917a053126 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 2 Jul 2023 10:24:22 +0700 Subject: [PATCH 1327/2432] Remove `torch.jit.trace` warnings (#7673) --- torch_geometric/nn/conv/message_passing.jinja | 2 +- torch_geometric/nn/conv/message_passing.py | 2 +- torch_geometric/utils/loop.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/torch_geometric/nn/conv/message_passing.jinja b/torch_geometric/nn/conv/message_passing.jinja index 13a32aa4c5d9..24a551e50ff6 100644 --- a/torch_geometric/nn/conv/message_passing.jinja +++ b/torch_geometric/nn/conv/message_passing.jinja @@ -84,7 +84,7 @@ class {{cls_name}}({{parent_cls_name}}): pass def _collect(self, edge_def, size, kwargs): - init = torch.tensor(0.) + init = torch.zeros(1) i, j = (1, 0) if self.flow == 'source_to_target' else (0, 1) {% for arg in user_args %} {%- if arg[-2:] not in ['_i', '_j'] %} diff --git a/torch_geometric/nn/conv/message_passing.py b/torch_geometric/nn/conv/message_passing.py index 747f041c5d45..1daf66490eac 100644 --- a/torch_geometric/nn/conv/message_passing.py +++ b/torch_geometric/nn/conv/message_passing.py @@ -217,7 +217,7 @@ def _check_input(self, edge_index, size): if edge_index.dim() != 2: raise ValueError(f"Expected 'edge_index' to be two-dimensional" f" (got {edge_index.dim()} dimensions)") - if edge_index.size(0) != 2: + if not torch.jit.is_tracing() and edge_index.size(0) != 2: raise ValueError(f"Expected 'edge_index' to have size '2' in " f"the first dimension (got " f"'{edge_index.size(0)}')") diff --git a/torch_geometric/utils/loop.py b/torch_geometric/utils/loop.py index bb847e2fa9e2..59265cd39fb1 100644 --- a/torch_geometric/utils/loop.py +++ b/torch_geometric/utils/loop.py @@ -232,15 +232,15 @@ def add_self_loops( assert edge_attr is None layout = edge_index.layout size = (edge_index.size(0), edge_index.size(1)) + N = min(size) edge_index, edge_attr = to_edge_index(edge_index) elif isinstance(num_nodes, (tuple, list)): size = (num_nodes[0], num_nodes[1]) + N = min(size) else: N = maybe_num_nodes(edge_index, num_nodes) size = (N, N) - N = min(size) - loop_index = torch.arange(0, N, dtype=torch.long, device=edge_index.device) loop_index = loop_index.unsqueeze(0).repeat(2, 1) From 89165f2a378c4c1e2f2ad7dd4c33872cd42eef3d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 2 Jul 2023 10:33:50 +0700 Subject: [PATCH 1328/2432] Allow `xfail` of `SNAPDataset` tests (#7674) Workaround since `stanford.edu` is down. 
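For reference, `pytest.mark.xfail` (in its default non-strict mode) keeps the affected
download tests in the suite but reports their failures as expected (`xfail`) instead of
as errors, and reports an unexpected pass as `xpass` once the host is reachable again.
A minimal sketch of the marker stack applied below; the test body simply mirrors the
existing SNAP dataset tests touched by this patch:

```python
import pytest

from torch_geometric.testing import onlyFullTest, onlyOnline


@onlyOnline          # skip entirely without an internet connection
@onlyFullTest        # only run as part of the full test suite
@pytest.mark.xfail   # tolerate failures while the download host is down
def test_wiki_vote_snap_dataset(get_dataset):
    dataset = get_dataset(name='wiki-vote')
    assert str(dataset) == 'SNAP-wiki-vote(1)'
```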
--- .github/workflows/full_testing.yml | 6 +++++- test/datasets/test_snap_dataset.py | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 94f06799d05d..33f58f571c07 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -16,8 +16,12 @@ jobs: matrix: os: [ubuntu-latest, windows-latest] python-version: ['3.8', '3.10'] - torch-version: [1.13.0, 2.0.0, nightly] + torch-version: [1.11.0, 1.12.0, 1.13.0, 2.0.0, nightly] include: + - torch-version: 1.11.0 + torchvision-version: 0.12.0 + - torch-version: 1.12.0 + torchvision-version: 0.13.0 - torch-version: 1.13.0 torchvision-version: 0.14.0 - torch-version: 2.0.0 diff --git a/test/datasets/test_snap_dataset.py b/test/datasets/test_snap_dataset.py index f2c9fad87d8c..1c26e2fd481d 100644 --- a/test/datasets/test_snap_dataset.py +++ b/test/datasets/test_snap_dataset.py @@ -1,8 +1,11 @@ +import pytest + from torch_geometric.testing import onlyFullTest, onlyOnline @onlyOnline @onlyFullTest +@pytest.mark.xfail def test_ego_facebook_snap_dataset(get_dataset): dataset = get_dataset(name='ego-facebook') assert str(dataset) == 'SNAP-ego-facebook(10)' @@ -11,6 +14,7 @@ def test_ego_facebook_snap_dataset(get_dataset): @onlyOnline @onlyFullTest +@pytest.mark.xfail def test_soc_slashdot_snap_dataset(get_dataset): dataset = get_dataset(name='soc-Slashdot0811') assert str(dataset) == 'SNAP-soc-slashdot0811(1)' @@ -19,6 +23,7 @@ def test_soc_slashdot_snap_dataset(get_dataset): @onlyOnline @onlyFullTest +@pytest.mark.xfail def test_wiki_vote_snap_dataset(get_dataset): dataset = get_dataset(name='wiki-vote') assert str(dataset) == 'SNAP-wiki-vote(1)' From b5cafd98411903b35b3ffb519ae2f3e92718e642 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 2 Jul 2023 10:49:50 +0700 Subject: [PATCH 1329/2432] Remove invalid cast warning in `to_dense_batch` (#7675) --- torch_geometric/utils/to_dense_batch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_geometric/utils/to_dense_batch.py b/torch_geometric/utils/to_dense_batch.py index c6d9f2ba60a7..cad023e7b93a 100644 --- a/torch_geometric/utils/to_dense_batch.py +++ b/torch_geometric/utils/to_dense_batch.py @@ -124,8 +124,8 @@ def to_dense_batch( x, idx = x[mask], idx[mask] size = [batch_size * max_num_nodes] + list(x.size())[1:] - out = torch.as_tensor(fill_value, dtype=x.dtype, - device=x.device).repeat(size) + out = torch.as_tensor(fill_value, device=x.device) + out = out.to(x.dtype).repeat(size) out[idx] = x out = out.view([batch_size, max_num_nodes] + list(x.size())[1:]) From 52a7cdaedf10f31a31db88175be181fee23529cf Mon Sep 17 00:00:00 2001 From: rusty1s Date: Sun, 2 Jul 2023 21:30:10 +0700 Subject: [PATCH 1330/2432] update --- test/contrib/explain/test_pgm_explainer.py | 3 +-- test/data/lightning/test_datamodule.py | 20 +++++++++----------- test/graphgym/test_graphgym.py | 9 +++------ test/graphgym/test_logger.py | 3 +-- test/loader/test_neighbor_loader.py | 6 ++---- test/nn/conv/test_signed_conv.py | 8 +++++--- test/nn/models/test_node2vec.py | 3 +-- test/nn/pool/test_asap.py | 3 ++- test/nn/test_model_summary.py | 3 +-- test/utils/test_convert.py | 9 +++------ torch_geometric/nn/models/node2vec.py | 3 ++- torch_geometric/testing/decorators.py | 3 +++ 12 files changed, 33 insertions(+), 40 deletions(-) diff --git a/test/contrib/explain/test_pgm_explainer.py b/test/contrib/explain/test_pgm_explainer.py index a9335541ff6d..bf1bead9e421 100644 --- 
a/test/contrib/explain/test_pgm_explainer.py +++ b/test/contrib/explain/test_pgm_explainer.py @@ -45,8 +45,7 @@ def forward(self, x, edge_index, edge_weight=None, batch=None, **kwargs): edge_label_index = torch.tensor([[0, 1, 2], [3, 4, 5]]) -@withPackage('pgmpy') -@withPackage('pandas') +@withPackage('pgmpy', 'pandas') @pytest.mark.parametrize('node_idx', [2, 6]) @pytest.mark.parametrize('task_level, perturbation_mode', [ ('node', 'randint'), diff --git a/test/data/lightning/test_datamodule.py b/test/data/lightning/test_datamodule.py index 46457184ba8f..0c6807c10bc3 100644 --- a/test/data/lightning/test_datamodule.py +++ b/test/data/lightning/test_datamodule.py @@ -19,6 +19,7 @@ get_random_edge_index, onlyCUDA, onlyFullTest, + onlyNeighborSampler, onlyOnline, withPackage, ) @@ -76,8 +77,7 @@ def configure_optimizers(self): @onlyCUDA @onlyOnline @onlyFullTest -@withPackage('pytorch_lightning>=2.0.0') -@withPackage('torchmetrics>=0.11.0') +@withPackage('pytorch_lightning>=2.0.0', 'torchmetrics>=0.11.0') @pytest.mark.parametrize('strategy_type', [None, 'ddp']) def test_lightning_dataset(get_dataset, strategy_type): import pytorch_lightning as pl @@ -180,9 +180,8 @@ def configure_optimizers(self): @onlyCUDA @onlyOnline @onlyFullTest -@withPackage('pyg_lib') -@withPackage('pytorch_lightning>=2.0.0') -@withPackage('torchmetrics>=0.11.0') +@onlyNeighborSampler +@withPackage('pytorch_lightning>=2.0.0', 'torchmetrics>=0.11.0') @pytest.mark.parametrize('loader', ['full', 'neighbor']) @pytest.mark.parametrize('strategy_type', [None, 'ddp']) def test_lightning_node_data(get_dataset, strategy_type, loader): @@ -279,9 +278,8 @@ def configure_optimizers(self): @onlyCUDA @onlyFullTest -@withPackage('pyg_lib') -@withPackage('pytorch_lightning>=2.0.0') -@withPackage('torchmetrics>=0.11.0') +@onlyNeighborSampler +@withPackage('pytorch_lightning>=2.0.0', 'torchmetrics>=0.11.0') def test_lightning_hetero_node_data(get_dataset): import pytorch_lightning as pl @@ -329,7 +327,7 @@ def sample_from_nodes(self, *args, **kwargs): @onlyCUDA @onlyFullTest -@withPackage('pyg_lib') +@onlyNeighborSampler @withPackage('pytorch_lightning') def test_lightning_hetero_link_data(): torch.manual_seed(12345) @@ -389,7 +387,7 @@ def test_lightning_hetero_link_data(): assert 'edge_label_time' in batch['author', 'paper'] -@withPackage('pyg_lib') +@onlyNeighborSampler @withPackage('pytorch_lightning') def test_lightning_hetero_link_data_custom_store(): torch.manual_seed(12345) @@ -427,7 +425,7 @@ def test_lightning_hetero_link_data_custom_store(): @onlyOnline -@withPackage('pyg_lib') +@onlyNeighborSampler @withPackage('pytorch_lightning') def test_eval_loader_kwargs(get_dataset): data = get_dataset(name='Cora')[0] diff --git a/test/graphgym/test_graphgym.py b/test/graphgym/test_graphgym.py index 5c68be113ce4..8b3c1122f879 100644 --- a/test/graphgym/test_graphgym.py +++ b/test/graphgym/test_graphgym.py @@ -42,8 +42,7 @@ def trivial_metric(true, pred, task_type): @onlyOnline -@withPackage('yacs') -@withPackage('pytorch_lightning') +@withPackage('yacs', 'pytorch_lightning') @pytest.mark.parametrize('auto_resume', [True, False]) @pytest.mark.parametrize('skip_train_eval', [True, False]) @pytest.mark.parametrize('use_trivial_metric', [True, False]) @@ -111,8 +110,7 @@ def test_run_single_graphgym(tmp_path, capfd, auto_resume, skip_train_eval, @onlyOnline -@withPackage('yacs') -@withPackage('pytorch_lightning') +@withPackage('yacs', 'pytorch_lightning') def test_graphgym_module(tmp_path): import pytorch_lightning as pl @@ -166,8 +164,7 @@ 
def test_graphgym_module(tmp_path): @onlyOnline -@withPackage('yacs') -@withPackage('pytorch_lightning') +@withPackage('yacs', 'pytorch_lightning') def test_train(tmp_path, capfd): warnings.filterwarnings('ignore', ".*does not have many workers.*") diff --git a/test/graphgym/test_logger.py b/test/graphgym/test_logger.py index fa1c005d4caf..74dd0c33d09a 100644 --- a/test/graphgym/test_logger.py +++ b/test/graphgym/test_logger.py @@ -2,8 +2,7 @@ from torch_geometric.testing import withPackage -@withPackage('yacs') -@withPackage('pytorch_lightning') +@withPackage('yacs', 'pytorch_lightning') def test_logger_callback(): logger = LoggerCallback() assert isinstance(logger.train_logger, Logger) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index ba15253fecaa..f4a48a5cbe7e 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -522,8 +522,7 @@ def test_temporal_custom_neighbor_loader_on_cora(get_dataset): assert torch.equal(batch1['paper'].time, batch2['paper'].time) -@withPackage('pyg_lib') -@withPackage('torch_sparse') +@withPackage('pyg_lib', 'torch_sparse') def test_pyg_lib_and_torch_sparse_homo_equality(): edge_index = get_random_edge_index(20, 20, 100) adj = to_torch_csr_tensor(edge_index.flip([0]), size=(20, 20)) @@ -544,8 +543,7 @@ def test_pyg_lib_and_torch_sparse_homo_equality(): assert torch.equal(edge_id1, edge_id2) -@withPackage('pyg_lib') -@withPackage('torch_sparse') +@withPackage('pyg_lib', 'torch_sparse') def test_pyg_lib_and_torch_sparse_hetero_equality(): edge_index = get_random_edge_index(20, 10, 50) adj = to_torch_csr_tensor(edge_index.flip([0]), size=(10, 20)) diff --git a/test/nn/conv/test_signed_conv.py b/test/nn/conv/test_signed_conv.py index 8ee73daf033a..6a1f956abf16 100644 --- a/test/nn/conv/test_signed_conv.py +++ b/test/nn/conv/test_signed_conv.py @@ -40,7 +40,8 @@ def test_signed_conv(): assert torch.allclose(jit1(x, edge_index, edge_index), out1) assert torch.allclose(jit2(out1, edge_index, edge_index), out2) - if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + if (is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE + and torch_geometric.typing.WITH_PT112): t = '(Tensor, SparseTensor, SparseTensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) @@ -66,7 +67,7 @@ def test_signed_conv(): assert torch.allclose(conv2((out1, out1[:2]), adj2.t(), adj2.t()), out2[:2], atol=1e-6) - if is_full_test(): + if is_full_test() and torch_geometric.typing.WITH_PT112: t = '(PairTensor, Tensor, Tensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) @@ -75,7 +76,8 @@ def test_signed_conv(): assert torch.allclose(jit2((out1, out1[:2]), edge_index, edge_index), out2[:2], atol=1e-6) - if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + if (is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE + and torch_geometric.typing.WITH_PT112): t = '(PairTensor, SparseTensor, SparseTensor) -> Tensor' jit1 = torch.jit.script(conv1.jittable(t)) jit2 = torch.jit.script(conv2.jittable(t)) diff --git a/test/nn/models/test_node2vec.py b/test/nn/models/test_node2vec.py index 0ce285ed543d..39d956e41d3a 100644 --- a/test/nn/models/test_node2vec.py +++ b/test/nn/models/test_node2vec.py @@ -6,8 +6,7 @@ @withCUDA -@withPackage('pyg_lib') -@withPackage('torch_cluster') +@withPackage('pyg_lib|torch_cluster') @pytest.mark.parametrize('p', [1.0]) @pytest.mark.parametrize('q', [1.0, 0.5]) def 
test_node2vec(device, p, q): diff --git a/test/nn/pool/test_asap.py b/test/nn/pool/test_asap.py index b861b98a35e7..b54f6269a792 100644 --- a/test/nn/pool/test_asap.py +++ b/test/nn/pool/test_asap.py @@ -3,6 +3,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import ASAPooling, GCNConv, GraphConv from torch_geometric.testing import ( is_full_test, @@ -28,7 +29,7 @@ def test_asap(): assert out[0].size() == (num_nodes // 2, in_channels) assert out[1].size() == (2, 2) - if is_full_test(): + if torch_geometric.typing.WITH_PT112 and is_full_test(): torch.jit.script(pool.jittable()) pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN, add_self_loops=True) diff --git a/test/nn/test_model_summary.py b/test/nn/test_model_summary.py index 5610ed8d273c..6408e0ca051f 100644 --- a/test/nn/test_model_summary.py +++ b/test/nn/test_model_summary.py @@ -68,8 +68,7 @@ def test_summary_basic(gcn): assert summary(gcn['model'], gcn['x'], gcn['edge_index']) == expected[1:-1] -@withPackage('tabulate') -@withPackage('torch_sparse') +@withPackage('tabulate', 'torch_sparse') def test_summary_with_sparse_tensor(gcn): expected = """ +---------------------+-----------------------+----------------+----------+ diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index cf9762f499a9..1870a7798e1f 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -403,8 +403,7 @@ def test_from_trimesh(): assert data.face.t().contiguous().tolist() == faces -@withPackage('cudf') -@withPackage('cugraph') +@withPackage('cudf', 'cugraph') @pytest.mark.parametrize('edge_weight', [None, torch.rand(4)]) @pytest.mark.parametrize('relabel_nodes', [True, False]) @pytest.mark.parametrize('directed', [True, False]) @@ -443,8 +442,7 @@ def test_to_cugraph(edge_weight, directed, relabel_nodes): assert torch.allclose(edge_weight, cu_edge_weight.cpu()) -@withPackage('cudf') -@withPackage('cugraph') +@withPackage('cudf', 'cugraph') @pytest.mark.parametrize('edge_weight', [None, torch.randn(4)]) @pytest.mark.parametrize('directed', [True, False]) @pytest.mark.parametrize('relabel_nodes', [True, False]) @@ -522,8 +520,7 @@ def test_to_dgl_hetero_graph(): assert torch.equal(g.edata['edge_attr'], data['v1', 'v2'].edge_attr) -@withPackage('dgl') -@withPackage('torch_sparse') +@withPackage('dgl', 'torch_sparse') def test_to_dgl_sparse(): from torch_geometric.transforms import ToSparseTensor x = torch.randn(5, 3) diff --git a/torch_geometric/nn/models/node2vec.py b/torch_geometric/nn/models/node2vec.py index 37ece5297797..789db1ca9bb0 100644 --- a/torch_geometric/nn/models/node2vec.py +++ b/torch_geometric/nn/models/node2vec.py @@ -123,7 +123,8 @@ def pos_sample(self, batch: Tensor) -> Tensor: def neg_sample(self, batch: Tensor) -> Tensor: batch = batch.repeat(self.walks_per_node * self.num_negative_samples) - rw = torch.randint(self.num_nodes, (batch.size(0), self.walk_length)) + rw = torch.randint(self.num_nodes, (batch.size(0), self.walk_length), + dtype=batch.dtype, device=batch.device) rw = torch.cat([batch.view(-1, 1), rw], dim=-1) walks = [] diff --git a/torch_geometric/testing/decorators.py b/torch_geometric/testing/decorators.py index e33a7ab9e4c9..b62625fa6a3f 100644 --- a/torch_geometric/testing/decorators.py +++ b/torch_geometric/testing/decorators.py @@ -105,6 +105,9 @@ def withPackage(*args) -> Callable: r"""A decorator to skip tests if certain packages are not installed. 
Also supports version specification.""" def is_installed(package: str) -> bool: + if '|' in package: + return any(is_installed(p) for p in package.split('|')) + req = Requirement(package) if find_spec(req.name) is None: return False From e45059a95cf46f68b10aa890ca497e3937b43811 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 3 Jul 2023 03:09:36 +0700 Subject: [PATCH 1331/2432] Fix full tests for PyTorch < 1.13 (#7677) --- test/nn/models/test_basic_gnn.py | 5 ++++ test/nn/models/test_node2vec.py | 16 ++++++------- test/nn/models/test_rect.py | 2 +- test/nn/pool/connect/test_filter_edges.py | 2 +- test/nn/pool/test_asap.py | 4 ++-- test/nn/pool/test_pan_pool.py | 2 +- test/nn/pool/test_sag_pool.py | 2 +- test/nn/pool/test_topk_pool.py | 2 +- test/transforms/test_pad.py | 28 ++++++++++++++--------- 9 files changed, 37 insertions(+), 26 deletions(-) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index d7020d46a2d2..95b4854834b2 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -1,5 +1,6 @@ import os import os.path as osp +import sys import warnings import pytest @@ -188,6 +189,10 @@ def test_compile(device): def test_packaging(): + if (not torch_geometric.typing.WITH_PT113 and sys.version_info.major == 3 + and sys.version_info.minor >= 10): + return # Unsupported Python version + warnings.filterwarnings('ignore', '.*TypedStorage is deprecated.*') os.makedirs(torch.hub._get_torch_home(), exist_ok=True) diff --git a/test/nn/models/test_node2vec.py b/test/nn/models/test_node2vec.py index 39d956e41d3a..3d3b5c6b0814 100644 --- a/test/nn/models/test_node2vec.py +++ b/test/nn/models/test_node2vec.py @@ -1,6 +1,7 @@ import pytest import torch +import torch_geometric.typing from torch_geometric.nn import Node2Vec from torch_geometric.testing import is_full_test, withCUDA, withPackage @@ -11,15 +12,14 @@ @pytest.mark.parametrize('q', [1.0, 0.5]) def test_node2vec(device, p, q): edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device=device) + kwargs = dict(embedding_dim=16, walk_length=2, context_size=2, p=p, q=q) - model = Node2Vec( - edge_index, - embedding_dim=16, - walk_length=2, - context_size=2, - p=p, - q=q, - ).to(device) + if not torch_geometric.typing.WITH_TORCH_CLUSTER and q != 1.0: + with pytest.raises(ImportError, match="requires the 'torch-cluster'"): + model = Node2Vec(edge_index, **kwargs) + return + + model = Node2Vec(edge_index, **kwargs).to(device) assert str(model) == 'Node2Vec(3, 16)' assert model(torch.arange(3, device=device)).size() == (3, 16) diff --git a/test/nn/models/test_rect.py b/test/nn/models/test_rect.py index 6364dbe211b9..c16c1f8b9dae 100644 --- a/test/nn/models/test_rect.py +++ b/test/nn/models/test_rect.py @@ -12,7 +12,7 @@ def test_rect(): edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]]) mask = torch.randint(0, 2, (6, ), dtype=torch.bool) - model = RECT_L(8, 16) + model = RECT_L(8, 16, normalize=False) assert str(model) == 'RECT_L(8, 16)' out = model(x, edge_index) diff --git a/test/nn/pool/connect/test_filter_edges.py b/test/nn/pool/connect/test_filter_edges.py index 6f07f2430bba..e349fd9fb894 100644 --- a/test/nn/pool/connect/test_filter_edges.py +++ b/test/nn/pool/connect/test_filter_edges.py @@ -26,7 +26,7 @@ def test_filter_edges(): assert out1.edge_attr.tolist() == [3, 5] assert out1.batch.tolist() == [0, 1] - if torch_geometric.typing.WITH_PT112 and is_full_test(): + if torch_geometric.typing.WITH_PT113 and is_full_test(): jit = torch.jit.script(connect) 
out2 = jit(select_output, edge_index, edge_attr, batch) torch.equal(out1.edge_index, out2.edge_index) diff --git a/test/nn/pool/test_asap.py b/test/nn/pool/test_asap.py index b54f6269a792..0e87a5df36a2 100644 --- a/test/nn/pool/test_asap.py +++ b/test/nn/pool/test_asap.py @@ -29,7 +29,7 @@ def test_asap(): assert out[0].size() == (num_nodes // 2, in_channels) assert out[1].size() == (2, 2) - if torch_geometric.typing.WITH_PT112 and is_full_test(): + if torch_geometric.typing.WITH_PT113 and is_full_test(): torch.jit.script(pool.jittable()) pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN, add_self_loops=True) @@ -46,7 +46,7 @@ def test_asap(): @onlyFullTest -@withPackage('torch>=1.12.0') +@withPackage('torch>=1.13.0') def test_asap_jit_save(): pool = ASAPooling(in_channels=16) pool_jit = pool.jittable() diff --git a/test/nn/pool/test_pan_pool.py b/test/nn/pool/test_pan_pool.py index 95818502d3d6..39f9b138b245 100644 --- a/test/nn/pool/test_pan_pool.py +++ b/test/nn/pool/test_pan_pool.py @@ -25,7 +25,7 @@ def test_pan_pooling(): assert perm.size() == (2, ) assert score.size() == (2, ) - if torch_geometric.typing.WITH_PT112 and is_full_test(): + if torch_geometric.typing.WITH_PT113 and is_full_test(): jit = torch.jit.script(pool) out = jit(x, M) assert torch.allclose(h, out[0]) diff --git a/test/nn/pool/test_sag_pool.py b/test/nn/pool/test_sag_pool.py index 006eb3f92ba9..78edd70b4bd2 100644 --- a/test/nn/pool/test_sag_pool.py +++ b/test/nn/pool/test_sag_pool.py @@ -40,7 +40,7 @@ def test_sag_pooling(): assert out3[0].size() == (2, in_channels) assert out3[1].size() == (2, 2) - if torch_geometric.typing.WITH_PT112 and is_full_test(): + if torch_geometric.typing.WITH_PT113 and is_full_test(): pool1.gnn = pool1.gnn.jittable() jit1 = torch.jit.script(pool1) assert torch.allclose(jit1(x, edge_index)[0], out1[0]) diff --git a/test/nn/pool/test_topk_pool.py b/test/nn/pool/test_topk_pool.py index c9c182ead4f0..c0293eacf8d2 100644 --- a/test/nn/pool/test_topk_pool.py +++ b/test/nn/pool/test_topk_pool.py @@ -49,7 +49,7 @@ def test_topk_pooling(): assert out3[0].size() == (2, in_channels) assert out3[1].size() == (2, 2) - if torch_geometric.typing.WITH_PT112 and is_full_test(): + if torch_geometric.typing.WITH_PT113 and is_full_test(): jit1 = torch.jit.script(pool1) assert torch.allclose(jit1(x, edge_index)[0], out1[0]) diff --git a/test/transforms/test_pad.py b/test/transforms/test_pad.py index 40c8223d9ab8..8deb63949346 100644 --- a/test/transforms/test_pad.py +++ b/test/transforms/test_pad.py @@ -427,24 +427,30 @@ def test_pad_data_exclude_keys(data, add_pad_mask, exclude_keys): exclude_keys=exclude_keys) -@pytest.mark.parametrize('data', [fake_data(), fake_hetero_data(node_types=1)]) -def test_pad_invalid_max_num_nodes(data): +@pytest.mark.parametrize('is_hetero', [False, True]) +def test_pad_invalid_max_num_nodes(is_hetero): + if is_hetero: + data = fake_hetero_data(node_types=1) + else: + data = fake_data() + transform = Pad(max_num_nodes=data.num_nodes - 1) - with pytest.raises(AssertionError, - match='The number of nodes after padding'): + with pytest.raises(AssertionError, match="after padding"): transform(data) -@pytest.mark.parametrize( - 'data', - [fake_data(), fake_hetero_data(node_types=1, edge_types=1)]) -def test_pad_invalid_max_num_edges(data): +@pytest.mark.parametrize('is_hetero', [False, True]) +def test_pad_invalid_max_num_edges(is_hetero): + if is_hetero: + data = fake_hetero_data(node_types=1, edge_types=1) + else: + data = fake_data() + transform = 
Pad(max_num_nodes=data.num_nodes + 10, max_num_edges=data.num_edges - 1) - with pytest.raises(AssertionError, - match='The number of edges after padding'): + with pytest.raises(AssertionError, match="after padding"): transform(data) @@ -452,7 +458,7 @@ def test_pad_num_nodes_not_complete(): data = fake_hetero_data(node_types=2, edge_types=1) transform = Pad(max_num_nodes={'v0': 100}) - with pytest.raises(AssertionError, match='The number of v1 nodes'): + with pytest.raises(AssertionError, match="The number of v1 nodes"): transform(data) From 48df293c7a87ac998cb33deec239af28e7794e3f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 3 Jul 2023 16:04:46 +0700 Subject: [PATCH 1332/2432] Fix custom `NeighborLoader` test (#7680) --- test/loader/test_neighbor_loader.py | 51 +++++++++++++---------------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index f4a48a5cbe7e..b33209f35172 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -6,7 +6,6 @@ import pytest import torch -import torch_geometric.typing from torch_geometric.data import Data, HeteroData from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GraphConv, to_hetero @@ -25,7 +24,6 @@ from torch_geometric.utils import ( is_undirected, sort_edge_index, - to_torch_csc_tensor, to_torch_csr_tensor, to_undirected, ) @@ -390,7 +388,8 @@ def test_custom_neighbor_loader(): feature_store.put_tensor(x, group_name='author', attr_name='x', index=None) # COO: - edge_index = get_random_edge_index(100, 100, 500) + edge_index = get_random_edge_index(100, 100, 500, coalesce=True) + edge_index = edge_index[:, torch.randperm(edge_index.size(1))] data['paper', 'to', 'paper'].edge_index = edge_index coo = (edge_index[0], edge_index[1]) graph_store.put_edge_index(edge_index=coo, @@ -398,7 +397,7 @@ def test_custom_neighbor_loader(): layout='coo', size=(100, 100)) # CSR: - edge_index = get_random_edge_index(100, 200, 1000) + edge_index = get_random_edge_index(100, 200, 1000, coalesce=True) data['paper', 'to', 'author'].edge_index = edge_index adj = to_torch_csr_tensor(edge_index, size=(100, 200)) csr = (adj.crow_indices(), adj.col_indices()) @@ -407,17 +406,16 @@ def test_custom_neighbor_loader(): layout='csr', size=(100, 200)) # CSC: - if torch_geometric.typing.WITH_PT112: - edge_index = get_random_edge_index(200, 100, 1000) - data['author', 'to', 'paper'].edge_index = edge_index - adj = to_torch_csc_tensor(edge_index, size=(200, 100)) - csc = (adj.row_indices(), adj.ccol_indices()) - graph_store.put_edge_index(edge_index=csc, - edge_type=('author', 'to', 'paper'), - layout='csc', size=(200, 100)) + edge_index = get_random_edge_index(200, 100, 1000, coalesce=True) + data['author', 'to', 'paper'].edge_index = edge_index + adj = to_torch_csr_tensor(edge_index.flip([0]), size=(100, 200)) + csc = (adj.col_indices(), adj.crow_indices()) + graph_store.put_edge_index(edge_index=csc, + edge_type=('author', 'to', 'paper'), + layout='csc', size=(200, 100)) # COO (sorted): - edge_index = get_random_edge_index(200, 200, 100) + edge_index = get_random_edge_index(200, 200, 100, coalesce=True) edge_index = edge_index[:, edge_index[1].argsort()] data['author', 'to', 'author'].edge_index = edge_index coo = (edge_index[0], edge_index[1]) @@ -438,25 +436,20 @@ def test_custom_neighbor_loader(): assert len(loader1) == len(loader2) for batch1, batch2 in zip(loader1, loader2): - # loader2 explicitly adds `num_nodes` to 
the batch + # `loader2` explicitly adds `num_nodes` to the batch: assert len(batch1) + 1 == len(batch2) assert batch1['paper'].batch_size == batch2['paper'].batch_size - # Mapped indices of neighbors may be differently sorted: - assert torch.allclose(batch1['paper'].x.sort()[0], - batch2['paper'].x.sort()[0]) - assert torch.allclose(batch1['author'].x.sort()[0], - batch2['author'].x.sort()[0]) - - assert (batch1['paper', 'to', 'paper'].edge_index.size() == batch1[ - 'paper', 'to', 'paper'].edge_index.size()) - assert (batch1['paper', 'to', 'author'].edge_index.size() == batch1[ - 'paper', 'to', 'author'].edge_index.size()) - if torch_geometric.typing.WITH_PT112: - assert (batch1['author', 'to', 'paper'].edge_index.size() == - batch1['author', 'to', 'paper'].edge_index.size()) - assert (batch1['author', 'to', 'author'].edge_index.size() == batch1[ - 'author', 'to', 'author'].edge_index.size()) + # Mapped indices of neighbors may be differently sorted ... + for node_type in data.node_types: + assert torch.allclose( + batch1[node_type].x.sort()[0], + batch2[node_type].x.sort()[0], + ) + + # ... but should sample the exact same number of edges: + for edge_type in data.edge_types: + assert batch1[edge_type].num_edges == batch2[edge_type].num_edges @onlyOnline From ad31b54da09f1b657dafa8e0143f76bdaca970ea Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 4 Jul 2023 05:48:26 +0700 Subject: [PATCH 1333/2432] Fix `RECT_L` TorchScript test (#7679) --- test/nn/models/test_rect.py | 2 +- torch_geometric/nn/conv/gcn_conv.py | 17 +++++++++++------ torch_geometric/nn/models/rect.py | 15 ++++++++------- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/test/nn/models/test_rect.py b/test/nn/models/test_rect.py index c16c1f8b9dae..6364dbe211b9 100644 --- a/test/nn/models/test_rect.py +++ b/test/nn/models/test_rect.py @@ -12,7 +12,7 @@ def test_rect(): edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]]) mask = torch.randint(0, 2, (6, ), dtype=torch.bool) - model = RECT_L(8, 16, normalize=False) + model = RECT_L(8, 16) assert str(model) == 'RECT_L(8, 16)' out = model(x, edge_index) diff --git a/torch_geometric/nn/conv/gcn_conv.py b/torch_geometric/nn/conv/gcn_conv.py index 12cf423970fb..5a99573fcb40 100644 --- a/torch_geometric/nn/conv/gcn_conv.py +++ b/torch_geometric/nn/conv/gcn_conv.py @@ -161,15 +161,20 @@ class GCNConv(MessagePassing): edge weights :math:`(|\mathcal{E}|)` *(optional)* - **output:** node features :math:`(|\mathcal{V}|, F_{out})` """ - _cached_edge_index: Optional[OptPairTensor] _cached_adj_t: Optional[SparseTensor] - def __init__(self, in_channels: int, out_channels: int, - improved: bool = False, cached: bool = False, - add_self_loops: bool = True, normalize: bool = True, - bias: bool = True, **kwargs): - + def __init__( + self, + in_channels: int, + out_channels: int, + improved: bool = False, + cached: bool = False, + add_self_loops: bool = True, + normalize: bool = True, + bias: bool = True, + **kwargs, + ): kwargs.setdefault('aggr', 'add') super().__init__(**kwargs) diff --git a/torch_geometric/nn/models/rect.py b/torch_geometric/nn/models/rect.py index 012199b826c2..96d8f9cfea87 100644 --- a/torch_geometric/nn/models/rect.py +++ b/torch_geometric/nn/models/rect.py @@ -1,3 +1,5 @@ +import copy + import torch import torch.nn.functional as F from torch import Tensor @@ -92,9 +94,10 @@ def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover edge_index_type = typing.split(',')[1].strip() class EdgeIndexJittable(torch.nn.Module): - def 
__init__(self, child): + def __init__(self, child: RECT_L): super().__init__() - self.child = child + self.child = copy.deepcopy(child) + self.child.conv = self.child.conv.jittable() def reset_parameters(self): self.child.reset_parameters() @@ -114,9 +117,10 @@ def get_semantic_labels(self, x: Tensor, y: Tensor, return self.child.get_semantic_labels(x, y, mask) class SparseTensorJittable(torch.nn.Module): - def __init__(self, child): + def __init__(self, child: RECT_L): super().__init__() - self.child = child + self.child = copy.deepcopy(child) + self.child.conv = self.child.conv.jittable() def reset_parameters(self): self.child.reset_parameters() @@ -135,9 +139,6 @@ def get_semantic_labels(self, x: Tensor, y: Tensor, mask: Tensor) -> Tensor: return self.child.get_semantic_labels(x, y, mask) - if self.conv.jittable is not None: - self.conv = self.conv.jittable() - if 'Tensor' == edge_index_type: jittable_module = EdgeIndexJittable(self) elif 'SparseTensor' == edge_index_type: From a7d3ca45a2b5bab73a13003aea63d765930f63aa Mon Sep 17 00:00:00 2001 From: rusty1s Date: Wed, 5 Jul 2023 06:50:31 +0700 Subject: [PATCH 1334/2432] update --- pyproject.toml | 1 + torch_geometric/graphgym/utils/agg_runs.py | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5155c92022b4..796fd9bc2540 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,6 +104,7 @@ name="torch_geometric" based_on_style = "pep8" split_before_named_assigns = false blank_line_before_nested_class_or_def = false +allow_split_before_dict_value = false [tool.pyright] include = ["torch_geometric/utils/*"] diff --git a/torch_geometric/graphgym/utils/agg_runs.py b/torch_geometric/graphgym/utils/agg_runs.py index d577d566040c..46e932d71b69 100644 --- a/torch_geometric/graphgym/utils/agg_runs.py +++ b/torch_geometric/graphgym/utils/agg_runs.py @@ -131,10 +131,8 @@ def agg_runs(dir, metric_best='auto'): results_best[split] = [stats_best] else: results_best[split] += [stats_best] - results = {k: v for k, v in results.items() if v is not None} # rm None - results_best = {k: v - for k, v in results_best.items() - if v is not None} # rm None + results = {k: v for k, v in results.items() if v is not None} + results_best = {k: v for k, v in results_best.items() if v is not None} for key in results: for i in range(len(results[key])): results[key][i] = agg_dict_list(results[key][i]) From decec47e3c9cd67b8ddbd58a15edd00de0c530ef Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Jul 2023 00:10:51 +0000 Subject: [PATCH 1335/2432] [pre-commit.ci] pre-commit suggestions (#7685) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/adrienverge/yamllint.git: v1.30.0 → v1.32.0](https://github.com/adrienverge/yamllint.git/compare/v1.30.0...v1.32.0) - [github.com/google/yapf: v0.32.0 → v0.40.0](https://github.com/google/yapf/compare/v0.32.0...v0.40.0) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- .pre-commit-config.yaml | 4 +-- pyproject.toml | 1 - test/loader/test_link_neighbor_loader.py | 4 +-- .../contrib/explain/pgm_explainer.py | 6 ++--- .../contrib/nn/models/rbcd_attack.py | 4 +-- torch_geometric/data/separate.py | 27 ++++++++++++++----- torch_geometric/nn/conv/hgt_conv.py | 3 ++- torch_geometric/nn/dense/linear.py | 3 ++- torch_geometric/nn/fx.py | 3 ++- 
.../nn/pool/connect/filter_edges.py | 4 +-- torch_geometric/nn/to_hetero_module.py | 3 ++- torch_geometric/sampler/utils.py | 6 +++-- torch_geometric/utils/hetero.py | 3 ++- torch_geometric/utils/trim_to_layer.py | 3 ++- 14 files changed, 48 insertions(+), 26 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a8fbac80de01..b48bce0a0aca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: )$ - repo: https://github.com/adrienverge/yamllint.git - rev: v1.30.0 + rev: v1.32.0 hooks: - id: yamllint name: Lint yaml @@ -36,7 +36,7 @@ repos: # args: [--min=10, .] - repo: https://github.com/google/yapf - rev: v0.32.0 + rev: v0.40.0 hooks: - id: yapf name: Format code diff --git a/pyproject.toml b/pyproject.toml index 796fd9bc2540..5155c92022b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,7 +104,6 @@ name="torch_geometric" based_on_style = "pep8" split_before_named_assigns = false blank_line_before_nested_class_or_def = false -allow_split_before_dict_value = false [tool.pyright] include = ["torch_geometric/utils/*"] diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 053454bf2c46..80cda9c23f32 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -528,5 +528,5 @@ def test_hetero_link_neighbor_loader_triplet(disjoint, temporal, amount): if temporal: for i in range(batch_size): - assert (node_store.time[node_store.batch == i].max() <= - node_store.seed_time[i]) + assert (node_store.time[node_store.batch == i].max() + <= node_store.seed_time[i]) diff --git a/torch_geometric/contrib/explain/pgm_explainer.py b/torch_geometric/contrib/explain/pgm_explainer.py index ef0993b83800..dd0151853095 100644 --- a/torch_geometric/contrib/explain/pgm_explainer.py +++ b/torch_geometric/contrib/explain/pgm_explainer.py @@ -308,9 +308,9 @@ def _explain_node( pred_perturb = model(x_perturb, edge_index, **kwargs) softmax_pred_perturb = torch.softmax(pred_perturb, dim=1) sample_bool = np.ones(shape=(len(neighbors), )) - sample_bool[( - (softmax_pred_perturb[neighbors, target] + self.pred_threshold) - >= softmax_pred[neighbors, target]).cpu()] = 0 + sample_bool[((softmax_pred_perturb[neighbors, target] + + self.pred_threshold) + >= softmax_pred[neighbors, target]).cpu()] = 0 samples.append(seeds) pred_samples.append(sample_bool) diff --git a/torch_geometric/contrib/nn/models/rbcd_attack.py b/torch_geometric/contrib/nn/models/rbcd_attack.py index bb8fbf5b5c36..573281421fff 100644 --- a/torch_geometric/contrib/nn/models/rbcd_attack.py +++ b/torch_geometric/contrib/nn/models/rbcd_attack.py @@ -376,8 +376,8 @@ def _sample_random_block(self, budget: int = 0): def _resample_random_block(self, budget: int): # Keep at most half of the block (i.e. resample low weights) sorted_idx = torch.argsort(self.block_edge_weight) - keep_above = (self.block_edge_weight <= - self.coeffs['eps']).sum().long() + keep_above = (self.block_edge_weight + <= self.coeffs['eps']).sum().long() if keep_above < sorted_idx.size(0) // 2: keep_above = sorted_idx.size(0) // 2 sorted_idx = sorted_idx[keep_above:] diff --git a/torch_geometric/data/separate.py b/torch_geometric/data/separate.py index c16e94cbeafa..51429b9b7639 100644 --- a/torch_geometric/data/separate.py +++ b/torch_geometric/data/separate.py @@ -84,9 +84,17 @@ def _separate( elif isinstance(value, Mapping): # Recursively separate elements of dictionaries. 
return { - key: _separate(key, elem, idx, slices[key], - incs[key] if decrement else None, batch, store, - decrement) + key: + _separate( + key, + elem, + idx, + slices=slices[key], + incs=incs[key] if decrement else None, + batch=batch, + store=store, + decrement=decrement, + ) for key, elem in value.items() } @@ -102,9 +110,16 @@ def _separate( and isinstance(slices, Sequence)): # Recursively separate elements of lists of Tensors/SparseTensors. return [ - _separate(key, elem, idx, slices[i], - incs[i] if decrement else None, batch, store, decrement) - for i, elem in enumerate(value) + _separate( + key, + elem, + idx, + slices=slices[i], + incs=incs[i] if decrement else None, + batch=batch, + store=store, + decrement=decrement, + ) for i, elem in enumerate(value) ] else: diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index 97071cbe42a8..c65332406c8b 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ b/torch_geometric/nn/conv/hgt_conv.py @@ -210,7 +210,8 @@ def forward( # Transform output node embeddings: a_dict = self.out_lin({ - k: torch.nn.functional.gelu(v) if v is not None else v + k: + torch.nn.functional.gelu(v) if v is not None else v for k, v in out_dict.items() }) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 90c0d8e9e5c9..aeb82c2201f9 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -377,7 +377,8 @@ def __init__( self.kwargs = kwargs self.lins = torch.nn.ModuleDict({ - key: Linear(channels, self.out_channels, **kwargs) + key: + Linear(channels, self.out_channels, **kwargs) for key, channels in self.in_channels.items() }) diff --git a/torch_geometric/nn/fx.py b/torch_geometric/nn/fx.py index 573e3f2bc506..860def82ae34 100644 --- a/torch_geometric/nn/fx.py +++ b/torch_geometric/nn/fx.py @@ -193,7 +193,8 @@ def _init_submodule(self, module: Module, target: str) -> Module: ]) elif isinstance(module, ModuleDict): return ModuleDict({ - key: self._init_submodule(submodule, f'{target}.{key}') + key: + self._init_submodule(submodule, f'{target}.{key}') for key, submodule in module.items() }) else: diff --git a/torch_geometric/nn/pool/connect/filter_edges.py b/torch_geometric/nn/pool/connect/filter_edges.py index 1eb42dcaab02..6530a2530966 100644 --- a/torch_geometric/nn/pool/connect/filter_edges.py +++ b/torch_geometric/nn/pool/connect/filter_edges.py @@ -53,8 +53,8 @@ def forward( batch: Optional[Tensor] = None, ) -> ConnectOutput: - if (not torch.jit.is_scripting() and select_output.num_clusters != - select_output.cluster_index.size(0)): + if (not torch.jit.is_scripting() and select_output.num_clusters + != select_output.cluster_index.size(0)): raise ValueError(f"'{self.__class__.__name__}' requires each " f"cluster to contain only one node") diff --git a/torch_geometric/nn/to_hetero_module.py b/torch_geometric/nn/to_hetero_module.py index 61592185979f..b8c40a52f42f 100644 --- a/torch_geometric/nn/to_hetero_module.py +++ b/torch_geometric/nn/to_hetero_module.py @@ -55,7 +55,8 @@ def dict_forward( if not torch_geometric.typing.WITH_PYG_LIB: return { - key: F.linear(x_dict[key], self.hetero_module.weight[i].T) + + key: + F.linear(x_dict[key], self.hetero_module.weight[i].t()) + self.hetero_module.bias[i] for i, key in enumerate(self.types) } diff --git a/torch_geometric/sampler/utils.py b/torch_geometric/sampler/utils.py index 35cf9392e7e7..830f5143e26b 100644 --- a/torch_geometric/sampler/utils.py +++ b/torch_geometric/sampler/utils.py @@ -149,5 +149,7 @@ def 
remap_keys( exclude: Optional[List[X]] = None, ) -> Dict[Union[X, Y], Any]: exclude = exclude or [] - return {(k if k in exclude else mapping.get(k, k)): v - for k, v in inputs.items()} + return { + k if k in exclude else mapping.get(k, k): v + for k, v in inputs.items() + } diff --git a/torch_geometric/utils/hetero.py b/torch_geometric/utils/hetero.py index bb74a56c46b3..7715a40422e1 100644 --- a/torch_geometric/utils/hetero.py +++ b/torch_geometric/utils/hetero.py @@ -30,7 +30,8 @@ def learn_sklearn_heuristic(): n_feats, "input features and", out_feats, "outuput feats") x_dict = { - 'v' + str(i): torch.randn( + 'v' + str(i): + torch.randn( (num_nodes_per_type, n_feats)).cuda() for i in range(num_types) } diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index da294fbf5acc..885d6ba7c90f 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -54,7 +54,8 @@ def trim_to_layer( for k, v in x.items() } edge_index = { - k: trim_adj( + k: + trim_adj( v, layer, num_sampled_nodes_per_hop[k[0]], From eda29c81cb661d7dfecd62177b7faf8b0cfde67c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 6 Jul 2023 13:44:28 +0700 Subject: [PATCH 1336/2432] Add `FlopsCount` support via `fvcore` (#7693) Fixes https://github.com/pyg-team/pytorch_geometric/discussions/7394 --- CHANGELOG.md | 1 + test/nn/test_fvcore.py | 23 +++++++++++++++++++++++ torch_geometric/nn/dense/linear.py | 4 ++-- 3 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 test/nn/test_fvcore.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 08a3673dbd7a..3df285b590a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `FlopsCount` support via `fvcore` ([#7693](https://github.com/pyg-team/pytorch_geometric/pull/7693)) - Added back support for PyTorch >= 1.11.0 ([#7656](https://github.com/pyg-team/pytorch_geometric/pull/7656)) - Added `Data.sort()` and `HeteroData.sort()` functionalities ([#7649](https://github.com/pyg-team/pytorch_geometric/pull/7649)) - Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643), [#7647](https://github.com/pyg-team/pytorch_geometric/pull/7647)) diff --git a/test/nn/test_fvcore.py b/test/nn/test_fvcore.py new file mode 100644 index 000000000000..c91f84f7da1b --- /dev/null +++ b/test/nn/test_fvcore.py @@ -0,0 +1,23 @@ +import torch + +from torch_geometric.nn import GraphSAGE +from torch_geometric.testing import get_random_edge_index, withPackage + + +@withPackage('fvcore') +def test_fvcore(): + from fvcore.nn import FlopCountAnalysis + + x = torch.randn(10, 16) + edge_index = get_random_edge_index(10, 10, num_edges=100) + + model = GraphSAGE(16, 32, num_layers=2) + + flops = FlopCountAnalysis(model, (x, edge_index)) + + # TODO (matthias) Currently, aggregations are not properly registered. 
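+    # Only the dense `Linear` calls are counted: each `SAGEConv` applies two
+    # linear transforms (one to the aggregated neighbor features and one to
+    # the root features), i.e. 2 * num_nodes * in_channels * out_channels
+    # multiply-accumulates per layer, while the aggregation itself adds
+    # nothing to the total until it is registered.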
+ assert flops.by_module()['convs.0'] == 2 * 10 * 16 * 32 + assert flops.by_module()['convs.1'] == 2 * 10 * 32 * 32 + assert flops.total() == (flops.by_module()['convs.0'] + + flops.by_module()['convs.1']) + assert flops.by_operator()['linear'] == flops.total() diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index aeb82c2201f9..5f1806486fb3 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -143,12 +143,12 @@ def initialize_parameters(self, module, input): def _save_to_state_dict(self, destination, prefix, keep_vars): if (is_uninitialized_parameter(self.weight) - or torch.onnx.is_in_onnx_export()): + or torch.onnx.is_in_onnx_export() or keep_vars): destination[prefix + 'weight'] = self.weight else: destination[prefix + 'weight'] = self.weight.detach() if self.bias is not None: - if torch.onnx.is_in_onnx_export(): + if torch.onnx.is_in_onnx_export() or keep_vars: destination[prefix + 'bias'] = self.bias else: destination[prefix + 'bias'] = self.bias.detach() From c2869a09fea4c35f561477f2eebb21916a2f67a9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 6 Jul 2023 14:42:42 +0700 Subject: [PATCH 1337/2432] Replace `torch.Tensor`calls (1/3) (#7694) --- torch_geometric/data/feature_store.py | 2 +- .../graphgym/contrib/layer/generalconv.py | 10 ++--- torch_geometric/nn/aggr/basic.py | 4 +- torch_geometric/nn/aggr/quantile.py | 2 +- torch_geometric/nn/aggr/scaler.py | 8 ++-- torch_geometric/nn/aggr/utils.py | 4 +- torch_geometric/nn/conv/agnn_conv.py | 2 +- torch_geometric/nn/conv/antisymmetric_conv.py | 4 +- torch_geometric/nn/conv/arma_conv.py | 8 ++-- torch_geometric/nn/conv/cugraph/gat_conv.py | 6 +-- torch_geometric/nn/conv/cugraph/rgcn_conv.py | 12 +++--- torch_geometric/nn/conv/dna_conv.py | 5 +-- torch_geometric/nn/conv/eg_conv.py | 2 +- torch_geometric/nn/conv/feast_conv.py | 4 +- torch_geometric/nn/conv/gat_conv.py | 10 ++--- torch_geometric/nn/conv/gatv2_conv.py | 6 +-- torch_geometric/nn/conv/gcn2_conv.py | 4 +- torch_geometric/nn/conv/gcn_conv.py | 2 +- torch_geometric/nn/conv/general_conv.py | 2 +- torch_geometric/nn/conv/gin_conv.py | 8 ++-- torch_geometric/nn/conv/gmm_conv.py | 2 +- torch_geometric/nn/conv/han_conv.py | 6 +-- torch_geometric/nn/conv/hgt_conv.py | 4 +- torch_geometric/nn/conv/hypergraph_conv.py | 6 +-- torch_geometric/nn/conv/nn_conv.py | 2 +- torch_geometric/nn/conv/pan_conv.py | 2 +- torch_geometric/nn/conv/pdn_conv.py | 2 +- torch_geometric/nn/conv/rgat_conv.py | 40 +++++++++---------- torch_geometric/nn/conv/rgcn_conv.py | 16 ++++---- torch_geometric/nn/conv/spline_conv.py | 4 +- torch_geometric/nn/conv/supergat_conv.py | 8 ++-- torch_geometric/nn/conv/tag_conv.py | 2 +- torch_geometric/nn/dense/dense_gat_conv.py | 8 ++-- torch_geometric/nn/dense/dense_gcn_conv.py | 2 +- torch_geometric/nn/dense/dense_gin_conv.py | 4 +- torch_geometric/nn/dense/linear.py | 8 ++-- torch_geometric/nn/models/attentive_fp.py | 6 +-- .../nn/models/deep_graph_infomax.py | 2 +- torch_geometric/nn/models/dimenet.py | 4 +- torch_geometric/nn/models/linkx.py | 4 +- torch_geometric/nn/models/re_net.py | 4 +- torch_geometric/nn/norm/batch_norm.py | 8 ++-- torch_geometric/nn/norm/graph_norm.py | 6 +-- torch_geometric/nn/norm/layer_norm.py | 8 ++-- torch_geometric/nn/norm/msg_norm.py | 4 +- torch_geometric/nn/pool/__init__.py | 18 ++++----- torch_geometric/nn/pool/mem_pool.py | 2 +- torch_geometric/nn/pool/pan_pool.py | 4 +- torch_geometric/nn/pool/select/topk.py | 2 +- 
.../nn/to_hetero_with_bases_transformer.py | 2 +- torch_geometric/transforms/gdc.py | 11 ++--- torch_geometric/utils/geodesic.py | 8 ++-- torch_geometric/utils/to_dense_adj.py | 2 +- 53 files changed, 156 insertions(+), 160 deletions(-) diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index 6e2b33cd91b8..10c7f1efd90f 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -198,7 +198,7 @@ def __setitem__(self, key: str, value: Any): .. code-block:: python view = store.view(TensorAttr(group_name)) - view['index'] = torch.Tensor([1, 2, 3]) + view['index'] = torch.tensor([1, 2, 3]) """ self.__setattr__(key, value) diff --git a/torch_geometric/graphgym/contrib/layer/generalconv.py b/torch_geometric/graphgym/contrib/layer/generalconv.py index 72ca2e96f7cf..c7f3263deb2c 100644 --- a/torch_geometric/graphgym/contrib/layer/generalconv.py +++ b/torch_geometric/graphgym/contrib/layer/generalconv.py @@ -21,13 +21,13 @@ def __init__(self, in_channels, out_channels, improved=False, cached=False, self.cached = cached self.normalize = cfg.gnn.normalize_adj - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) + self.weight = Parameter(torch.empty(in_channels, out_channels)) if cfg.gnn.self_msg == 'concat': - self.weight_self = Parameter( - torch.Tensor(in_channels, out_channels)) + self.weight_self = Parameter(torch.empty(in_channels, + out_channels)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) @@ -137,7 +137,7 @@ def __init__(self, in_channels, out_channels, edge_dim, improved=False, self.linear_self = nn.Linear(in_channels, out_channels, bias=False) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/aggr/basic.py b/torch_geometric/nn/aggr/basic.py index 324d4a7b4135..2e874dba749c 100644 --- a/torch_geometric/nn/aggr/basic.py +++ b/torch_geometric/nn/aggr/basic.py @@ -185,7 +185,7 @@ def __init__(self, t: float = 1.0, learn: bool = False, self.semi_grad = semi_grad self.channels = channels - self.t = Parameter(torch.Tensor(channels)) if learn else t + self.t = Parameter(torch.empty(channels)) if learn else t self.reset_parameters() def reset_parameters(self): @@ -251,7 +251,7 @@ def __init__(self, p: float = 1.0, learn: bool = False, channels: int = 1): self.learn = learn self.channels = channels - self.p = Parameter(torch.Tensor(channels)) if learn else p + self.p = Parameter(torch.empty(channels)) if learn else p self.reset_parameters() def reset_parameters(self): diff --git a/torch_geometric/nn/aggr/quantile.py b/torch_geometric/nn/aggr/quantile.py index 9f70dd600be2..6c20b2a815c9 100644 --- a/torch_geometric/nn/aggr/quantile.py +++ b/torch_geometric/nn/aggr/quantile.py @@ -62,7 +62,7 @@ def __init__(self, q: Union[float, List[float]], f"got ('{interpolation}')") self._q = q - self.register_buffer('q', torch.Tensor(qs).view(-1, 1)) + self.register_buffer('q', torch.tensor(qs).view(-1, 1)) self.interpolation = interpolation self.fill_value = fill_value diff --git a/torch_geometric/nn/aggr/scaler.py b/torch_geometric/nn/aggr/scaler.py index 4269e319d425..1656e70e99d9 100644 --- a/torch_geometric/nn/aggr/scaler.py +++ b/torch_geometric/nn/aggr/scaler.py @@ -60,11 +60,11 @@ def __init__( self.init_avg_deg_log = float(((bin_degree + 1).log() * deg).sum()) / N if 
train_norm: - self.avg_deg_lin = torch.nn.Parameter(torch.Tensor(1)) - self.avg_deg_log = torch.nn.Parameter(torch.Tensor(1)) + self.avg_deg_lin = torch.nn.Parameter(torch.empty(1)) + self.avg_deg_log = torch.nn.Parameter(torch.empty(1)) else: - self.register_buffer('avg_deg_lin', torch.Tensor(1)) - self.register_buffer('avg_deg_log', torch.Tensor(1)) + self.register_buffer('avg_deg_lin', torch.empty(1)) + self.register_buffer('avg_deg_log', torch.empty(1)) self.reset_parameters() diff --git a/torch_geometric/nn/aggr/utils.py b/torch_geometric/nn/aggr/utils.py index 81c13266b431..4e5b66fe63cd 100644 --- a/torch_geometric/nn/aggr/utils.py +++ b/torch_geometric/nn/aggr/utils.py @@ -147,7 +147,7 @@ class InducedSetAttentionBlock(torch.nn.Module): def __init__(self, channels: int, num_induced_points: int, heads: int = 1, layer_norm: bool = True, dropout: float = 0.0): super().__init__() - self.ind = Parameter(torch.Tensor(1, num_induced_points, channels)) + self.ind = Parameter(torch.empty(1, num_induced_points, channels)) self.mab1 = MultiheadAttentionBlock(channels, heads, layer_norm, dropout) self.mab2 = MultiheadAttentionBlock(channels, heads, layer_norm, @@ -197,7 +197,7 @@ def __init__(self, channels: int, num_seed_points: int = 1, heads: int = 1, layer_norm: bool = True, dropout: float = 0.0): super().__init__() self.lin = Linear(channels, channels) - self.seed = Parameter(torch.Tensor(1, num_seed_points, channels)) + self.seed = Parameter(torch.empty(1, num_seed_points, channels)) self.mab = MultiheadAttentionBlock(channels, heads, layer_norm, dropout) self.reset_parameters() diff --git a/torch_geometric/nn/conv/agnn_conv.py b/torch_geometric/nn/conv/agnn_conv.py index 4150448ffb7e..4b36599ae83c 100644 --- a/torch_geometric/nn/conv/agnn_conv.py +++ b/torch_geometric/nn/conv/agnn_conv.py @@ -50,7 +50,7 @@ def __init__(self, requires_grad: bool = True, add_self_loops: bool = True, self.add_self_loops = add_self_loops if requires_grad: - self.beta = Parameter(torch.Tensor(1)) + self.beta = Parameter(torch.empty(1)) else: self.register_buffer('beta', torch.ones(1)) diff --git a/torch_geometric/nn/conv/antisymmetric_conv.py b/torch_geometric/nn/conv/antisymmetric_conv.py index 2a6df2cd8e3e..7b308a41a252 100644 --- a/torch_geometric/nn/conv/antisymmetric_conv.py +++ b/torch_geometric/nn/conv/antisymmetric_conv.py @@ -73,12 +73,12 @@ def __init__( if phi is None: phi = GCNConv(in_channels, in_channels, bias=False) - self.W = Parameter(torch.Tensor(in_channels, in_channels)) + self.W = Parameter(torch.empty(in_channels, in_channels)) self.register_buffer('eye', torch.eye(in_channels)) self.phi = phi if bias: - self.bias = Parameter(torch.Tensor(in_channels)) + self.bias = Parameter(torch.empty(in_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/arma_conv.py b/torch_geometric/nn/conv/arma_conv.py index 375d1b2996ee..1e2d7a23a692 100644 --- a/torch_geometric/nn/conv/arma_conv.py +++ b/torch_geometric/nn/conv/arma_conv.py @@ -76,10 +76,10 @@ def __init__(self, in_channels: int, out_channels: int, K, T, F_in, F_out = num_stacks, num_layers, in_channels, out_channels T = 1 if self.shared_weights else T - self.weight = Parameter(torch.Tensor(max(1, T - 1), K, F_out, F_out)) + self.weight = Parameter(torch.empty(max(1, T - 1), K, F_out, F_out)) if in_channels > 0: - self.init_weight = Parameter(torch.Tensor(K, F_in, F_out)) - self.root_weight = Parameter(torch.Tensor(T, K, F_in, F_out)) + self.init_weight = Parameter(torch.empty(K, F_in, F_out)) + 
self.root_weight = Parameter(torch.empty(T, K, F_in, F_out)) else: self.init_weight = torch.nn.parameter.UninitializedParameter() self.root_weight = torch.nn.parameter.UninitializedParameter() @@ -87,7 +87,7 @@ def __init__(self, in_channels: int, out_channels: int, self.initialize_parameters) if bias: - self.bias = Parameter(torch.Tensor(T, K, 1, F_out)) + self.bias = Parameter(torch.empty(T, K, 1, F_out)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/cugraph/gat_conv.py b/torch_geometric/nn/conv/cugraph/gat_conv.py index c9e6bec38950..e298f282d236 100644 --- a/torch_geometric/nn/conv/cugraph/gat_conv.py +++ b/torch_geometric/nn/conv/cugraph/gat_conv.py @@ -44,12 +44,12 @@ def __init__( self.negative_slope = negative_slope self.lin = Linear(in_channels, heads * out_channels, bias=False) - self.att = Parameter(torch.Tensor(2 * heads * out_channels)) + self.att = Parameter(torch.empty(2 * heads * out_channels)) if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/cugraph/rgcn_conv.py b/torch_geometric/nn/conv/cugraph/rgcn_conv.py index 71fd63205798..178a4cdaa321 100644 --- a/torch_geometric/nn/conv/cugraph/rgcn_conv.py +++ b/torch_geometric/nn/conv/cugraph/rgcn_conv.py @@ -49,17 +49,17 @@ def __init__(self, in_channels: int, out_channels: int, num_relations: int, if num_bases is not None: self.weight = Parameter( - torch.Tensor(num_bases + dim_root_weight, in_channels, - out_channels)) - self.comp = Parameter(torch.Tensor(num_relations, num_bases)) + torch.empty(num_bases + dim_root_weight, in_channels, + out_channels)) + self.comp = Parameter(torch.empty(num_relations, num_bases)) else: self.weight = Parameter( - torch.Tensor(num_relations + dim_root_weight, in_channels, - out_channels)) + torch.empty(num_relations + dim_root_weight, in_channels, + out_channels)) self.register_parameter('comp', None) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/dna_conv.py b/torch_geometric/nn/conv/dna_conv.py index f7389987e3ca..50bc6fbcf157 100644 --- a/torch_geometric/nn/conv/dna_conv.py +++ b/torch_geometric/nn/conv/dna_conv.py @@ -22,11 +22,10 @@ def __init__(self, in_channels, out_channels, groups=1, bias=True): self.groups = groups self.weight = Parameter( - torch.Tensor(groups, in_channels // groups, - out_channels // groups)) + torch.empty(groups, in_channels // groups, out_channels // groups)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/eg_conv.py b/torch_geometric/nn/conv/eg_conv.py index 28a10dc2db33..964a74361940 100644 --- a/torch_geometric/nn/conv/eg_conv.py +++ b/torch_geometric/nn/conv/eg_conv.py @@ -106,7 +106,7 @@ def __init__(self, in_channels: int, out_channels: int, num_heads * num_bases * len(aggregators)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/feast_conv.py b/torch_geometric/nn/conv/feast_conv.py index 
b9aca2de404f..3fee81886ecb 100644 --- a/torch_geometric/nn/conv/feast_conv.py +++ b/torch_geometric/nn/conv/feast_conv.py @@ -63,10 +63,10 @@ def __init__(self, in_channels: int, out_channels: int, heads: int = 1, weight_initializer='uniform') self.u = Linear(in_channels, heads, bias=False, weight_initializer='uniform') - self.c = Parameter(torch.Tensor(heads)) + self.c = Parameter(torch.empty(heads)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/gat_conv.py b/torch_geometric/nn/conv/gat_conv.py index e11d62e43391..475263130541 100644 --- a/torch_geometric/nn/conv/gat_conv.py +++ b/torch_geometric/nn/conv/gat_conv.py @@ -150,21 +150,21 @@ def __init__( weight_initializer='glorot') # The learnable parameters to compute attention coefficients: - self.att_src = Parameter(torch.Tensor(1, heads, out_channels)) - self.att_dst = Parameter(torch.Tensor(1, heads, out_channels)) + self.att_src = Parameter(torch.empty(1, heads, out_channels)) + self.att_dst = Parameter(torch.empty(1, heads, out_channels)) if edge_dim is not None: self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False, weight_initializer='glorot') - self.att_edge = Parameter(torch.Tensor(1, heads, out_channels)) + self.att_edge = Parameter(torch.empty(1, heads, out_channels)) else: self.lin_edge = None self.register_parameter('att_edge', None) if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/gatv2_conv.py b/torch_geometric/nn/conv/gatv2_conv.py index 2de2d95685b6..969ff138ec3d 100644 --- a/torch_geometric/nn/conv/gatv2_conv.py +++ b/torch_geometric/nn/conv/gatv2_conv.py @@ -163,7 +163,7 @@ def __init__( self.lin_r = Linear(in_channels[1], heads * out_channels, bias=bias, weight_initializer='glorot') - self.att = Parameter(torch.Tensor(1, heads, out_channels)) + self.att = Parameter(torch.empty(1, heads, out_channels)) if edge_dim is not None: self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False, @@ -172,9 +172,9 @@ def __init__( self.lin_edge = None if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/gcn2_conv.py b/torch_geometric/nn/conv/gcn2_conv.py index 4329d20bad42..da4e07964e80 100644 --- a/torch_geometric/nn/conv/gcn2_conv.py +++ b/torch_geometric/nn/conv/gcn2_conv.py @@ -92,12 +92,12 @@ def __init__(self, channels: int, alpha: float, theta: float = None, self._cached_edge_index = None self._cached_adj_t = None - self.weight1 = Parameter(torch.Tensor(channels, channels)) + self.weight1 = Parameter(torch.empty(channels, channels)) if shared_weights: self.register_parameter('weight2', None) else: - self.weight2 = Parameter(torch.Tensor(channels, channels)) + self.weight2 = Parameter(torch.empty(channels, channels)) self.reset_parameters() diff --git a/torch_geometric/nn/conv/gcn_conv.py b/torch_geometric/nn/conv/gcn_conv.py index 5a99573fcb40..4efc83902ad7 100644 --- 
a/torch_geometric/nn/conv/gcn_conv.py +++ b/torch_geometric/nn/conv/gcn_conv.py @@ -192,7 +192,7 @@ def __init__( weight_initializer='glorot') if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/general_conv.py b/torch_geometric/nn/conv/general_conv.py index 135e3c379d16..ad78e529f0ce 100644 --- a/torch_geometric/nn/conv/general_conv.py +++ b/torch_geometric/nn/conv/general_conv.py @@ -120,7 +120,7 @@ def __init__( if self.attention: if self.attention_type == 'additive': self.att_msg = Parameter( - torch.Tensor(1, self.heads, self.out_channels)) + torch.empty(1, self.heads, self.out_channels)) elif self.attention_type == 'dot_product': scaler = torch.tensor(out_channels, dtype=torch.float).sqrt() self.register_buffer('scaler', scaler) diff --git a/torch_geometric/nn/conv/gin_conv.py b/torch_geometric/nn/conv/gin_conv.py index 2a9c08c4b36c..1eb22bf53824 100644 --- a/torch_geometric/nn/conv/gin_conv.py +++ b/torch_geometric/nn/conv/gin_conv.py @@ -60,9 +60,9 @@ def __init__(self, nn: Callable, eps: float = 0., train_eps: bool = False, self.nn = nn self.initial_eps = eps if train_eps: - self.eps = torch.nn.Parameter(torch.Tensor([eps])) + self.eps = torch.nn.Parameter(torch.empty(1)) else: - self.register_buffer('eps', torch.Tensor([eps])) + self.register_buffer('eps', torch.empty(1)) self.reset_parameters() def reset_parameters(self): @@ -145,9 +145,9 @@ def __init__(self, nn: torch.nn.Module, eps: float = 0., self.nn = nn self.initial_eps = eps if train_eps: - self.eps = torch.nn.Parameter(torch.Tensor([eps])) + self.eps = torch.nn.Parameter(torch.empty(1)) else: - self.register_buffer('eps', torch.Tensor([eps])) + self.register_buffer('eps', torch.empty(1)) if edge_dim is not None: if isinstance(self.nn, torch.nn.Sequential): nn = self.nn[0] diff --git a/torch_geometric/nn/conv/gmm_conv.py b/torch_geometric/nn/conv/gmm_conv.py index be285247a4ad..203891ff3a51 100644 --- a/torch_geometric/nn/conv/gmm_conv.py +++ b/torch_geometric/nn/conv/gmm_conv.py @@ -111,7 +111,7 @@ def __init__(self, in_channels: Union[int, Tuple[int, int]], weight_initializer='glorot') if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/han_conv.py b/torch_geometric/nn/conv/han_conv.py index 5fa3afb3d240..e7ba600eb44d 100644 --- a/torch_geometric/nn/conv/han_conv.py +++ b/torch_geometric/nn/conv/han_conv.py @@ -84,7 +84,7 @@ def __init__( self.metadata = metadata self.dropout = dropout self.k_lin = nn.Linear(out_channels, out_channels) - self.q = nn.Parameter(torch.Tensor(1, out_channels)) + self.q = nn.Parameter(torch.empty(1, out_channels)) self.proj = nn.ModuleDict() for node_type, in_channels in self.in_channels.items(): @@ -95,8 +95,8 @@ def __init__( dim = out_channels // heads for edge_type in metadata[1]: edge_type = '__'.join(edge_type) - self.lin_src[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim)) - self.lin_dst[edge_type] = nn.Parameter(torch.Tensor(1, heads, dim)) + self.lin_src[edge_type] = nn.Parameter(torch.empty(1, heads, dim)) + self.lin_dst[edge_type] = nn.Parameter(torch.empty(1, heads, dim)) self.reset_parameters() diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py index c65332406c8b..5fcdf244388a 100644 --- a/torch_geometric/nn/conv/hgt_conv.py +++ 
b/torch_geometric/nn/conv/hgt_conv.py @@ -88,14 +88,14 @@ def __init__( is_sorted=True) self.skip = ParameterDict({ - node_type: Parameter(torch.Tensor(1)) + node_type: Parameter(torch.empty(1)) for node_type in self.node_types }) self.p_rel = ParameterDict() for edge_type in self.edge_types: edge_type = '__'.join(edge_type) - self.p_rel[edge_type] = Parameter(torch.Tensor(1, heads)) + self.p_rel[edge_type] = Parameter(torch.empty(1, heads)) self.reset_parameters() diff --git a/torch_geometric/nn/conv/hypergraph_conv.py b/torch_geometric/nn/conv/hypergraph_conv.py index 90158d528a3f..430cf0d6437d 100644 --- a/torch_geometric/nn/conv/hypergraph_conv.py +++ b/torch_geometric/nn/conv/hypergraph_conv.py @@ -104,7 +104,7 @@ def __init__( self.dropout = dropout self.lin = Linear(in_channels, heads * out_channels, bias=False, weight_initializer='glorot') - self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels)) + self.att = Parameter(torch.empty(1, heads, 2 * out_channels)) else: self.heads = 1 self.concat = True @@ -112,9 +112,9 @@ def __init__( weight_initializer='glorot') if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/nn_conv.py b/torch_geometric/nn/conv/nn_conv.py index 514efc84a60b..b73ed8632f9b 100644 --- a/torch_geometric/nn/conv/nn_conv.py +++ b/torch_geometric/nn/conv/nn_conv.py @@ -79,7 +79,7 @@ def __init__(self, in_channels: Union[int, Tuple[int, int]], weight_initializer='uniform') if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/pan_conv.py b/torch_geometric/nn/conv/pan_conv.py index 6bf4fb10417a..d4890696894f 100644 --- a/torch_geometric/nn/conv/pan_conv.py +++ b/torch_geometric/nn/conv/pan_conv.py @@ -52,7 +52,7 @@ def __init__(self, in_channels: int, out_channels: int, filter_size: int, self.filter_size = filter_size self.lin = Linear(in_channels, out_channels) - self.weight = Parameter(torch.Tensor(filter_size + 1)) + self.weight = Parameter(torch.empty(filter_size + 1)) self.reset_parameters() diff --git a/torch_geometric/nn/conv/pdn_conv.py b/torch_geometric/nn/conv/pdn_conv.py index bdb00f42ee0e..94ae606d8c58 100644 --- a/torch_geometric/nn/conv/pdn_conv.py +++ b/torch_geometric/nn/conv/pdn_conv.py @@ -68,7 +68,7 @@ def __init__(self, in_channels: int, out_channels: int, edge_dim: int, ) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter("bias", None) diff --git a/torch_geometric/nn/conv/rgat_conv.py b/torch_geometric/nn/conv/rgat_conv.py index 0a480b1d11b4..9e03804aacc5 100644 --- a/torch_geometric/nn/conv/rgat_conv.py +++ b/torch_geometric/nn/conv/rgat_conv.py @@ -239,17 +239,15 @@ def __init__( # The learnable parameters to compute both attention logits and # attention coefficients: self.q = Parameter( - torch.Tensor(self.heads * self.out_channels, - self.heads * self.dim)) + torch.empty(self.heads * self.out_channels, self.heads * self.dim)) self.k = Parameter( - torch.Tensor(self.heads * self.out_channels, - self.heads * self.dim)) + torch.empty(self.heads * self.out_channels, self.heads * self.dim)) if bias and concat: self.bias = 
Parameter( - torch.Tensor(self.heads * self.dim * self.out_channels)) + torch.empty(self.heads * self.dim * self.out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(self.dim * self.out_channels)) + self.bias = Parameter(torch.empty(self.dim * self.out_channels)) else: self.register_parameter('bias', None) @@ -258,18 +256,18 @@ def __init__( self.heads * self.out_channels, bias=False, weight_initializer='glorot') self.e = Parameter( - torch.Tensor(self.heads * self.out_channels, - self.heads * self.dim)) + torch.empty(self.heads * self.out_channels, + self.heads * self.dim)) else: self.lin_edge = None self.register_parameter('e', None) if num_bases is not None: self.att = Parameter( - torch.Tensor(self.num_relations, self.num_bases)) + torch.empty(self.num_relations, self.num_bases)) self.basis = Parameter( - torch.Tensor(self.num_bases, self.in_channels, - self.heads * self.out_channels)) + torch.empty(self.num_bases, self.in_channels, + self.heads * self.out_channels)) elif num_blocks is not None: assert ( self.in_channels % self.num_blocks == 0 @@ -277,20 +275,20 @@ def __init__( "both 'in_channels' and 'heads * out_channels' must be " "multiple of 'num_blocks' used") self.weight = Parameter( - torch.Tensor(self.num_relations, self.num_blocks, - self.in_channels // self.num_blocks, - (self.heads * self.out_channels) // - self.num_blocks)) + torch.empty(self.num_relations, self.num_blocks, + self.in_channels // self.num_blocks, + (self.heads * self.out_channels) // + self.num_blocks)) else: self.weight = Parameter( - torch.Tensor(self.num_relations, self.in_channels, - self.heads * self.out_channels)) + torch.empty(self.num_relations, self.in_channels, + self.heads * self.out_channels)) self.w = Parameter(torch.ones(self.out_channels)) - self.l1 = Parameter(torch.Tensor(1, self.out_channels)) - self.b1 = Parameter(torch.Tensor(1, self.out_channels)) - self.l2 = Parameter(torch.Tensor(self.out_channels, self.out_channels)) - self.b2 = Parameter(torch.Tensor(1, self.out_channels)) + self.l1 = Parameter(torch.empty(1, self.out_channels)) + self.b1 = Parameter(torch.empty(1, self.out_channels)) + self.l2 = Parameter(torch.empty(self.out_channels, self.out_channels)) + self.b2 = Parameter(torch.empty(1, self.out_channels)) self._alpha = None diff --git a/torch_geometric/nn/conv/rgcn_conv.py b/torch_geometric/nn/conv/rgcn_conv.py index d52daa9e9f47..e49e89456602 100644 --- a/torch_geometric/nn/conv/rgcn_conv.py +++ b/torch_geometric/nn/conv/rgcn_conv.py @@ -134,30 +134,30 @@ def __init__( if num_bases is not None: self.weight = Parameter( - torch.Tensor(num_bases, in_channels[0], out_channels)) - self.comp = Parameter(torch.Tensor(num_relations, num_bases)) + torch.empty(num_bases, in_channels[0], out_channels)) + self.comp = Parameter(torch.empty(num_relations, num_bases)) elif num_blocks is not None: assert (in_channels[0] % num_blocks == 0 and out_channels % num_blocks == 0) self.weight = Parameter( - torch.Tensor(num_relations, num_blocks, - in_channels[0] // num_blocks, - out_channels // num_blocks)) + torch.empty(num_relations, num_blocks, + in_channels[0] // num_blocks, + out_channels // num_blocks)) self.register_parameter('comp', None) else: self.weight = Parameter( - torch.Tensor(num_relations, in_channels[0], out_channels)) + torch.empty(num_relations, in_channels[0], out_channels)) self.register_parameter('comp', None) if root_weight: - self.root = Param(torch.Tensor(in_channels[1], out_channels)) + self.root = Param(torch.empty(in_channels[1], 
out_channels)) else: self.register_parameter('root', None) if bias: - self.bias = Param(torch.Tensor(out_channels)) + self.bias = Param(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/spline_conv.py b/torch_geometric/nn/conv/spline_conv.py index ea740a2b8444..8654495bf847 100644 --- a/torch_geometric/nn/conv/spline_conv.py +++ b/torch_geometric/nn/conv/spline_conv.py @@ -97,7 +97,7 @@ def __init__( if in_channels[0] > 0: self.weight = Parameter( - torch.Tensor(self.K, in_channels[0], out_channels)) + torch.empty(self.K, in_channels[0], out_channels)) else: self.weight = torch.nn.parameter.UninitializedParameter() self._hook = self.register_forward_pre_hook( @@ -108,7 +108,7 @@ def __init__( weight_initializer='uniform') if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/supergat_conv.py b/torch_geometric/nn/conv/supergat_conv.py index ccec8758cfd5..a65b4912e95d 100644 --- a/torch_geometric/nn/conv/supergat_conv.py +++ b/torch_geometric/nn/conv/supergat_conv.py @@ -153,8 +153,8 @@ def __init__(self, in_channels: int, out_channels: int, heads: int = 1, weight_initializer='glorot') if self.attention_type == 'MX': - self.att_l = Parameter(torch.Tensor(1, heads, out_channels)) - self.att_r = Parameter(torch.Tensor(1, heads, out_channels)) + self.att_l = Parameter(torch.empty(1, heads, out_channels)) + self.att_r = Parameter(torch.empty(1, heads, out_channels)) else: # self.attention_type == 'SD' self.register_parameter('att_l', None) self.register_parameter('att_r', None) @@ -162,9 +162,9 @@ def __init__(self, in_channels: int, out_channels: int, heads: int = 1, self.att_x = self.att_y = None # x/y for self-supervision if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/conv/tag_conv.py b/torch_geometric/nn/conv/tag_conv.py index b81be337c03d..2e131202eb1d 100644 --- a/torch_geometric/nn/conv/tag_conv.py +++ b/torch_geometric/nn/conv/tag_conv.py @@ -57,7 +57,7 @@ def __init__(self, in_channels: int, out_channels: int, K: int = 3, ]) if bias: - self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) + self.bias = torch.nn.Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/dense/dense_gat_conv.py b/torch_geometric/nn/dense/dense_gat_conv.py index c6b793685fab..a7202f27d322 100644 --- a/torch_geometric/nn/dense/dense_gat_conv.py +++ b/torch_geometric/nn/dense/dense_gat_conv.py @@ -35,13 +35,13 @@ def __init__( weight_initializer='glorot') # The learnable parameters to compute attention coefficients: - self.att_src = Parameter(torch.Tensor(1, 1, heads, out_channels)) - self.att_dst = Parameter(torch.Tensor(1, 1, heads, out_channels)) + self.att_src = Parameter(torch.empty(1, 1, heads, out_channels)) + self.att_dst = Parameter(torch.empty(1, 1, heads, out_channels)) if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: 
self.register_parameter('bias', None) diff --git a/torch_geometric/nn/dense/dense_gcn_conv.py b/torch_geometric/nn/dense/dense_gcn_conv.py index def1be36b5c8..0e75ad1cd0eb 100644 --- a/torch_geometric/nn/dense/dense_gcn_conv.py +++ b/torch_geometric/nn/dense/dense_gcn_conv.py @@ -26,7 +26,7 @@ def __init__( weight_initializer='glorot') if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/dense/dense_gin_conv.py b/torch_geometric/nn/dense/dense_gin_conv.py index fbfe2ccc0d2a..d33033e24794 100644 --- a/torch_geometric/nn/dense/dense_gin_conv.py +++ b/torch_geometric/nn/dense/dense_gin_conv.py @@ -20,9 +20,9 @@ def __init__( self.nn = nn self.initial_eps = eps if train_eps: - self.eps = torch.nn.Parameter(torch.Tensor([eps])) + self.eps = torch.nn.Parameter(torch.empty(1)) else: - self.register_buffer('eps', torch.Tensor([eps])) + self.register_buffer('eps', torch.empty(1)) self.reset_parameters() def reset_parameters(self): diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 5f1806486fb3..9783d7a37f6a 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -94,14 +94,14 @@ def __init__(self, in_channels: int, out_channels: int, bias: bool = True, self.bias_initializer = bias_initializer if in_channels > 0: - self.weight = Parameter(torch.Tensor(out_channels, in_channels)) + self.weight = Parameter(torch.empty(out_channels, in_channels)) else: self.weight = nn.parameter.UninitializedParameter() self._hook = self.register_forward_pre_hook( self.initialize_parameters) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) @@ -238,9 +238,9 @@ def __init__( self.initialize_parameters) else: self.weight = torch.nn.Parameter( - torch.Tensor(num_types, in_channels, out_channels)) + torch.empty(num_types, in_channels, out_channels)) if kwargs.get('bias', True): - self.bias = Parameter(torch.Tensor(num_types, out_channels)) + self.bias = Parameter(torch.empty(num_types, out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() diff --git a/torch_geometric/nn/models/attentive_fp.py b/torch_geometric/nn/models/attentive_fp.py index 9487806f4611..6fae299d0d27 100644 --- a/torch_geometric/nn/models/attentive_fp.py +++ b/torch_geometric/nn/models/attentive_fp.py @@ -23,13 +23,13 @@ def __init__( self.dropout = dropout - self.att_l = Parameter(torch.Tensor(1, out_channels)) - self.att_r = Parameter(torch.Tensor(1, in_channels)) + self.att_l = Parameter(torch.empty(1, out_channels)) + self.att_r = Parameter(torch.empty(1, in_channels)) self.lin1 = Linear(in_channels + edge_dim, out_channels, False) self.lin2 = Linear(out_channels, out_channels, False) - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) self.reset_parameters() diff --git a/torch_geometric/nn/models/deep_graph_infomax.py b/torch_geometric/nn/models/deep_graph_infomax.py index 36ff63fdc017..70708770136f 100644 --- a/torch_geometric/nn/models/deep_graph_infomax.py +++ b/torch_geometric/nn/models/deep_graph_infomax.py @@ -36,7 +36,7 @@ def __init__( self.summary = summary self.corruption = corruption - self.weight = Parameter(torch.Tensor(hidden_channels, hidden_channels)) + self.weight = Parameter(torch.empty(hidden_channels, hidden_channels)) 
self.reset_parameters() diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index c0fda9669452..92fdd9da9cc2 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -56,7 +56,7 @@ def __init__(self, num_radial: int, cutoff: float = 5.0, self.cutoff = cutoff self.envelope = Envelope(envelope_exponent) - self.freq = torch.nn.Parameter(torch.Tensor(num_radial)) + self.freq = torch.nn.Parameter(torch.empty(num_radial)) self.reset_parameters() @@ -186,7 +186,7 @@ def __init__( self.lin_ji = Linear(hidden_channels, hidden_channels) self.W = torch.nn.Parameter( - torch.Tensor(hidden_channels, num_bilinear, hidden_channels)) + torch.empty(hidden_channels, num_bilinear, hidden_channels)) self.layers_before_skip = torch.nn.ModuleList([ ResidualLayer(hidden_channels, act) for _ in range(num_before_skip) diff --git a/torch_geometric/nn/models/linkx.py b/torch_geometric/nn/models/linkx.py index 5804f532c3ba..b9691201261e 100644 --- a/torch_geometric/nn/models/linkx.py +++ b/torch_geometric/nn/models/linkx.py @@ -17,9 +17,9 @@ def __init__(self, in_channels: int, out_channels: int, bias: bool = True): self.in_channels = in_channels self.out_channels = out_channels - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) + self.weight = Parameter(torch.empty(in_channels, out_channels)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) diff --git a/torch_geometric/nn/models/re_net.py b/torch_geometric/nn/models/re_net.py index cb4ec8e2bcc8..d9c23d65658c 100644 --- a/torch_geometric/nn/models/re_net.py +++ b/torch_geometric/nn/models/re_net.py @@ -62,8 +62,8 @@ def __init__( self.seq_len = seq_len self.dropout = dropout - self.ent = Parameter(torch.Tensor(num_nodes, hidden_channels)) - self.rel = Parameter(torch.Tensor(num_rels, hidden_channels)) + self.ent = Parameter(torch.empty(num_nodes, hidden_channels)) + self.rel = Parameter(torch.empty(num_rels, hidden_channels)) self.sub_gru = GRU(3 * hidden_channels, hidden_channels, num_layers, batch_first=True, bias=bias) diff --git a/torch_geometric/nn/norm/batch_norm.py b/torch_geometric/nn/norm/batch_norm.py index 6932cfad62cb..7aa6e9fd5a13 100644 --- a/torch_geometric/nn/norm/batch_norm.py +++ b/torch_geometric/nn/norm/batch_norm.py @@ -133,17 +133,17 @@ def __init__( self.track_running_stats = track_running_stats if self.affine: - self.weight = Parameter(torch.Tensor(num_types, in_channels)) - self.bias = Parameter(torch.Tensor(num_types, in_channels)) + self.weight = Parameter(torch.empty(num_types, in_channels)) + self.bias = Parameter(torch.empty(num_types, in_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) if self.track_running_stats: self.register_buffer('running_mean', - torch.Tensor(num_types, in_channels)) + torch.empty(num_types, in_channels)) self.register_buffer('running_var', - torch.Tensor(num_types, in_channels)) + torch.empty(num_types, in_channels)) self.register_buffer('num_batches_tracked', torch.tensor(0)) else: self.register_buffer('running_mean', None) diff --git a/torch_geometric/nn/norm/graph_norm.py b/torch_geometric/nn/norm/graph_norm.py index 02d37a860911..3373b32f7d62 100644 --- a/torch_geometric/nn/norm/graph_norm.py +++ b/torch_geometric/nn/norm/graph_norm.py @@ -33,9 +33,9 @@ def __init__(self, in_channels: int, eps: float = 1e-5): self.in_channels = in_channels self.eps = eps - self.weight 
= torch.nn.Parameter(torch.Tensor(in_channels)) - self.bias = torch.nn.Parameter(torch.Tensor(in_channels)) - self.mean_scale = torch.nn.Parameter(torch.Tensor(in_channels)) + self.weight = torch.nn.Parameter(torch.empty(in_channels)) + self.bias = torch.nn.Parameter(torch.empty(in_channels)) + self.mean_scale = torch.nn.Parameter(torch.empty(in_channels)) self.reset_parameters() diff --git a/torch_geometric/nn/norm/layer_norm.py b/torch_geometric/nn/norm/layer_norm.py index fdaaeeb760df..7fb56d71905f 100644 --- a/torch_geometric/nn/norm/layer_norm.py +++ b/torch_geometric/nn/norm/layer_norm.py @@ -51,8 +51,8 @@ def __init__( self.mode = mode if affine: - self.weight = Parameter(torch.Tensor(in_channels)) - self.bias = Parameter(torch.Tensor(in_channels)) + self.weight = Parameter(torch.empty(in_channels)) + self.bias = Parameter(torch.empty(in_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) @@ -151,8 +151,8 @@ def __init__( self.affine = affine if affine: - self.weight = Parameter(torch.Tensor(num_types, in_channels)) - self.bias = Parameter(torch.Tensor(num_types, in_channels)) + self.weight = Parameter(torch.empty(num_types, in_channels)) + self.bias = Parameter(torch.empty(num_types, in_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) diff --git a/torch_geometric/nn/norm/msg_norm.py b/torch_geometric/nn/norm/msg_norm.py index c02693dcdc07..af4678e9cab3 100644 --- a/torch_geometric/nn/norm/msg_norm.py +++ b/torch_geometric/nn/norm/msg_norm.py @@ -22,9 +22,7 @@ class MessageNorm(torch.nn.Module): """ def __init__(self, learn_scale: bool = False): super().__init__() - - self.scale = Parameter(torch.Tensor([1.0]), requires_grad=learn_scale) - + self.scale = Parameter(torch.empty(1), requires_grad=learn_scale) self.reset_parameters() def reset_parameters(self): diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py index 00c0d2756e7e..ad3d9e2fedde 100644 --- a/torch_geometric/nn/pool/__init__.py +++ b/torch_geometric/nn/pool/__init__.py @@ -41,7 +41,7 @@ def fps( import torch from torch_geometric.nn import fps - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) batch = torch.tensor([0, 0, 0, 0]) index = fps(x, batch, ratio=0.5) @@ -82,9 +82,9 @@ def knn( import torch from torch_geometric.nn import knn - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) + y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]]) batch_y = torch.tensor([0, 0]) assign_index = knn(x, y, 2, batch_x, batch_y) @@ -135,7 +135,7 @@ def knn_graph( import torch from torch_geometric.nn import knn_graph - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) batch = torch.tensor([0, 0, 0, 0]) edge_index = knn_graph(x, k=2, batch=batch, loop=False) @@ -192,9 +192,9 @@ def radius( import torch from torch_geometric.nn import radius - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) + y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]]) batch_y = torch.tensor([0, 0]) assign_index = radius(x, y, 1.5, batch_x, batch_y) @@ -244,7 +244,7 @@ def radius_graph( import torch from 
torch_geometric.nn import radius_graph - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) batch = torch.tensor([0, 0, 0, 0]) edge_index = radius_graph(x, r=1.5, batch=batch, loop=False) @@ -296,9 +296,9 @@ def nearest( import torch from torch_geometric.nn import nearest - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + x = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) + y = torch.tensor([[-1.0, 0.0], [1.0, 0.0]]) batch_y = torch.tensor([0, 0]) cluster = nearest(x, y, batch_x, batch_y) diff --git a/torch_geometric/nn/pool/mem_pool.py b/torch_geometric/nn/pool/mem_pool.py index 00ea93808900..c0b161797ca4 100644 --- a/torch_geometric/nn/pool/mem_pool.py +++ b/torch_geometric/nn/pool/mem_pool.py @@ -46,7 +46,7 @@ def __init__(self, in_channels: int, out_channels: int, heads: int, self.num_clusters = num_clusters self.tau = tau - self.k = Parameter(torch.Tensor(heads, num_clusters, in_channels)) + self.k = Parameter(torch.empty(heads, num_clusters, in_channels)) self.conv = Conv2d(heads, 1, kernel_size=1, padding=0, bias=False) self.lin = Linear(in_channels, out_channels, bias=False) diff --git a/torch_geometric/nn/pool/pan_pool.py b/torch_geometric/nn/pool/pan_pool.py index 4bcac3920275..a8b1ccc6791e 100644 --- a/torch_geometric/nn/pool/pan_pool.py +++ b/torch_geometric/nn/pool/pan_pool.py @@ -53,8 +53,8 @@ def __init__( self.min_score = min_score self.multiplier = multiplier - self.p = Parameter(torch.Tensor(in_channels)) - self.beta = Parameter(torch.Tensor(2)) + self.p = Parameter(torch.empty(in_channels)) + self.beta = Parameter(torch.empty(2)) self.select = SelectTopK(1, ratio, min_score, nonlinearity) self.connect = FilterEdges() diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py index 1679096e76e8..670c223f980c 100644 --- a/torch_geometric/nn/pool/select/topk.py +++ b/torch_geometric/nn/pool/select/topk.py @@ -131,7 +131,7 @@ def __init__( self.min_score = min_score self.act = activation_resolver(act) - self.weight = torch.nn.Parameter(torch.Tensor(1, in_channels)) + self.weight = torch.nn.Parameter(torch.empty(1, in_channels)) self.reset_parameters() diff --git a/torch_geometric/nn/to_hetero_with_bases_transformer.py b/torch_geometric/nn/to_hetero_with_bases_transformer.py index 726f1634b18e..01410d9675a1 100644 --- a/torch_geometric/nn/to_hetero_with_bases_transformer.py +++ b/torch_geometric/nn/to_hetero_with_bases_transformer.py @@ -345,7 +345,7 @@ def hook(module, inputs, output): # We learn a single scalar weight for each individual edge type, # which is used to weight the output message based on edge type: conv.edge_type_weight = Parameter( - torch.Tensor(1, num_relations, device=device)) + torch.empty(1, num_relations, device=device)) conv.register_message_forward_hook(hook) self.convs.append(conv) diff --git a/torch_geometric/transforms/gdc.py b/torch_geometric/transforms/gdc.py index a9edd428288b..348be9fe6e5d 100644 --- a/torch_geometric/transforms/gdc.py +++ b/torch_geometric/transforms/gdc.py @@ -475,8 +475,8 @@ def __expm__(self, matrix: Tensor, symmetric: bool) -> Tensor: e, V = torch.linalg.eigh(matrix, UPLO='U') diff_mat = V @ torch.diag(e.exp()) @ V.t() else: - diff_mat_np = expm(matrix.cpu().numpy()) - diff_mat = torch.Tensor(diff_mat_np).to(matrix.device) + diff_mat = torch.from_numpy(expm(matrix.cpu().numpy())) + diff_mat = 
diff_mat.to(matrix.device, matrix.dtype) return diff_mat def __calculate_eps__( @@ -523,16 +523,17 @@ def __neighbors_to_graph__( :rtype: (:class:`LongTensor`, :class:`Tensor`) """ - edge_weight = torch.Tensor(np.concatenate(neighbor_weights)).to(device) + edge_weight = torch.from_numpy(np.concatenate(neighbor_weights)) + edge_weight = edge_weight.to(device, torch.get_default_dtype()) i = np.repeat(np.arange(len(neighbors)), np.fromiter(map(len, neighbors), dtype=int)) j = np.concatenate(neighbors) if normalization == 'col': - edge_index = torch.Tensor(np.vstack([j, i])).to(device) + edge_index = torch.from_numpy(np.vstack([j, i])).to(device) N = len(neighbors) edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N) elif normalization == 'row': - edge_index = torch.Tensor(np.vstack([i, j])).to(device) + edge_index = torch.from_numpy(np.vstack([i, j])).to(device) else: raise ValueError( f"PPR matrix normalization {normalization} unknown.") diff --git a/torch_geometric/utils/geodesic.py b/torch_geometric/utils/geodesic.py index bea66bdce037..d217f28f2949 100644 --- a/torch_geometric/utils/geodesic.py +++ b/torch_geometric/utils/geodesic.py @@ -47,10 +47,10 @@ def geodesic_distance( Example: - >>> pos = torch.Tensor([[0, 0, 0], - ... [2, 0, 0], - ... [0, 2, 0], - ... [2, 2, 0]]) + >>> pos = torch.tensor([[0.0, 0.0, 0.0], + ... [2.0, 0.0, 0.0], + ... [0.0, 2.0, 0.0], + ... [2.0, 2.0, 0.0]]) >>> face = torch.tensor([[0, 0], ... [1, 2], ... [3, 3]]) diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py index 0ab274614712..e2423735a5ff 100644 --- a/torch_geometric/utils/to_dense_adj.py +++ b/torch_geometric/utils/to_dense_adj.py @@ -51,7 +51,7 @@ def to_dense_adj( [0., 0., 0., 0.], [0., 0., 0., 0.]]]) - >>> edge_attr = torch.Tensor([1, 2, 3, 4, 5]) + >>> edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0]) >>> to_dense_adj(edge_index, batch, edge_attr) tensor([[[1., 2.], [3., 0.]], From 085f2cc64dc18cecc88878b9ae3fa0ec7f7e5739 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 6 Jul 2023 14:43:06 +0700 Subject: [PATCH 1338/2432] Replace `torch.Tensor`calls (2/3) (#7695) --- benchmark/runtime/dgl/gat.py | 16 +++++++--------- benchmark/runtime/dgl/gcn.py | 8 ++++---- benchmark/runtime/dgl/rgcn.py | 16 ++++++++-------- docs/source/tutorial/create_gnn.rst | 2 +- examples/hetero/dmgi_unsup.py | 2 +- examples/rgcn_link_pred.py | 4 ++-- graphgym/custom_graphgym/layer/example.py | 8 ++++---- 7 files changed, 27 insertions(+), 29 deletions(-) diff --git a/benchmark/runtime/dgl/gat.py b/benchmark/runtime/dgl/gat.py index c06398c134af..1de2c4cf2714 100644 --- a/benchmark/runtime/dgl/gat.py +++ b/benchmark/runtime/dgl/gat.py @@ -19,10 +19,9 @@ def __init__(self, g, in_channels, out_channels, heads=1, self.negative_slope = negative_slope self.dropout = dropout - self.weight = Parameter(torch.Tensor(in_channels, - heads * out_channels)) - self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels)) - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.weight = Parameter(torch.empty(in_channels, heads * out_channels)) + self.att = Parameter(torch.empty(1, heads, 2 * out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) self.reset_parameters() def reset_parameters(self): @@ -76,11 +75,10 @@ def __init__(self, g, in_channels, out_channels, heads=1, self.heads = heads self.negative_slope = negative_slope self.dropout = dropout - self.weight = Parameter(torch.Tensor(in_channels, - heads * out_channels)) - self.att_l = 
Parameter(torch.Tensor(heads, out_channels, 1)) - self.att_r = Parameter(torch.Tensor(heads, out_channels, 1)) - self.bias = Parameter(torch.Tensor(heads * out_channels)) + self.weight = Parameter(torch.empty(in_channels, heads * out_channels)) + self.att_l = Parameter(torch.empty(heads, out_channels, 1)) + self.att_r = Parameter(torch.empty(heads, out_channels, 1)) + self.bias = Parameter(torch.empty(heads * out_channels)) self.softmax = EdgeSoftmax() self.reset_parameters() diff --git a/benchmark/runtime/dgl/gcn.py b/benchmark/runtime/dgl/gcn.py index 985caa7ef550..d92b6aa4eb3c 100644 --- a/benchmark/runtime/dgl/gcn.py +++ b/benchmark/runtime/dgl/gcn.py @@ -10,8 +10,8 @@ class GCNConv(torch.nn.Module): def __init__(self, g, in_channels, out_channels): super().__init__() self.g = g - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) - self.bias = Parameter(torch.Tensor(out_channels)) + self.weight = Parameter(torch.empty(in_channels, out_channels)) + self.bias = Parameter(torch.empty(out_channels)) self.reset_parameters() def reset_parameters(self): @@ -49,8 +49,8 @@ class GCNSPMVConv(torch.nn.Module): def __init__(self, g, in_channels, out_channels): super().__init__() self.g = g - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) - self.bias = Parameter(torch.Tensor(out_channels)) + self.weight = Parameter(torch.empty(in_channels, out_channels)) + self.bias = Parameter(torch.empty(out_channels)) self.reset_parameters() def reset_parameters(self): diff --git a/benchmark/runtime/dgl/rgcn.py b/benchmark/runtime/dgl/rgcn.py index 4c7125d5a967..6e4ed53e8f5a 100644 --- a/benchmark/runtime/dgl/rgcn.py +++ b/benchmark/runtime/dgl/rgcn.py @@ -16,10 +16,10 @@ def __init__(self, g, in_channels, out_channels, num_relations, num_bases): self.num_relations = num_relations self.num_bases = num_bases - self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels)) - self.att = Param(torch.Tensor(num_relations, num_bases)) - self.root = Param(torch.Tensor(in_channels, out_channels)) - self.bias = Param(torch.Tensor(out_channels)) + self.basis = Param(torch.empty(num_bases, in_channels, out_channels)) + self.att = Param(torch.empty(num_relations, num_bases)) + self.root = Param(torch.empty(in_channels, out_channels)) + self.bias = Param(torch.empty(out_channels)) self.reset_parameters() @@ -88,10 +88,10 @@ def __init__(self, g, in_channels, out_channels, num_relations, num_bases): self.num_relations = num_relations self.num_bases = num_bases - self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels)) - self.att = Param(torch.Tensor(num_relations, num_bases)) - self.root = Param(torch.Tensor(in_channels, out_channels)) - self.bias = Param(torch.Tensor(out_channels)) + self.basis = Param(torch.empty(num_bases, in_channels, out_channels)) + self.att = Param(torch.empty(num_relations, num_bases)) + self.root = Param(torch.empty(in_channels, out_channels)) + self.bias = Param(torch.empty(out_channels)) self.reset_parameters() diff --git a/docs/source/tutorial/create_gnn.rst b/docs/source/tutorial/create_gnn.rst index 09e0f3c79b6f..363a9094fa29 100644 --- a/docs/source/tutorial/create_gnn.rst +++ b/docs/source/tutorial/create_gnn.rst @@ -72,7 +72,7 @@ The full layer implementation is shown below: def __init__(self, in_channels, out_channels): super().__init__(aggr='add') # "Add" aggregation (Step 5). 
self.lin = Linear(in_channels, out_channels, bias=False) - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) self.reset_parameters() diff --git a/examples/hetero/dmgi_unsup.py b/examples/hetero/dmgi_unsup.py index bd042cb33ba4..51dd7c1421c0 100644 --- a/examples/hetero/dmgi_unsup.py +++ b/examples/hetero/dmgi_unsup.py @@ -29,7 +29,7 @@ def __init__(self, num_nodes, in_channels, out_channels, num_relations): self.convs = torch.nn.ModuleList( [GCNConv(in_channels, out_channels) for _ in range(num_relations)]) self.M = torch.nn.Bilinear(out_channels, out_channels, 1) - self.Z = torch.nn.Parameter(torch.Tensor(num_nodes, out_channels)) + self.Z = torch.nn.Parameter(torch.empty(num_nodes, out_channels)) self.reset_parameters() def reset_parameters(self): diff --git a/examples/rgcn_link_pred.py b/examples/rgcn_link_pred.py index 89ecd7e10bcf..71c768bfbcf0 100644 --- a/examples/rgcn_link_pred.py +++ b/examples/rgcn_link_pred.py @@ -24,7 +24,7 @@ class RGCNEncoder(torch.nn.Module): def __init__(self, num_nodes, hidden_channels, num_relations): super().__init__() - self.node_emb = Parameter(torch.Tensor(num_nodes, hidden_channels)) + self.node_emb = Parameter(torch.empty(num_nodes, hidden_channels)) self.conv1 = RGCNConv(hidden_channels, hidden_channels, num_relations, num_blocks=5) self.conv2 = RGCNConv(hidden_channels, hidden_channels, num_relations, @@ -47,7 +47,7 @@ def forward(self, edge_index, edge_type): class DistMultDecoder(torch.nn.Module): def __init__(self, num_relations, hidden_channels): super().__init__() - self.rel_emb = Parameter(torch.Tensor(num_relations, hidden_channels)) + self.rel_emb = Parameter(torch.empty(num_relations, hidden_channels)) self.reset_parameters() def reset_parameters(self): diff --git a/graphgym/custom_graphgym/layer/example.py b/graphgym/custom_graphgym/layer/example.py index 6a62e86b96e0..bbbcde913284 100644 --- a/graphgym/custom_graphgym/layer/example.py +++ b/graphgym/custom_graphgym/layer/example.py @@ -24,10 +24,10 @@ def __init__(self, in_channels, out_channels, bias=True, **kwargs): self.in_channels = in_channels self.out_channels = out_channels - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) + self.weight = Parameter(torch.empty(in_channels, out_channels)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) @@ -66,10 +66,10 @@ def __init__(self, in_channels, out_channels, bias=True, **kwargs): self.in_channels = in_channels self.out_channels = out_channels - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) + self.weight = Parameter(torch.empty(in_channels, out_channels)) if bias: - self.bias = Parameter(torch.Tensor(out_channels)) + self.bias = Parameter(torch.empty(out_channels)) else: self.register_parameter('bias', None) From 62d7ffbe6e157deb3ec01472eeb992f8e92b3bbf Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Thu, 6 Jul 2023 16:24:50 -0700 Subject: [PATCH 1339/2432] Modify `dtype` in `ogbn-products` GraphSAGE example (#7703) --- examples/ogbn_products_sage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ogbn_products_sage.py b/examples/ogbn_products_sage.py index 7dbb96b481c2..4f1a442ff30a 100644 --- a/examples/ogbn_products_sage.py +++ b/examples/ogbn_products_sage.py @@ -150,7 +150,7 @@ def test(): model.reset_parameters() optimizer = torch.optim.Adam(model.parameters(), lr=0.003) - best_val_acc = final_test_acc = 0 + 
best_val_acc = final_test_acc = 0.0 for epoch in range(1, 21): loss, acc = train(epoch) print(f'Epoch {epoch:02d}, Loss: {loss:.4f}, Approx. Train: {acc:.4f}') From f2b26b085bab19f8bce7c0226e3ca20a9f73876b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 7 Jul 2023 06:27:31 +0700 Subject: [PATCH 1340/2432] Revert `xfail` option in `SNAPDataset` tests (#7704) --- test/datasets/test_snap_dataset.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/datasets/test_snap_dataset.py b/test/datasets/test_snap_dataset.py index 1c26e2fd481d..f2c9fad87d8c 100644 --- a/test/datasets/test_snap_dataset.py +++ b/test/datasets/test_snap_dataset.py @@ -1,11 +1,8 @@ -import pytest - from torch_geometric.testing import onlyFullTest, onlyOnline @onlyOnline @onlyFullTest -@pytest.mark.xfail def test_ego_facebook_snap_dataset(get_dataset): dataset = get_dataset(name='ego-facebook') assert str(dataset) == 'SNAP-ego-facebook(10)' @@ -14,7 +11,6 @@ def test_ego_facebook_snap_dataset(get_dataset): @onlyOnline @onlyFullTest -@pytest.mark.xfail def test_soc_slashdot_snap_dataset(get_dataset): dataset = get_dataset(name='soc-Slashdot0811') assert str(dataset) == 'SNAP-soc-slashdot0811(1)' @@ -23,7 +19,6 @@ def test_soc_slashdot_snap_dataset(get_dataset): @onlyOnline @onlyFullTest -@pytest.mark.xfail def test_wiki_vote_snap_dataset(get_dataset): dataset = get_dataset(name='wiki-vote') assert str(dataset) == 'SNAP-wiki-vote(1)' From f80afef6fc27b8534261659ebdfd10efed8f4540 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 7 Jul 2023 06:27:56 +0700 Subject: [PATCH 1341/2432] Replace `torch.Tensor`calls (3/4) (#7697) --- test/data/test_feature_store.py | 4 +- test/distributed/test_local_feature_store.py | 40 +++++++++---------- test/loader/test_dataloader.py | 2 +- test/nn/dense/test_dense_gat_conv.py | 22 +++++----- test/nn/dense/test_dense_gcn_conv.py | 22 +++++----- test/nn/dense/test_dense_gin_conv.py | 22 +++++----- test/nn/dense/test_dense_graph_conv.py | 22 +++++----- test/nn/dense/test_dense_sage_conv.py | 22 +++++----- test/nn/pool/select/test_select_topk.py | 10 ++--- test/nn/pool/test_edge_pool.py | 10 +++-- test/nn/pool/test_topk_pool.py | 6 +-- test/nn/test_model_hub.py | 2 +- test/nn/test_parameter_dict.py | 10 ++--- test/nn/test_resolver.py | 2 +- test/nn/unpool/test_knn_interpolate.py | 20 ++++++++-- test/transforms/test_compose.py | 4 +- test/transforms/test_distance.py | 13 ++++-- test/transforms/test_generate_mesh_normals.py | 16 ++++---- test/transforms/test_grid_sampling.py | 8 +++- test/transforms/test_linear_transformation.py | 8 ++-- test/transforms/test_one_hot_degree.py | 18 ++++++--- test/transforms/test_polar.py | 18 ++++++--- test/transforms/test_random_flip.py | 6 +-- test/transforms/test_random_jitter.py | 16 ++++---- test/transforms/test_random_shear.py | 6 +-- test/transforms/test_sample_points.py | 17 +++++--- test/utils/test_convert.py | 20 +++++----- test/utils/test_dropout.py | 2 +- test/utils/test_geodesic.py | 31 ++++++++------ test/utils/test_normalized_cut.py | 13 +++--- test/utils/test_sparse.py | 16 ++++---- test/utils/test_subgraph.py | 17 ++++---- test/utils/test_to_dense_adj.py | 16 ++++++-- 33 files changed, 259 insertions(+), 202 deletions(-) diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py index b690531d5296..95f02a59c640 100644 --- a/test/data/test_feature_store.py +++ b/test/data/test_feature_store.py @@ -24,7 +24,7 @@ def __init__(self): def test_feature_store(): store = MyFeatureStore() - tensor = torch.Tensor([[0, 0, 0], 
[1, 1, 1], [2, 2, 2]]) + tensor = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]) group_name = 'A' attr_name = 'feat' @@ -94,7 +94,7 @@ def test_feature_store(): def test_feature_store_override(): store = MyFeatureStoreNoGroupName() - tensor = torch.Tensor([[0, 0, 0], [1, 1, 1], [2, 2, 2]]) + tensor = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]) attr_name = 'feat' index = torch.tensor([0, 1, 2]) diff --git a/test/distributed/test_local_feature_store.py b/test/distributed/test_local_feature_store.py index 557ca91ca641..5dd1a1c441d2 100644 --- a/test/distributed/test_local_feature_store.py +++ b/test/distributed/test_local_feature_store.py @@ -6,16 +6,16 @@ def test_local_feature_store_global_id(): store = LocalFeatureStore() - feat = torch.Tensor([ - [0, 0, 0], - [1, 1, 1], - [2, 2, 2], - [3, 3, 3], - [4, 4, 4], - [5, 5, 5], - [6, 6, 6], - [7, 7, 7], - [8, 8, 8], + feat = torch.tensor([ + [0.0, 0.0, 0.0], + [1.0, 1.0, 1.0], + [2.0, 2.0, 2.0], + [3.0, 3.0, 3.0], + [4.0, 4.0, 4.0], + [5.0, 5.0, 5.0], + [6.0, 6.0, 6.0], + [7.0, 7.0, 7.0], + [8.0, 8.0, 8.0], ]) paper_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) @@ -32,16 +32,16 @@ def test_local_feature_store_global_id(): def test_local_feature_store_utils(): store = LocalFeatureStore() - feat = torch.Tensor([ - [0, 0, 0], - [1, 1, 1], - [2, 2, 2], - [3, 3, 3], - [4, 4, 4], - [5, 5, 5], - [6, 6, 6], - [7, 7, 7], - [8, 8, 8], + feat = torch.tensor([ + [0.0, 0.0, 0.0], + [1.0, 1.0, 1.0], + [2.0, 2.0, 2.0], + [3.0, 3.0, 3.0], + [4.0, 4.0, 4.0], + [5.0, 5.0, 5.0], + [6.0, 6.0, 6.0], + [7.0, 7.0, 7.0], + [8.0, 8.0, 8.0], ]) paper_global_id = torch.tensor([1, 2, 3, 5, 8, 4]) diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index 7dad99c0ffd1..ebe2bed755ae 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -22,7 +22,7 @@ def test_dataloader(num_workers, device): if num_workers > 0 and device != torch.device('cpu'): return - x = torch.Tensor([[1], [1], [1]]) + x = torch.tensor([[1.0], [1.0], [1.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) face = torch.tensor([[0], [1], [2]]) y = 2. 
diff --git a/test/nn/dense/test_dense_gat_conv.py b/test/nn/dense/test_dense_gat_conv.py index 94a5f7fd5068..d54ee28b496e 100644 --- a/test/nn/dense/test_dense_gat_conv.py +++ b/test/nn/dense/test_dense_gat_conv.py @@ -25,16 +25,16 @@ def test_dense_gat_conv(heads, concat): sparse_out = sparse_conv(x, edge_index) x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels) - adj = torch.Tensor([ + adj = torch.tensor([ [ - [0, 1, 0], - [1, 0, 1], - [0, 1, 0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 1.0], + [0.0, 1.0, 0.0], ], [ - [0, 1, 0], - [1, 0, 0], - [0, 0, 0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], ], ]) mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool) @@ -55,10 +55,10 @@ def test_dense_gat_conv_with_broadcasting(): conv = DenseGATConv(channels, channels, heads=4) x = torch.randn(batch_size, num_nodes, channels) - adj = torch.Tensor([ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + adj = torch.tensor([ + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ]) assert conv(x, adj).size() == (batch_size, num_nodes, 64) diff --git a/test/nn/dense/test_dense_gcn_conv.py b/test/nn/dense/test_dense_gcn_conv.py index 20237b80addf..80b6714ec204 100644 --- a/test/nn/dense/test_dense_gcn_conv.py +++ b/test/nn/dense/test_dense_gcn_conv.py @@ -22,16 +22,16 @@ def test_dense_gcn_conv(): assert sparse_out.size() == (5, channels) x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels) - adj = torch.Tensor([ + adj = torch.tensor([ [ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ], [ - [0, 1, 0], - [1, 0, 0], - [0, 0, 0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], ], ]) mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool) @@ -53,10 +53,10 @@ def test_dense_gcn_conv_with_broadcasting(): conv = DenseGCNConv(channels, channels) x = torch.randn(batch_size, num_nodes, channels) - adj = torch.Tensor([ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + adj = torch.tensor([ + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ]) assert conv(x, adj).size() == (batch_size, num_nodes, channels) diff --git a/test/nn/dense/test_dense_gin_conv.py b/test/nn/dense/test_dense_gin_conv.py index 84e347bc4c6f..a22903fbfaa5 100644 --- a/test/nn/dense/test_dense_gin_conv.py +++ b/test/nn/dense/test_dense_gin_conv.py @@ -28,16 +28,16 @@ def test_dense_gin_conv(): assert sparse_out.size() == (5, channels) x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels) - adj = torch.Tensor([ + adj = torch.tensor([ [ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ], [ - [0, 1, 0], - [1, 0, 0], - [0, 0, 0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], ], ]) mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool) @@ -60,10 +60,10 @@ def test_dense_gin_conv_with_broadcasting(): conv = DenseGINConv(nn) x = torch.randn(batch_size, num_nodes, channels) - adj = torch.Tensor([ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + adj = torch.tensor([ + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ]) assert conv(x, adj).size() == (batch_size, num_nodes, channels) diff --git a/test/nn/dense/test_dense_graph_conv.py b/test/nn/dense/test_dense_graph_conv.py index f27a255d8f3d..b08426a6aafb 100644 --- a/test/nn/dense/test_dense_graph_conv.py +++ b/test/nn/dense/test_dense_graph_conv.py @@ -55,16 +55,16 @@ def test_dense_graph_conv_batch(aggr): assert sparse_out.size() == (5, channels) x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels) 
- adj = torch.Tensor([ + adj = torch.tensor([ [ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ], [ - [0, 1, 0], - [1, 0, 0], - [0, 0, 0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], ], ]) mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool) @@ -83,10 +83,10 @@ def test_dense_graph_conv_with_broadcasting(aggr): conv = DenseGraphConv(channels, channels, aggr=aggr) x = torch.randn(batch_size, num_nodes, channels) - adj = torch.Tensor([ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + adj = torch.tensor([ + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ]) assert conv(x, adj).size() == (batch_size, num_nodes, channels) diff --git a/test/nn/dense/test_dense_sage_conv.py b/test/nn/dense/test_dense_sage_conv.py index 2bc64d1f0a76..83cac9f1ad4f 100644 --- a/test/nn/dense/test_dense_sage_conv.py +++ b/test/nn/dense/test_dense_sage_conv.py @@ -22,16 +22,16 @@ def test_dense_sage_conv(): assert sparse_out.size() == (5, channels) x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels) - adj = torch.Tensor([ + adj = torch.tensor([ [ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ], [ - [0, 1, 0], - [1, 0, 0], - [0, 0, 0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], ], ]) mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool) @@ -53,10 +53,10 @@ def test_dense_sage_conv_with_broadcasting(): conv = DenseSAGEConv(channels, channels) x = torch.randn(batch_size, num_nodes, channels) - adj = torch.Tensor([ - [0, 1, 1], - [1, 0, 1], - [1, 1, 0], + adj = torch.tensor([ + [0.0, 1.0, 1.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], ]) assert conv(x, adj).size() == (batch_size, num_nodes, channels) diff --git a/test/nn/pool/select/test_select_topk.py b/test/nn/pool/select/test_select_topk.py index 61f1ce17e9a5..fd00bda49fec 100644 --- a/test/nn/pool/select/test_select_topk.py +++ b/test/nn/pool/select/test_select_topk.py @@ -10,22 +10,22 @@ def test_topk_ratio(): - x = torch.Tensor([2, 4, 5, 6, 2, 9]) + x = torch.tensor([2.0, 4.0, 5.0, 6.0, 2.0, 9.0]) batch = torch.tensor([0, 0, 1, 1, 1, 1]) perm1 = topk(x, 0.5, batch) assert perm1.tolist() == [1, 5, 3] - assert x[perm1].tolist() == [4, 9, 6] + assert x[perm1].tolist() == [4.0, 9.0, 6.0] assert batch[perm1].tolist() == [0, 1, 1] perm2 = topk(x, 2, batch) assert perm2.tolist() == [1, 0, 5, 3] - assert x[perm2].tolist() == [4, 2, 9, 6] + assert x[perm2].tolist() == [4.0, 2.0, 9.0, 6.0] assert batch[perm2].tolist() == [0, 0, 1, 1] perm3 = topk(x, 3, batch) assert perm3.tolist() == [1, 0, 5, 3, 2] - assert x[perm3].tolist() == [4, 2, 9, 6, 5] + assert x[perm3].tolist() == [4.0, 2.0, 9.0, 6.0, 5.0] assert batch[perm3].tolist() == [0, 0, 1, 1, 1] if is_full_test(): @@ -37,8 +37,6 @@ def test_topk_ratio(): @pytest.mark.parametrize('min_score', [None, 2.0]) def test_select_topk(min_score): - if min_score is not None: - return x = torch.randn(6, 16) batch = torch.tensor([0, 0, 1, 1, 1, 1]) diff --git a/test/nn/pool/test_edge_pool.py b/test/nn/pool/test_edge_pool.py index 0e23e845b9e3..4cfe188ca486 100644 --- a/test/nn/pool/test_edge_pool.py +++ b/test/nn/pool/test_edge_pool.py @@ -13,8 +13,10 @@ def test_compute_edge_score_softmax(): assert torch.all(e >= 0) and torch.all(e <= 1) # Test whether all incoming edge scores sum up to one. 
- assert torch.allclose(scatter(e, edge_index[1], reduce='sum'), - torch.Tensor([1, 1, 1, 1, 1, 1])) + assert torch.allclose( + scatter(e, edge_index[1], reduce='sum'), + torch.ones(6), + ) if is_full_test(): jit = torch.jit.script(EdgePooling.compute_edge_score_softmax) @@ -48,7 +50,7 @@ def test_compute_edge_score_sigmoid(): def test_edge_pooling(): - x = torch.Tensor([[0], [1], [2], [3], [4], [5], [-1]]) + x = torch.tensor([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0], [-1.0]]) edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4, 0]]) batch = torch.tensor([0, 0, 0, 0, 1, 1, 0]) @@ -90,7 +92,7 @@ def test_edge_pooling(): assert torch.equal(out[2], batch) # Test edge cases. - x = torch.Tensor([[0], [1], [2], [3], [4], [5]]) + x = torch.tensor([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]]) edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) batch = torch.tensor([0, 0, 0, 0, 1, 1]) diff --git a/test/nn/pool/test_topk_pool.py b/test/nn/pool/test_topk_pool.py index c0293eacf8d2..a7c570f82075 100644 --- a/test/nn/pool/test_topk_pool.py +++ b/test/nn/pool/test_topk_pool.py @@ -9,19 +9,19 @@ def test_filter_adj(): edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 3], [1, 3, 0, 2, 1, 3, 0, 2]]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]) perm = torch.tensor([2, 3]) out = filter_adj(edge_index, edge_attr, perm) assert out[0].tolist() == [[0, 1], [1, 0]] - assert out[1].tolist() == [6, 8] + assert out[1].tolist() == [6.0, 8.0] if is_full_test(): jit = torch.jit.script(filter_adj) out = jit(edge_index, edge_attr, perm) assert out[0].tolist() == [[0, 1], [1, 0]] - assert out[1].tolist() == [6, 8] + assert out[1].tolist() == [6.0, 8.0] def test_topk_pooling(): diff --git a/test/nn/test_model_hub.py b/test/nn/test_model_hub.py index f8404ffdd092..c5ef0d174f32 100644 --- a/test/nn/test_model_hub.py +++ b/test/nn/test_model_hub.py @@ -30,7 +30,7 @@ def model(): def test_model_init(): model = DummyModel( MODEL_NAME, DATASET_NAME, model_kwargs={ - **CONFIG, 'tensor': torch.Tensor([1, 2, 3]) + **CONFIG, 'tensor': torch.randn([1, 2, 3]) }) assert model.model_config == CONFIG diff --git a/test/nn/test_parameter_dict.py b/test/nn/test_parameter_dict.py index 29d44f926667..cdddd101e800 100644 --- a/test/nn/test_parameter_dict.py +++ b/test/nn/test_parameter_dict.py @@ -18,9 +18,9 @@ def test_internal_external_key_conversion(): def test_dot_syntax_keys(): parameter_dict = { - 'param1': torch.nn.Parameter(torch.Tensor(16, 16)), - 'model.param2': torch.nn.Parameter(torch.Tensor(8, 8)), - 'model.sub_model.param3': torch.nn.Parameter(torch.Tensor(4, 4)), + 'param1': torch.nn.Parameter(torch.randn(16, 16)), + 'model.param2': torch.nn.Parameter(torch.randn(8, 8)), + 'model.sub_model.param3': torch.nn.Parameter(torch.randn(4, 4)), } parameter_dict = ParameterDict(parameter_dict) @@ -38,8 +38,8 @@ def test_dot_syntax_keys(): def test_tuple_keys(): parameter_dict = { - ('a', 'b'): torch.nn.Parameter(torch.Tensor(16, 16)), - ('a.b', 'c'): torch.nn.Parameter(torch.Tensor(8, 8)), + ('a', 'b'): torch.nn.Parameter(torch.randn(16, 16)), + ('a.b', 'c'): torch.nn.Parameter(torch.randn(8, 8)), } parameter_dict = ParameterDict(parameter_dict) diff --git a/test/nn/test_resolver.py b/test/nn/test_resolver.py index 321df85d479c..be64edb73823 100644 --- a/test/nn/test_resolver.py +++ b/test/nn/test_resolver.py @@ -72,7 +72,7 @@ def 
test_normalization_resolver(norm_tuple): def test_optimizer_resolver(): - params = [torch.nn.Parameter(torch.Tensor(1))] + params = [torch.nn.Parameter(torch.randn(1))] assert isinstance(optimizer_resolver(torch.optim.SGD(params, lr=0.01)), torch.optim.SGD) diff --git a/test/nn/unpool/test_knn_interpolate.py b/test/nn/unpool/test_knn_interpolate.py index 734ad83c0ebf..43868b5a8c27 100644 --- a/test/nn/unpool/test_knn_interpolate.py +++ b/test/nn/unpool/test_knn_interpolate.py @@ -6,11 +6,23 @@ @withPackage('torch_cluster') def test_knn_interpolate(): - x = torch.Tensor([[1], [10], [100], [-1], [-10], [-100]]) - pos_x = torch.Tensor([[-1, 0], [0, 0], [1, 0], [-2, 0], [0, 0], [2, 0]]) - pos_y = torch.Tensor([[-1, -1], [1, 1], [-2, -2], [2, 2]]) + x = torch.tensor([[1.0], [10.0], [100.0], [-1.0], [-10.0], [-100.0]]) + pos_x = torch.tensor([ + [-1.0, 0.0], + [0.0, 0.0], + [1.0, 0.0], + [-2.0, 0.0], + [0.0, 0.0], + [2.0, 0.0], + ]) + pos_y = torch.tensor([ + [-1.0, -1.0], + [1.0, 1.0], + [-2.0, -2.0], + [2.0, 2.0], + ]) batch_x = torch.tensor([0, 0, 0, 1, 1, 1]) batch_y = torch.tensor([0, 0, 1, 1]) y = knn_interpolate(x, pos_x, pos_y, batch_x, batch_y, k=2) - assert y.tolist() == [[4], [70], [-4], [-70]] + assert y.tolist() == [[4.0], [70.0], [-4.0], [-70.0]] diff --git a/test/transforms/test_compose.py b/test/transforms/test_compose.py index f599dd028d33..541d4bf640b2 100644 --- a/test/transforms/test_compose.py +++ b/test/transforms/test_compose.py @@ -11,13 +11,13 @@ def test_compose(): ' AddSelfLoops()\n' '])') - pos = torch.Tensor([[0, 0], [2, 0], [4, 0]]) + pos = torch.tensor([[0.0, 0.0], [2.0, 0.0], [4.0, 0.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) data = Data(edge_index=edge_index, pos=pos) data = transform(data) assert len(data) == 2 - assert data.pos.tolist() == [[-2, 0], [0, 0], [2, 0]] + assert data.pos.tolist() == [[-2.0, 0.0], [0.0, 0.0], [2.0, 0.0]] assert data.edge_index.size() == (2, 7) diff --git a/test/transforms/test_distance.py b/test/transforms/test_distance.py index 1f4537b49255..7f307814c264 100644 --- a/test/transforms/test_distance.py +++ b/test/transforms/test_distance.py @@ -7,20 +7,25 @@ def test_distance(): assert str(Distance()) == 'Distance(norm=True, max_value=None)' - pos = torch.Tensor([[-1, 0], [0, 0], [2, 0]]) + pos = torch.tensor([[-1.0, 0.0], [0.0, 0.0], [2.0, 0.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_attr = torch.Tensor([1, 1, 1, 1]) + edge_attr = torch.tensor([1.0, 1.0, 1.0, 1.0]) data = Data(edge_index=edge_index, pos=pos) data = Distance(norm=False)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[1], [1], [2], [2]] + assert data.edge_attr.tolist() == [[1.0], [1.0], [2.0], [2.0]] data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) data = Distance(norm=True)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[1, 0.5], [1, 0.5], [1, 1], [1, 1]] + assert data.edge_attr.tolist() == [ + [1.0, 0.5], + [1.0, 0.5], + [1.0, 1.0], + [1.0, 1.0], + ] diff --git a/test/transforms/test_generate_mesh_normals.py b/test/transforms/test_generate_mesh_normals.py index 4ff0b8c5f404..6fcaffd11cfe 100644 --- a/test/transforms/test_generate_mesh_normals.py +++ b/test/transforms/test_generate_mesh_normals.py @@ -8,13 +8,13 @@ def test_generate_mesh_normals(): transform = GenerateMeshNormals() 
assert str(transform) == 'GenerateMeshNormals()' - pos = torch.Tensor([ - [0, 0, 0], - [-2, 1, 0], - [-1, 1, 0], - [0, 1, 0], - [1, 1, 0], - [2, 1, 0], + pos = torch.tensor([ + [0.0, 0.0, 0.0], + [-2.0, 1.0, 0.0], + [-1.0, 1.0, 0.0], + [0.0, 1.0, 0.0], + [1.0, 1.0, 0.0], + [2.0, 1.0, 0.0], ]) face = torch.tensor([ [0, 0, 0, 0], @@ -26,4 +26,4 @@ def test_generate_mesh_normals(): assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.face.tolist() == face.tolist() - assert data.norm.tolist() == [[0, 0, -1]] * 6 + assert data.norm.tolist() == [[0.0, 0.0, -1.0]] * 6 diff --git a/test/transforms/test_grid_sampling.py b/test/transforms/test_grid_sampling.py index eae940e061dc..6893ad3e81cc 100644 --- a/test/transforms/test_grid_sampling.py +++ b/test/transforms/test_grid_sampling.py @@ -9,7 +9,13 @@ def test_grid_sampling(): assert str(GridSampling(5)) == 'GridSampling(size=5)' - pos = torch.Tensor([[0, 2], [3, 2], [3, 2], [2, 8], [2, 6]]) + pos = torch.tensor([ + [0.0, 2.0], + [3.0, 2.0], + [3.0, 2.0], + [2.0, 8.0], + [2.0, 6.0], + ]) y = torch.tensor([0, 1, 1, 2, 2]) batch = torch.tensor([0, 0, 0, 0, 0]) diff --git a/test/transforms/test_linear_transformation.py b/test/transforms/test_linear_transformation.py index 65d9a45b52f7..23b5ac7f44cf 100644 --- a/test/transforms/test_linear_transformation.py +++ b/test/transforms/test_linear_transformation.py @@ -6,11 +6,11 @@ @pytest.mark.parametrize('matrix', [ - [[2., 0.], [0., 2.]], - torch.tensor([[2., 0.], [0., 2.]]), + [[2.0, 0.0], [0.0, 2.0]], + torch.tensor([[2.0, 0.0], [0.0, 2.0]]), ]) def test_linear_transformation(matrix): - pos = torch.Tensor([[-1, 1], [-3, 0], [2, -1]]) + pos = torch.tensor([[-1.0, 1.0], [-3.0, 0.0], [2.0, -1.0]]) transform = LinearTransformation(matrix) assert str(transform) == ('LinearTransformation(\n' @@ -20,7 +20,7 @@ def test_linear_transformation(matrix): out = transform(Data(pos=pos)) assert len(out) == 1 - assert out.pos.tolist() == [[-2, 2], [-6, 0], [4, -2]] + assert torch.allclose(out.pos, 2 * pos) out = transform(Data()) assert len(out) == 0 diff --git a/test/transforms/test_one_hot_degree.py b/test/transforms/test_one_hot_degree.py index 972ce035bfd9..cd73b0d35ea7 100644 --- a/test/transforms/test_one_hot_degree.py +++ b/test/transforms/test_one_hot_degree.py @@ -8,19 +8,27 @@ def test_one_hot_degree(): assert str(OneHotDegree(max_degree=3)) == 'OneHotDegree(3)' edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - x = torch.Tensor([1, 1, 1, 1]) + x = torch.tensor([1.0, 1.0, 1.0, 1.0]) data = Data(edge_index=edge_index, num_nodes=4) data = OneHotDegree(max_degree=3)(data) assert len(data) == 3 assert data.edge_index.tolist() == edge_index.tolist() - assert data.x.tolist() == [[0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0], - [0, 1, 0, 0]] + assert data.x.tolist() == [ + [0.0, 0.0, 0.0, 1.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + ] assert data.num_nodes == 4 data = Data(edge_index=edge_index, x=x) data = OneHotDegree(max_degree=3)(data) assert len(data) == 2 assert data.edge_index.tolist() == edge_index.tolist() - assert data.x.tolist() == [[1, 0, 0, 0, 1], [1, 0, 1, 0, 0], - [1, 0, 1, 0, 0], [1, 0, 1, 0, 0]] + assert data.x.tolist() == [ + [1.0, 0.0, 0.0, 0.0, 1.0], + [1.0, 0.0, 1.0, 0.0, 0.0], + [1.0, 0.0, 1.0, 0.0, 0.0], + [1.0, 0.0, 1.0, 0.0, 0.0], + ] diff --git a/test/transforms/test_polar.py b/test/transforms/test_polar.py index 4fe98560b431..29918413023a 100644 --- a/test/transforms/test_polar.py +++ b/test/transforms/test_polar.py @@ 
-9,22 +9,28 @@ def test_polar(): assert str(Polar()) == 'Polar(norm=True, max_value=None)' - pos = torch.Tensor([[0, 0], [1, 0]]) + pos = torch.tensor([[0.0, 0.0], [1.0, 0.0]]) edge_index = torch.tensor([[0, 1], [1, 0]]) - edge_attr = torch.Tensor([1, 1]) + edge_attr = torch.tensor([1.0, 1.0]) data = Data(edge_index=edge_index, pos=pos) data = Polar(norm=False)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, torch.Tensor([[1, 0], [1, PI]]), - atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 0.0], [1.0, PI]]), + atol=1e-4, + ) data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) data = Polar(norm=True)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0], [1, 1, 0.5]]), atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 1.0, 0.0], [1.0, 1.0, 0.5]]), + atol=1e-4, + ) diff --git a/test/transforms/test_random_flip.py b/test/transforms/test_random_flip.py index f32ed9d20d68..98281ffe5de9 100644 --- a/test/transforms/test_random_flip.py +++ b/test/transforms/test_random_flip.py @@ -7,14 +7,14 @@ def test_random_flip(): assert str(RandomFlip(axis=0)) == 'RandomFlip(axis=0, p=0.5)' - pos = torch.Tensor([[-1, 1], [-3, 0], [2, -1]]) + pos = torch.tensor([[-1.0, 1.0], [-3.0, 0.0], [2.0, -1.0]]) data = Data(pos=pos) data = RandomFlip(axis=0, p=1)(data) assert len(data) == 1 - assert data.pos.tolist() == [[1, 1], [3, 0], [-2, -1]] + assert data.pos.tolist() == [[1.0, 1.0], [3.0, 0.0], [-2.0, -1.0]] data = Data(pos=pos) data = RandomFlip(axis=1, p=1)(data) assert len(data) == 1 - assert data.pos.tolist() == [[-1, -1], [-3, 0], [2, 1]] + assert data.pos.tolist() == [[-1.0, -1.0], [-3.0, 0.0], [2.0, 1.0]] diff --git a/test/transforms/test_random_jitter.py b/test/transforms/test_random_jitter.py index 17753fe3b6d7..8d8b2089a5de 100644 --- a/test/transforms/test_random_jitter.py +++ b/test/transforms/test_random_jitter.py @@ -7,23 +7,23 @@ def test_random_jitter(): assert str(RandomJitter(0.1)) == 'RandomJitter(0.1)' - pos = torch.Tensor([[0, 0], [0, 0], [0, 0], [0, 0]]) + pos = torch.tensor([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]) data = Data(pos=pos) data = RandomJitter(0)(data) assert len(data) == 1 - assert data.pos.tolist() == pos.tolist() + assert torch.allclose(data.pos, pos) data = Data(pos=pos) data = RandomJitter(0.1)(data) assert len(data) == 1 - assert data.pos.min().item() >= -0.1 - assert data.pos.max().item() <= 0.1 + assert data.pos.min() >= -0.1 + assert data.pos.max() <= 0.1 data = Data(pos=pos) data = RandomJitter([0.1, 1])(data) assert len(data) == 1 - assert data.pos[:, 0].min().item() >= -0.1 - assert data.pos[:, 0].max().item() <= 0.1 - assert data.pos[:, 1].min().item() >= -1 - assert data.pos[:, 1].max().item() <= 1 + assert data.pos[:, 0].min() >= -0.1 + assert data.pos[:, 0].max() <= 0.1 + assert data.pos[:, 1].min() >= -1 + assert data.pos[:, 1].max() <= 1 diff --git a/test/transforms/test_random_shear.py b/test/transforms/test_random_shear.py index 33f3f6a0d18a..5bd185f37fc0 100644 --- a/test/transforms/test_random_shear.py +++ b/test/transforms/test_random_shear.py @@ -7,14 +7,14 @@ def test_random_shear(): assert str(RandomShear(0.1)) == 'RandomShear(0.1)' - pos = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + pos = torch.tensor([[-1.0, -1.0], 
[-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) data = Data(pos=pos) data = RandomShear(0)(data) assert len(data) == 1 - assert data.pos.tolist() == pos.tolist() + assert torch.allclose(data.pos, pos) data = Data(pos=pos) data = RandomShear(0.1)(data) assert len(data) == 1 - assert data.pos.tolist() != pos.tolist() + assert not torch.allclose(data.pos, pos) diff --git a/test/transforms/test_sample_points.py b/test/transforms/test_sample_points.py index 4d47171ea892..f20d1dc6c27e 100644 --- a/test/transforms/test_sample_points.py +++ b/test/transforms/test_sample_points.py @@ -7,20 +7,25 @@ def test_sample_points(): assert str(SamplePoints(1024)) == 'SamplePoints(1024)' - pos = torch.Tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]) + pos = torch.tensor([ + [0.0, 0.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [1.0, 1.0, 0.0], + ]) face = torch.tensor([[0, 1], [1, 2], [2, 3]]) data = Data(pos=pos) data.face = face data = SamplePoints(8)(data) assert len(data) == 1 - assert pos[:, 0].min().item() >= 0 and pos[:, 0].max().item() <= 1 - assert pos[:, 1].min().item() >= 0 and pos[:, 1].max().item() <= 1 - assert pos[:, 2].abs().sum().item() == 0 + assert pos[:, 0].min() >= 0 and pos[:, 0].max() <= 1 + assert pos[:, 1].min() >= 0 and pos[:, 1].max() <= 1 + assert pos[:, 2].abs().sum() == 0 data = Data(pos=pos) data.face = face data = SamplePoints(8, include_normals=True)(data) assert len(data) == 2 - assert data.normal[:, :2].abs().sum().item() == 0 - assert data.normal[:, 2].abs().sum().item() == 8 + assert data.normal[:, :2].abs().sum() == 0 + assert data.normal[:, 2].abs().sum() == 8 diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index 1870a7798e1f..ccaeaf50974f 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -32,7 +32,7 @@ def test_to_scipy_sparse_matrix(): assert adj.col.tolist() == edge_index[1].tolist() assert adj.data.tolist() == [1, 1, 1] - edge_attr = torch.Tensor([1, 2, 3]) + edge_attr = torch.tensor([1.0, 2.0, 3.0]) adj = to_scipy_sparse_matrix(edge_index, edge_attr) assert isinstance(adj, scipy.sparse.coo_matrix) is True assert adj.shape == (2, 2) @@ -54,25 +54,25 @@ def test_from_scipy_sparse_matrix(): def test_to_networkx(): import networkx as nx - x = torch.Tensor([[1, 2], [3, 4]]) - pos = torch.Tensor([[0, 0], [1, 1]]) + x = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) + pos = torch.tensor([[0.0, 0.0], [1.0, 1.0]]) edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - edge_attr = torch.Tensor([1, 2, 3]) + edge_attr = torch.tensor([1.0, 2.0, 3.0]) data = Data(x=x, pos=pos, edge_index=edge_index, weight=edge_attr) for remove_self_loops in [True, False]: G = to_networkx(data, node_attrs=['x', 'pos'], edge_attrs=['weight'], remove_self_loops=remove_self_loops) - assert G.nodes[0]['x'] == [1, 2] - assert G.nodes[1]['x'] == [3, 4] - assert G.nodes[0]['pos'] == [0, 0] - assert G.nodes[1]['pos'] == [1, 1] + assert G.nodes[0]['x'] == [1.0, 2.0] + assert G.nodes[1]['x'] == [3.0, 4.0] + assert G.nodes[0]['pos'] == [0.0, 0.0] + assert G.nodes[1]['pos'] == [1.0, 1.0] if remove_self_loops: - assert nx.to_numpy_array(G).tolist() == [[0, 1], [2, 0]] + assert nx.to_numpy_array(G).tolist() == [[0.0, 1.0], [2.0, 0.0]] else: - assert nx.to_numpy_array(G).tolist() == [[3, 1], [2, 0]] + assert nx.to_numpy_array(G).tolist() == [[3.0, 1.0], [2.0, 0.0]] @withPackage('networkx') diff --git a/test/utils/test_dropout.py b/test/utils/test_dropout.py index 99c239cab78f..2c091a0a13c7 100644 --- a/test/utils/test_dropout.py +++ b/test/utils/test_dropout.py @@ -15,7 +15,7 
@@ def test_dropout_adj(): [0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2], ]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) with pytest.warns(UserWarning, match="'dropout_adj' is deprecated"): out = dropout_adj(edge_index, edge_attr, training=False) diff --git a/test/utils/test_geodesic.py b/test/utils/test_geodesic.py index 1a533749aefe..3ea5cca1391b 100644 --- a/test/utils/test_geodesic.py +++ b/test/utils/test_geodesic.py @@ -10,17 +10,22 @@ @withPackage('gdist') @pytest.mark.skip(reason="No way of currently testing this") def test_geodesic_distance(): - pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]]) + pos = torch.tensor([ + [0.0, 0.0, 0.0], + [2.0, 0.0, 0.0], + [0.0, 2.0, 0.0], + [2.0, 2.0, 0.0], + ]) face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t() out = geodesic_distance(pos, face) - expected = [ - [0, 1, 1, sqrt(2)], - [1, 0, sqrt(2), 1], - [1, sqrt(2), 0, 1], - [sqrt(2), 1, 1, 0], - ] - assert torch.allclose(out, torch.tensor(expected)) + expected = torch.tensor([ + [0.0, 1.0, 1.0, sqrt(2)], + [1.0, 0.0, sqrt(2), 1.0], + [1.0, sqrt(2), 0.0, 1.0], + [sqrt(2), 1.0, 1.0, 0.0], + ]) + assert torch.allclose(out, expected) assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1)) out = geodesic_distance(pos, face, norm=False) @@ -30,14 +35,14 @@ def test_geodesic_distance(): [2, 2 * sqrt(2), 0, 2], [2 * sqrt(2), 2, 2, 0], ] - assert torch.allclose(out, torch.tensor(expected)) + assert torch.allclose(out, expected) src = torch.tensor([0, 0, 0, 0]) dest = torch.tensor([0, 1, 2, 3]) out = geodesic_distance(pos, face, src=src, dest=dest) - expected = [0, 1, 1, sqrt(2)] - assert torch.allclose(out, torch.tensor(expected)) + expected = torch.tensor([0.0, 1.0, 1.0, sqrt(2)]) + assert torch.allclose(out, expected) out = geodesic_distance(pos, face, dest=dest) - expected = [0, 0, 0, 0] - assert torch.allclose(out, torch.Tensor(expected)) + expected = torch.tensor([0.0, 0.0, 0.0, 0.0]) + assert torch.allclose(out, expected) diff --git a/test/utils/test_normalized_cut.py b/test/utils/test_normalized_cut.py index 8b4359f44fd7..db992ed795fd 100644 --- a/test/utils/test_normalized_cut.py +++ b/test/utils/test_normalized_cut.py @@ -7,13 +7,14 @@ def test_normalized_cut(): row = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4]) col = torch.tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3]) - edge_attr = torch.Tensor([3, 3, 6, 3, 6, 1, 3, 2, 1, 2]) - expected_output = [4, 4, 5, 2.5, 5, 1, 2.5, 2, 1, 2] + edge_attr = torch.tensor( + [3.0, 3.0, 6.0, 3.0, 6.0, 1.0, 3.0, 2.0, 1.0, 2.0]) + expected = torch.tensor([4.0, 4.0, 5.0, 2.5, 5.0, 1.0, 2.5, 2.0, 1.0, 2.0]) - output = normalized_cut(torch.stack([row, col], dim=0), edge_attr) - assert output.tolist() == expected_output + out = normalized_cut(torch.stack([row, col], dim=0), edge_attr) + assert torch.allclose(out, expected) if is_full_test(): jit = torch.jit.script(normalized_cut) - output = jit(torch.stack([row, col], dim=0), edge_attr) - assert output.tolist() == expected_output + out = jit(torch.stack([row, col], dim=0), edge_attr) + assert torch.allclose(out, expected) diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 9fdf3331dd98..922e04cecfb8 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -18,9 +18,9 @@ def test_dense_to_sparse(): - adj = torch.Tensor([ - [3, 1], - [2, 0], + adj = torch.tensor([ + [3.0, 1.0], + [2.0, 0.0], ]) edge_index, edge_attr = dense_to_sparse(adj) assert edge_index.tolist() == [[0, 0, 1], [0, 1, 0]] @@ -32,12 
+32,12 @@ def test_dense_to_sparse(): assert edge_index.tolist() == [[0, 0, 1], [0, 1, 0]] assert edge_attr.tolist() == [3, 1, 2] - adj = torch.Tensor([[ - [3, 1], - [2, 0], + adj = torch.tensor([[ + [3.0, 1.0], + [2.0, 0.0], ], [ - [0, 1], - [0, 2], + [0.0, 1.0], + [0.0, 2.0], ]]) edge_index, edge_attr = dense_to_sparse(adj) assert edge_index.tolist() == [[0, 0, 1, 2, 3], [0, 1, 0, 3, 3]] diff --git a/test/utils/test_subgraph.py b/test/utils/test_subgraph.py index 92a905c5c675..6b462b1fd12b 100644 --- a/test/utils/test_subgraph.py +++ b/test/utils/test_subgraph.py @@ -31,16 +31,17 @@ def test_subgraph(): [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6], [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5], ]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) + edge_attr = torch.tensor( + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]) - idx = torch.tensor([3, 4, 5], dtype=torch.long) + idx = torch.tensor([3, 4, 5]) mask = index_to_mask(idx, 7) indices = idx.tolist() for subset in [idx, mask, indices]: out = subgraph(subset, edge_index, edge_attr, return_edge_mask=True) assert out[0].tolist() == [[3, 4, 4, 5], [4, 3, 5, 4]] - assert out[1].tolist() == [7, 8, 9, 10] + assert out[1].tolist() == [7.0, 8.0, 9.0, 10.0] assert out[2].tolist() == [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0] out = subgraph(subset, edge_index, edge_attr, relabel_nodes=True) @@ -51,9 +52,9 @@ def test_subgraph(): def test_bipartite_subgraph(): edge_index = torch.tensor([[0, 5, 2, 3, 3, 4, 4, 3, 5, 5, 6], [0, 0, 3, 2, 0, 0, 2, 1, 2, 3, 1]]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - idx = (torch.tensor([2, 3, 5], dtype=torch.long), - torch.tensor([2, 3], dtype=torch.long)) + edge_attr = torch.tensor( + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0]) + idx = (torch.tensor([2, 3, 5]), torch.tensor([2, 3])) mask = (index_to_mask(idx[0], 7), index_to_mask(idx[1], 4)) indices = (idx[0].tolist(), idx[1].tolist()) mixed = (mask[0], idx[1]) @@ -62,13 +63,13 @@ def test_bipartite_subgraph(): out = bipartite_subgraph(subset, edge_index, edge_attr, return_edge_mask=True) assert out[0].tolist() == [[2, 3, 5, 5], [3, 2, 2, 3]] - assert out[1].tolist() == [3, 4, 9, 10] + assert out[1].tolist() == [3.0, 4.0, 9.0, 10.0] assert out[2].tolist() == [0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0] out = bipartite_subgraph(subset, edge_index, edge_attr, relabel_nodes=True) assert out[0].tolist() == [[0, 1, 2, 2], [1, 0, 0, 1]] - assert out[1].tolist() == [3, 4, 9, 10] + assert out[1].tolist() == [3.0, 4.0, 9.0, 10.0] def test_k_hop_subgraph(): diff --git a/test/utils/test_to_dense_adj.py b/test/utils/test_to_dense_adj.py index 5015aac890a1..a6c8a94981f3 100644 --- a/test/utils/test_to_dense_adj.py +++ b/test/utils/test_to_dense_adj.py @@ -33,7 +33,7 @@ def test_to_dense_adj(): assert adj[0][:3, :3].tolist() == [[1, 1, 0], [1, 0, 0], [0, 0, 0]] assert adj[1][:3, :3].tolist() == [[0, 1, 0], [0, 0, 1], [1, 0, 0]] - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) adj = to_dense_adj(edge_index, batch, edge_attr) assert adj.size() == (2, 3, 3) assert adj[0].tolist() == [[1, 2, 0], [3, 0, 0], [0, 0, 0]] @@ -93,8 +93,16 @@ def test_to_dense_adj_with_duplicate_entries(): assert adj[0].tolist() == [[2, 1, 0], [1, 0, 0], [0, 0, 0]] assert adj[1].tolist() == [[0, 1, 0], [0, 0, 2], [1, 0, 0]] - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]) adj = to_dense_adj(edge_index, batch, edge_attr) assert adj.size() 
== (2, 3, 3) - assert adj[0].tolist() == [[3, 3, 0], [4, 0, 0], [0, 0, 0]] - assert adj[1].tolist() == [[0, 5, 0], [0, 0, 13], [8, 0, 0]] + assert adj[0].tolist() == [ + [3.0, 3.0, 0.0], + [4.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + assert adj[1].tolist() == [ + [0.0, 5.0, 0.0], + [0.0, 0.0, 13.0], + [8.0, 0.0, 0.0], + ] From 3fc9462d852798df45cd18ee4583e05094ef7374 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 7 Jul 2023 15:12:18 +0700 Subject: [PATCH 1342/2432] Replace `torch.Tensor`calls (4/4) (#7707) --- test/data/test_dataset.py | 12 +++---- test/data/test_dataset_summary.py | 17 ++++----- test/loader/test_cluster.py | 18 ++++++++-- test/loader/test_graph_saint.py | 9 ++++- test/nn/models/test_autoencoder.py | 19 +++++----- test/nn/pool/test_avg_pool.py | 38 +++++++++++++++++--- test/nn/pool/test_max_pool.py | 38 +++++++++++++++++--- test/nn/pool/test_voxel_grid.py | 16 +++++++-- test/transforms/test_cartesian.py | 4 +-- test/transforms/test_center.py | 2 +- test/transforms/test_knn_graph.py | 9 ++++- test/transforms/test_local_cartesian.py | 4 +-- test/transforms/test_local_degree_profile.py | 2 +- test/transforms/test_normalize_rotation.py | 28 ++++++++++----- test/transforms/test_point_pair_features.py | 22 +++++++----- test/transforms/test_radius_graph.py | 9 ++++- test/transforms/test_random_rotate.py | 9 +++-- test/transforms/test_random_scale.py | 2 +- test/transforms/test_spherical.py | 37 +++++++++++-------- test/transforms/test_target_indegree.py | 2 +- test/transforms/test_to_dense.py | 2 +- test/utils/test_convert.py | 6 ++-- test/utils/test_to_dense_batch.py | 26 ++++++++++---- 23 files changed, 241 insertions(+), 90 deletions(-) diff --git a/test/data/test_dataset.py b/test/data/test_dataset.py index d82cb09bdf42..ad0538557b7b 100644 --- a/test/data/test_dataset.py +++ b/test/data/test_dataset.py @@ -29,8 +29,8 @@ def process(self): def test_in_memory_dataset(): - x1 = torch.Tensor([[1], [1], [1]]) - x2 = torch.Tensor([[2], [2], [2]]) + x1 = torch.tensor([[1.0], [1.0], [1.0]]) + x2 = torch.tensor([[2.0], [2.0], [2.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) face = torch.tensor([[0], [1], [2]]) @@ -72,8 +72,8 @@ def test_in_memory_dataset(): def test_stored_in_memory_dataset(tmp_path): - x1 = torch.Tensor([[1], [1], [1]]) - x2 = torch.Tensor([[2], [2], [2], [2]]) + x1 = torch.tensor([[1.0], [1.0], [1.0]]) + x2 = torch.tensor([[2.0], [2.0], [2.0], [2.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) data1 = Data(x1, edge_index, num_nodes=3, test_int=1, test_str='1') @@ -97,8 +97,8 @@ def test_stored_in_memory_dataset(tmp_path): def test_stored_hetero_in_memory_dataset(tmp_path): - x1 = torch.Tensor([[1], [1], [1]]) - x2 = torch.Tensor([[2], [2], [2], [2]]) + x1 = torch.tensor([[1.0], [1.0], [1.0]]) + x2 = torch.tensor([[2.0], [2.0], [2.0], [2.0]]) data1 = HeteroData() data1['paper'].x = x1 diff --git a/test/data/test_dataset_summary.py b/test/data/test_dataset_summary.py index 5eed8708d9f0..5af3984c3c2f 100644 --- a/test/data/test_dataset_summary.py +++ b/test/data/test_dataset_summary.py @@ -7,6 +7,7 @@ def check_stats(stats: Stats, expected: Tensor): + expected = expected.to(torch.float) assert stats.mean == float(expected.mean()) assert stats.std == float(expected.std()) assert stats.min == float(expected.min()) @@ -18,8 +19,8 @@ def check_stats(stats: Stats, expected: Tensor): def test_dataset_summary(): dataset = FakeDataset(num_graphs=10) - num_nodes = torch.Tensor([data.num_nodes for data in dataset]) - num_edges = 
torch.Tensor([data.num_edges for data in dataset]) + num_nodes = torch.tensor([data.num_nodes for data in dataset]) + num_edges = torch.tensor([data.num_edges for data in dataset]) summary = dataset.get_summary() @@ -68,8 +69,8 @@ def test_dataset_summary_hetero_representation_length(): def test_dataset_summary_hetero_per_type_check(): dataset = FakeHeteroDataset(num_graphs=10) - exp_num_nodes = torch.Tensor([data.num_nodes for data in dataset]) - exp_num_edges = torch.Tensor([data.num_edges for data in dataset]) + exp_num_nodes = torch.tensor([data.num_nodes for data in dataset]) + exp_num_edges = torch.tensor([data.num_edges for data in dataset]) summary = dataset.get_summary() @@ -81,8 +82,8 @@ def test_dataset_summary_hetero_per_type_check(): num_nodes_per_type = {} for node_type in dataset.node_types: - num_nodes_per_type[node_type] = torch.Tensor( - [data[node_type].num_nodes for data in dataset]) + num_nodes = [data[node_type].num_nodes for data in dataset] + num_nodes_per_type[node_type] = torch.tensor(num_nodes) assert len(summary.num_nodes_per_type) == len(dataset.node_types) for node_type, stats in summary.num_nodes_per_type.items(): @@ -90,8 +91,8 @@ def test_dataset_summary_hetero_per_type_check(): num_edges_per_type = {} for edge_type in dataset.edge_types: - num_edges_per_type[edge_type] = torch.Tensor( - [data[edge_type].num_edges for data in dataset]) + num_edges = [data[edge_type].num_edges for data in dataset] + num_edges_per_type[edge_type] = torch.tensor(num_edges) assert len(summary.num_edges_per_type) == len(dataset.edge_types) for edge_type, stats in summary.num_edges_per_type.items(): diff --git a/test/loader/test_cluster.py b/test/loader/test_cluster.py index 2e10f5202614..cdcbf9e613ee 100644 --- a/test/loader/test_cluster.py +++ b/test/loader/test_cluster.py @@ -28,7 +28,14 @@ def test_cluster_gcn(): [0, 1, 0, 1, 0, 1], ]) - x = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + x = torch.tensor([ + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + [5.0, 5.0], + ]) edge_index = adj.nonzero(as_tuple=False).t() edge_attr = torch.arange(edge_index.size(1)) n_id = torch.arange(6) @@ -121,7 +128,14 @@ def test_keep_inter_cluster_edges(): [0, 1, 0, 1, 0, 1], ]) - x = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + x = torch.tensor([ + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + [5.0, 5.0], + ]) edge_index = adj.nonzero(as_tuple=False).t() edge_attr = torch.arange(edge_index.size(1)) data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) diff --git a/test/loader/test_graph_saint.py b/test/loader/test_graph_saint.py index efb7ef6dd8cc..8866e5ec846e 100644 --- a/test/loader/test_graph_saint.py +++ b/test/loader/test_graph_saint.py @@ -22,7 +22,14 @@ def test_graph_saint(): edge_index = adj.nonzero(as_tuple=False).t() edge_id = adj[edge_index[0], edge_index[1]] - x = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + x = torch.tensor([ + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + [5.0, 5.0], + ]) n_id = torch.arange(6) data = Data(edge_index=edge_index, x=x, n_id=n_id, edge_id=edge_id, num_nodes=6) diff --git a/test/nn/models/test_autoencoder.py b/test/nn/models/test_autoencoder.py index 0d783531208c..0a79d74dfa72 100644 --- a/test/nn/models/test_autoencoder.py +++ b/test/nn/models/test_autoencoder.py @@ -11,18 +11,21 @@ def test_gae(): model = GAE(encoder=lambda x: x) model.reset_parameters() - x = torch.Tensor([[1, -1], [1, 2], [2, 1]]) + x = 
torch.tensor([[1.0, -1.0], [1.0, 2.0], [2.0, 1.0]]) z = model.encode(x) assert torch.allclose(z, x) adj = model.decoder.forward_all(z) - assert torch.allclose( - adj, - torch.Tensor([[+2, -1, +1], [-1, +5, +4], [+1, +4, +5]]).sigmoid()) + expected = torch.tensor([ + [2.0, -1.0, 1.0], + [-1.0, 5.0, 4.0], + [1.0, 4.0, 5.0], + ]).sigmoid() + assert torch.allclose(adj, expected) edge_index = torch.tensor([[0, 1], [1, 2]]) value = model.decode(z, edge_index) - assert torch.allclose(value, torch.sigmoid(torch.Tensor([-1, 4]))) + assert torch.allclose(value, torch.tensor([-1.0, 4.0]).sigmoid()) if is_full_test(): jit = torch.jit.export(model) @@ -48,7 +51,7 @@ def test_gae(): def test_vgae(): model = VGAE(encoder=lambda x: (x, x)) - x = torch.Tensor([[1, -1], [1, 2], [2, 1]]) + x = torch.tensor([[1.0, -1.0], [1.0, 2.0], [2.0, 1.0]]) model.encode(x) assert float(model.kl_loss()) > 0 @@ -65,7 +68,7 @@ def test_arga(): model = ARGA(encoder=lambda x: x, discriminator=lambda x: T([0.5])) model.reset_parameters() - x = torch.Tensor([[1, -1], [1, 2], [2, 1]]) + x = torch.tensor([[1.0, -1.0], [1.0, 2.0], [2.0, 1.0]]) z = model.encode(x) assert float(model.reg_loss(z)) > 0 @@ -81,7 +84,7 @@ def test_arga(): def test_argva(): model = ARGVA(encoder=lambda x: (x, x), discriminator=lambda x: T([0.5])) - x = torch.Tensor([[1, -1], [1, 2], [2, 1]]) + x = torch.tensor([[1.0, -1.0], [1.0, 2.0], [2.0, 1.0]]) model.encode(x) model.reparametrize(model.__mu__, model.__logstd__) assert float(model.kl_loss()) > 0 diff --git a/test/nn/pool/test_avg_pool.py b/test/nn/pool/test_avg_pool.py index 15c8113df89b..ad68bf0ada21 100644 --- a/test/nn/pool/test_avg_pool.py +++ b/test/nn/pool/test_avg_pool.py @@ -7,7 +7,14 @@ def test_avg_pool_x(): cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) batch = torch.tensor([0, 0, 0, 0, 1, 1]) out = avg_pool_x(cluster, x, batch) @@ -38,11 +45,25 @@ def test_avg_pool_x(): def test_avg_pool(): cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - pos = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) + pos = torch.tensor([ + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + [5.0, 5.0], + ]) edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - edge_attr = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + edge_attr = torch.ones(edge_index.size(1)) batch = torch.tensor([0, 0, 0, 0, 1, 1]) data = Batch(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr, @@ -58,7 +79,14 @@ def test_avg_pool(): def test_avg_pool_neighbor_x(): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) batch = torch.tensor([0, 0, 0, 0, 1, 1]) diff --git a/test/nn/pool/test_max_pool.py b/test/nn/pool/test_max_pool.py index db58de3d91e9..b3efc828f7ef 100644 --- a/test/nn/pool/test_max_pool.py +++ b/test/nn/pool/test_max_pool.py @@ -7,7 +7,14 @@ def test_max_pool_x(): cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = 
torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) batch = torch.tensor([0, 0, 0, 0, 1, 1]) out = max_pool_x(cluster, x, batch) @@ -38,11 +45,25 @@ def test_max_pool_x(): def test_max_pool(): cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - pos = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) + pos = torch.tensor([ + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + [5.0, 5.0], + ]) edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - edge_attr = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + edge_attr = torch.ones(edge_index.size(1)) batch = torch.tensor([0, 0, 0, 0, 1, 1]) data = Batch(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr, @@ -58,7 +79,14 @@ def test_max_pool(): def test_max_pool_neighbor_x(): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) batch = torch.tensor([0, 0, 0, 0, 1, 1]) diff --git a/test/nn/pool/test_voxel_grid.py b/test/nn/pool/test_voxel_grid.py index e8dcd30db203..f35ab0187815 100644 --- a/test/nn/pool/test_voxel_grid.py +++ b/test/nn/pool/test_voxel_grid.py @@ -7,7 +7,13 @@ @withPackage('torch_cluster') def test_voxel_grid(): - pos = torch.Tensor([[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]]) + pos = torch.tensor([ + [0.0, 0.0], + [11.0, 9.0], + [2.0, 8.0], + [2.0, 2.0], + [8.0, 3.0], + ]) batch = torch.tensor([0, 0, 0, 1, 1]) assert voxel_grid(pos, size=5, batch=batch).tolist() == [0, 5, 3, 6, 7] @@ -22,7 +28,13 @@ def test_voxel_grid(): @withPackage('torch_cluster') def test_single_voxel_grid(): - pos = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]]) + pos = torch.tensor([ + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + [3.0, 3.0], + [4.0, 4.0], + ]) edge_index = torch.tensor([[0, 0, 3], [1, 2, 4]]) batch = torch.tensor([0, 0, 0, 1, 1]) x = torch.randn(5, 16) diff --git a/test/transforms/test_cartesian.py b/test/transforms/test_cartesian.py index eeaa8393c609..2cb11318339e 100644 --- a/test/transforms/test_cartesian.py +++ b/test/transforms/test_cartesian.py @@ -7,9 +7,9 @@ def test_cartesian(): assert str(Cartesian()) == 'Cartesian(norm=True, max_value=None)' - pos = torch.Tensor([[-1, 0], [0, 0], [2, 0]]) + pos = torch.tensor([[-1.0, 0.0], [0.0, 0.0], [2.0, 0.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_attr = torch.Tensor([1, 2, 3, 4]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0]) data = Data(edge_index=edge_index, pos=pos) data = Cartesian(norm=False)(data) diff --git a/test/transforms/test_center.py b/test/transforms/test_center.py index 4c4c0a7b5dda..61d0ad76ba53 100644 --- a/test/transforms/test_center.py +++ b/test/transforms/test_center.py @@ -8,7 +8,7 @@ def test_center(): transform = Center() assert str(transform) == 'Center()' - pos = torch.Tensor([[0, 0], [2, 0], [4, 0]]) + pos = torch.tensor([[0.0, 0.0], [2.0, 0.0], [4.0, 0.0]]) data = Data(pos=pos) data = transform(data) diff --git a/test/transforms/test_knn_graph.py 
b/test/transforms/test_knn_graph.py index abcb957b7040..f8b824e06d4a 100644 --- a/test/transforms/test_knn_graph.py +++ b/test/transforms/test_knn_graph.py @@ -9,7 +9,14 @@ def test_knn_graph(): assert str(KNNGraph()) == 'KNNGraph(k=6)' - pos = torch.Tensor([[0, 0], [1, 0], [2, 0], [0, 1], [-2, 0], [0, -2]]) + pos = torch.tensor([ + [0.0, 0.0], + [1.0, 0.0], + [2.0, 0.0], + [0.0, 1.0], + [-2.0, 0.0], + [0.0, -2.0], + ]) expected_row = [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5] expected_col = [1, 2, 3, 4, 5, 0, 2, 3, 5, 0, 1, 0, 1, 4, 0, 3, 0, 1] diff --git a/test/transforms/test_local_cartesian.py b/test/transforms/test_local_cartesian.py index fdc752aaa01e..3c275a778a11 100644 --- a/test/transforms/test_local_cartesian.py +++ b/test/transforms/test_local_cartesian.py @@ -8,9 +8,9 @@ def test_local_cartesian(): transform = LocalCartesian() assert str(transform) == 'LocalCartesian()' - pos = torch.Tensor([[-1, 0], [0, 0], [2, 0]]) + pos = torch.tensor([[-1.0, 0.0], [0.0, 0.0], [2.0, 0.0]]) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_attr = torch.Tensor([1, 2, 3, 4]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0]) data = Data(edge_index=edge_index, pos=pos) data = transform(data) diff --git a/test/transforms/test_local_degree_profile.py b/test/transforms/test_local_degree_profile.py index 1cd311478d9d..3504ed5d41d4 100644 --- a/test/transforms/test_local_degree_profile.py +++ b/test/transforms/test_local_degree_profile.py @@ -8,7 +8,7 @@ def test_target_indegree(): assert str(LocalDegreeProfile()) == 'LocalDegreeProfile()' edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - x = torch.Tensor([[1], [1], [1], [1]]) # One isolated node. + x = torch.tensor([[1.0], [1.0], [1.0], [1.0]]) # One isolated node. expected = torch.tensor([ [1, 2, 2, 2, 0], diff --git a/test/transforms/test_normalize_rotation.py b/test/transforms/test_normalize_rotation.py index cc192364e424..b5f5e6171ab1 100644 --- a/test/transforms/test_normalize_rotation.py +++ b/test/transforms/test_normalize_rotation.py @@ -9,19 +9,31 @@ def test_normalize_rotation(): assert str(NormalizeRotation()) == 'NormalizeRotation()' - pos = torch.Tensor([[-2, -2], [-1, -1], [0, 0], [1, 1], [2, 2]]) - normal = torch.Tensor([[-1, 1], [-1, 1], [-1, 1], [-1, 1], [-1, 1]]) + pos = torch.tensor([ + [-2.0, -2.0], + [-1.0, -1.0], + [0.0, 0.0], + [1.0, 1.0], + [2.0, 2.0], + ]) + normal = torch.tensor([ + [-1.0, 1.0], + [-1.0, 1.0], + [-1.0, 1.0], + [-1.0, 1.0], + [-1.0, 1.0], + ]) data = Data(pos=pos) data.normal = normal data = NormalizeRotation()(data) assert len(data) == 2 - expected_pos = torch.Tensor([ - [-2 * sqrt(2), 0], - [-sqrt(2), 0], - [0, 0], - [sqrt(2), 0], - [2 * sqrt(2), 0], + expected_pos = torch.tensor([ + [-2 * sqrt(2), 0.0], + [-sqrt(2), 0.0], + [0.0, 0.0], + [sqrt(2), 0.0], + [2 * sqrt(2), 0.0], ]) expected_normal = [[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]] diff --git a/test/transforms/test_point_pair_features.py b/test/transforms/test_point_pair_features.py index bb79af67af59..7080e6cd9f29 100644 --- a/test/transforms/test_point_pair_features.py +++ b/test/transforms/test_point_pair_features.py @@ -10,10 +10,10 @@ def test_point_pair_features(): transform = PointPairFeatures() assert str(transform) == 'PointPairFeatures()' - pos = torch.Tensor([[0, 0, 0], [1, 0, 0]]) + pos = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) edge_index = torch.tensor([[0, 1], [1, 0]]) - norm = torch.Tensor([[1, 0, 0], [1, 0, 0]]) - edge_attr = torch.Tensor([1, 1]) + norm = torch.tensor([[1.0, 0.0, 0.0], [1.0, 
0.0, 0.0]]) + edge_attr = torch.tensor([1.0, 1.0]) data = Data(edge_index=edge_index, pos=pos, norm=norm) data = transform(data) @@ -21,9 +21,11 @@ def test_point_pair_features(): assert data.pos.tolist() == pos.tolist() assert data.norm.tolist() == norm.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 0, 0, 0], [1, PI, PI, 0]]), - atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 0.0, 0.0, 0.0], [1.0, PI, PI, 0.0]]), + atol=1e-4, + ) data = Data(edge_index=edge_index, pos=pos, norm=norm, edge_attr=edge_attr) data = transform(data) @@ -31,6 +33,8 @@ def test_point_pair_features(): assert data.pos.tolist() == pos.tolist() assert data.norm.tolist() == norm.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0, 0, 0], [1, 1, PI, PI, 0]]), - atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 1.0, 0.0, 0.0, 0.0], [1.0, 1.0, PI, PI, 0.0]]), + atol=1e-4, + ) diff --git a/test/transforms/test_radius_graph.py b/test/transforms/test_radius_graph.py index 0fbf9fd962f0..a73109da435e 100644 --- a/test/transforms/test_radius_graph.py +++ b/test/transforms/test_radius_graph.py @@ -10,7 +10,14 @@ def test_radius_graph(): assert str(RadiusGraph(r=1)) == 'RadiusGraph(r=1)' - pos = torch.Tensor([[0, 0], [1, 0], [2, 0], [0, 1], [-2, 0], [0, -2]]) + pos = torch.tensor([ + [0.0, 0.0], + [1.0, 0.0], + [2.0, 0.0], + [0.0, 1.0], + [-2.0, 0.0], + [0.0, -2.0], + ]) data = Data(pos=pos) data = RadiusGraph(r=1.5)(data) diff --git a/test/transforms/test_random_rotate.py b/test/transforms/test_random_rotate.py index 6f73bf518798..c0877e871825 100644 --- a/test/transforms/test_random_rotate.py +++ b/test/transforms/test_random_rotate.py @@ -8,7 +8,7 @@ def test_random_rotate(): assert str(RandomRotate([-180, 180])) == ('RandomRotate(' '[-180, 180], axis=0)') - pos = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + pos = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) data = Data(pos=pos) data = RandomRotate(0)(data) @@ -20,7 +20,12 @@ def test_random_rotate(): assert len(data) == 1 assert data.pos.tolist() == [[1, 1], [1, -1], [-1, 1], [-1, -1]] - pos = torch.Tensor([[-1, -1, 1], [-1, 1, 1], [1, -1, -1], [1, 1, -1]]) + pos = torch.tensor([ + [-1.0, -1.0, 1.0], + [-1.0, 1.0, 1.0], + [1.0, -1.0, -1.0], + [1.0, 1.0, -1.0], + ]) data = Data(pos=pos) data = RandomRotate([180, 180], axis=0)(data) diff --git a/test/transforms/test_random_scale.py b/test/transforms/test_random_scale.py index 658c51515514..f0a3d51fc91d 100644 --- a/test/transforms/test_random_scale.py +++ b/test/transforms/test_random_scale.py @@ -7,7 +7,7 @@ def test_random_scale(): assert str(RandomScale([1, 2])) == 'RandomScale([1, 2])' - pos = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + pos = torch.tensor([[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]) data = Data(pos=pos) data = RandomScale([1, 1])(data) diff --git a/test/transforms/test_spherical.py b/test/transforms/test_spherical.py index 78fbd127b692..eff867ef30ea 100644 --- a/test/transforms/test_spherical.py +++ b/test/transforms/test_spherical.py @@ -9,29 +9,33 @@ def test_spherical(): assert str(Spherical()) == 'Spherical(norm=True, max_value=None)' - pos = torch.Tensor([[0, 0, 0], [1, 0, 0]]) + pos = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) edge_index = torch.tensor([[0, 1], [1, 0]]) - edge_attr = torch.Tensor([1, 1]) + edge_attr = torch.tensor([1.0, 
1.0]) data = Data(edge_index=edge_index, pos=pos) data = Spherical(norm=False)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 0, PI / 2], [1, PI, PI / 2]]), - atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 0.0, PI / 2.0], [1.0, PI, PI / 2.0]]), + atol=1e-4, + ) data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) data = Spherical(norm=True)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0, 0.5], [1, 1, 0.5, 0.5]]), - atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 1.0, 0.0, 0.5], [1.0, 1.0, 0.5, 0.5]]), + atol=1e-4, + ) - pos = torch.Tensor([[0, 0, 0], [0, 0, 1]]) + pos = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) edge_index = torch.tensor([[0, 1], [1, 0]]) data = Data(edge_index=edge_index, pos=pos) @@ -39,14 +43,19 @@ def test_spherical(): assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 0, 0], [1, 0, PI]]), atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 0.0, 0.0], [1.0, 0.0, PI]]), + atol=1e-4, + ) data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) data = Spherical(norm=True)(data) assert len(data) == 3 assert data.pos.tolist() == pos.tolist() assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0, 0], [1, 1, 0, 1]]), - atol=1e-04) + assert torch.allclose( + data.edge_attr, + torch.tensor([[1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 1.0]]), + atol=1e-4, + ) diff --git a/test/transforms/test_target_indegree.py b/test/transforms/test_target_indegree.py index 0bc3557d459f..96352e1d47f4 100644 --- a/test/transforms/test_target_indegree.py +++ b/test/transforms/test_target_indegree.py @@ -8,7 +8,7 @@ def test_target_indegree(): assert str(TargetIndegree()) == 'TargetIndegree(norm=True, max_value=None)' edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_attr = torch.Tensor([1, 1, 1, 1]) + edge_attr = torch.tensor([1.0, 1.0, 1.0, 1.0]) data = Data(edge_index=edge_index, num_nodes=3) data = TargetIndegree(norm=False)(data) diff --git a/test/transforms/test_to_dense.py b/test/transforms/test_to_dense.py index 72378ed40771..28e7ef86ab53 100644 --- a/test/transforms/test_to_dense.py +++ b/test/transforms/test_to_dense.py @@ -6,7 +6,7 @@ def test_to_dense(): edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6]) + edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) num_nodes = edge_index.max().item() + 1 x = torch.randn((num_nodes, 4)) pos = torch.randn((num_nodes, 3)) diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index ccaeaf50974f..f5f58a325f9d 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -100,10 +100,10 @@ def test_from_networkx_set_node_attributes(): def test_to_networkx_undirected(): import networkx as nx - x = torch.Tensor([[1, 2], [3, 4]]) - pos = torch.Tensor([[0, 0], [1, 1]]) + x = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) + pos = torch.tensor([[0.0, 0.0], [1.0, 1.0]]) edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - edge_attr = torch.Tensor([1, 2, 3]) + edge_attr = torch.tensor([1.0, 
2.0, 3.0]) data = Data(x=x, pos=pos, edge_index=edge_index, weight=edge_attr) for remove_self_loops in [True, False]: diff --git a/test/utils/test_to_dense_batch.py b/test/utils/test_to_dense_batch.py index 1611cb39fb69..7c13993a4496 100644 --- a/test/utils/test_to_dense_batch.py +++ b/test/utils/test_to_dense_batch.py @@ -11,14 +11,21 @@ @pytest.mark.parametrize('fill', [70.0, torch.tensor(49.0)]) def test_to_dense_batch(fill): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) batch = torch.tensor([0, 0, 1, 2, 2, 2]) item = fill.item() if isinstance(fill, Tensor) else fill - expected = torch.Tensor([ - [[1, 2], [3, 4], [item, item]], - [[5, 6], [item, item], [item, item]], - [[7, 8], [9, 10], [11, 12]], + expected = torch.tensor([ + [[1.0, 2.0], [3.0, 4.0], [item, item]], + [[5.0, 6.0], [item, item], [item, item]], + [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]], ]) out, mask = to_dense_batch(x, batch, fill_value=fill) @@ -56,7 +63,14 @@ def test_to_dense_batch(fill): def test_to_dense_batch_disable_dynamic_shapes(): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) + x = torch.tensor([ + [1.0, 2.0], + [3.0, 4.0], + [5.0, 6.0], + [7.0, 8.0], + [9.0, 10.0], + [11.0, 12.0], + ]) batch = torch.tensor([0, 0, 1, 2, 2, 2]) with set_experimental_mode(True, 'disable_dynamic_shapes'): From 4a09b8d661fec2c2bfdac5432665c41eea0ba484 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 7 Jul 2023 15:18:07 +0700 Subject: [PATCH 1343/2432] Rename `dest` to `dst` in `geodesic_distance` (#7708) --- CHANGELOG.md | 1 + test/nn/models/test_meta.py | 4 ++-- test/utils/test_geodesic.py | 6 +++--- torch_geometric/nn/models/meta.py | 6 +++--- torch_geometric/utils/geodesic.py | 34 +++++++++++++++++++------------ 5 files changed, 30 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3df285b590a3..642a689f6915 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Renamed `dest` argument to `dst` in `utils.geodesic_distance` ([#7708](https://github.com/pyg-team/pytorch_geometric/pull/7708)) - Changed `add_random_edge` to only add true negative edges ([#7654](https://github.com/pyg-team/pytorch_geometric/pull/7654)) - Allowed the usage of `BasicGNN` models in `DeepGraphInfomax` ([#7648](https://github.com/pyg-team/pytorch_geometric/pull/7648)) - Breaking Change: Made `Data.keys` a method rather than a property ([#7629](https://github.com/pyg-team/pytorch_geometric/pull/7629)) diff --git a/test/nn/models/test_meta.py b/test/nn/models/test_meta.py index 646b835a128c..75c345f6d113 100644 --- a/test/nn/models/test_meta.py +++ b/test/nn/models/test_meta.py @@ -47,7 +47,7 @@ def __init__(self): def forward( self, src: Tensor, - dest: Tensor, + dst: Tensor, edge_attr: Optional[Tensor], u: Optional[Tensor], batch: Optional[Tensor], @@ -55,7 +55,7 @@ def forward( assert edge_attr is not None assert u is not None assert batch is not None - out = torch.cat([src, dest, edge_attr, u[batch]], 1) + out = torch.cat([src, dst, edge_attr, u[batch]], 1) return self.edge_mlp(out) class NodeModel(torch.nn.Module): diff --git a/test/utils/test_geodesic.py b/test/utils/test_geodesic.py index 3ea5cca1391b..c1daf41e212e 100644 --- a/test/utils/test_geodesic.py +++ b/test/utils/test_geodesic.py @@ -38,11 +38,11 @@ def test_geodesic_distance(): assert torch.allclose(out, expected) src = torch.tensor([0, 0, 0, 0]) - dest = torch.tensor([0, 1, 2, 3]) - out = geodesic_distance(pos, face, src=src, dest=dest) + dst = torch.tensor([0, 1, 2, 3]) + out = geodesic_distance(pos, face, src=src, dst=dst) expected = torch.tensor([0.0, 1.0, 1.0, sqrt(2)]) assert torch.allclose(out, expected) - out = geodesic_distance(pos, face, dest=dest) + out = geodesic_distance(pos, face, dst=dst) expected = torch.tensor([0.0, 0.0, 0.0, 0.0]) assert torch.allclose(out, expected) diff --git a/torch_geometric/nn/models/meta.py b/torch_geometric/nn/models/meta.py index 7aef8a423660..b9e8f3672749 100644 --- a/torch_geometric/nn/models/meta.py +++ b/torch_geometric/nn/models/meta.py @@ -48,12 +48,12 @@ def __init__(self): super().__init__() self.edge_mlp = Seq(Lin(..., ...), ReLU(), Lin(..., ...)) - def forward(self, src, dest, edge_attr, u, batch): - # src, dest: [E, F_x], where E is the number of edges. + def forward(self, src, dst, edge_attr, u, batch): + # src, dst: [E, F_x], where E is the number of edges. # edge_attr: [E, F_e] # u: [B, F_u], where B is the number of graphs. # batch: [E] with max entry B - 1. - out = torch.cat([src, dest, edge_attr, u[batch]], 1) + out = torch.cat([src, dst, edge_attr, u[batch]], 1) return self.edge_mlp(out) class NodeModel(torch.nn.Module): diff --git a/torch_geometric/utils/geodesic.py b/torch_geometric/utils/geodesic.py index d217f28f2949..889f805b2378 100644 --- a/torch_geometric/utils/geodesic.py +++ b/torch_geometric/utils/geodesic.py @@ -1,4 +1,5 @@ import multiprocessing as mp +import warnings from typing import Optional import numpy as np @@ -10,13 +11,14 @@ def geodesic_distance( pos: Tensor, face: Tensor, src: Optional[Tensor] = None, - dest: Optional[Tensor] = None, + dst: Optional[Tensor] = None, norm: bool = True, max_distance: Optional[float] = None, num_workers: int = 0, + **kwargs, ) -> Tensor: r"""Computes (normalized) geodesic distances of a mesh given by :obj:`pos` - and :obj:`face`. If :obj:`src` and :obj:`dest` are given, this method only + and :obj:`face`. 
If :obj:`src` and :obj:`dst` are given, this method only computes the geodesic distances for the respective source and target node-pairs. @@ -26,11 +28,11 @@ def geodesic_distance( To install, run :obj:`pip install cython && pip install gdist`. Args: - pos (Tensor): The node positions. - face (LongTensor): The face indices. - src (LongTensor, optional): If given, only compute geodesic distances + pos (torch.Tensor): The node positions. + face (torch.Tensor): The face indices. + src (torch.Tensor, optional): If given, only compute geodesic distances for the specified source indices. (default: :obj:`None`) - dest (LongTensor, optional): If given, only compute geodesic distances + dst (torch.Tensor, optional): If given, only compute geodesic distances for the specified target indices. (default: :obj:`None`) norm (bool, optional): Normalizes geodesic distances by :math:`\sqrt{\textrm{area}(\mathcal{M})}`. (default: :obj:`True`) @@ -62,6 +64,12 @@ def geodesic_distance( """ import gdist + if 'dest' in kwargs: + dst = kwargs['dest'] + warnings.warn( + "'dest' attribute in 'geodesic_distance' is deprecated and will " + "be removed in a future release. Use 'dst' argument instead.") + max_distance = float('inf') if max_distance is None else max_distance if norm: @@ -75,7 +83,7 @@ def geodesic_distance( pos = pos.detach().cpu().to(torch.double).numpy() face = face.detach().t().cpu().to(torch.int).numpy() - if src is None and dest is None: + if src is None and dst is None: out = gdist.local_gdist_matrix(pos, face, max_distance * norm).toarray() / norm return torch.from_numpy(out).to(dtype) @@ -85,11 +93,11 @@ def geodesic_distance( else: src = src.detach().cpu().to(torch.int).numpy() - dest = None if dest is None else dest.detach().cpu().to(torch.int).numpy() + dst = None if dst is None else dst.detach().cpu().to(torch.int).numpy() - def _parallel_loop(pos, face, src, dest, max_distance, norm, i, dtype): + def _parallel_loop(pos, face, src, dst, max_distance, norm, i, dtype): s = src[i:i + 1] - d = None if dest is None else dest[i:i + 1] + d = None if dst is None else dst[i:i + 1] out = gdist.compute_gdist(pos, face, s, d, max_distance * norm) / norm return torch.from_numpy(out).to(dtype) @@ -98,17 +106,17 @@ def _parallel_loop(pos, face, src, dest, max_distance, norm, i, dtype): with mp.Pool(num_workers) as pool: outs = pool.starmap( _parallel_loop, - [(pos, face, src, dest, max_distance, norm, i, dtype) + [(pos, face, src, dst, max_distance, norm, i, dtype) for i in range(len(src))]) else: outs = [ - _parallel_loop(pos, face, src, dest, max_distance, norm, i, dtype) + _parallel_loop(pos, face, src, dst, max_distance, norm, i, dtype) for i in range(len(src)) ] out = torch.cat(outs, dim=0) - if dest is None: + if dst is None: out = out.view(-1, pos.shape[0]) return out From 10f235097a6ea06ccc5d544b6702812b8bd48276 Mon Sep 17 00:00:00 2001 From: Mohamad Zamini <32536264+mzamini92@users.noreply.github.com> Date: Fri, 7 Jul 2023 18:50:18 -0600 Subject: [PATCH 1344/2432] Add `interval` argument to `transforms.Distance` (#7700) @rusty1s following previous PRs regarding adding `interval` to other transforms, by adding `interval` to the code, we can use the `Distance` transformation and specify the `interval` parameter to control the range and normalization of the distances. 
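For illustration, a minimal usage sketch (not part of this patch; the toy node positions and the chosen interval below are made up):

```python
import torch

from torch_geometric.data import Data
from torch_geometric.transforms import Distance

# Toy graph with two nodes one unit apart (illustrative values only).
pos = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
edge_index = torch.tensor([[0, 1], [1, 0]])
data = Data(edge_index=edge_index, pos=pos)

# Rescale normalized distances into [0.25, 0.75] instead of the default [0.0, 1.0]:
data = Distance(norm=True, interval=(0.25, 0.75))(data)
print(data.edge_attr)  # Edge-wise distances mapped into the requested interval.
```

Note that, as implemented here, the interval rescaling only takes effect when `norm=True`.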
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- torch_geometric/transforms/distance.py | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 642a689f6915..7e17dd25a992 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added back support for PyTorch >= 1.11.0 ([#7656](https://github.com/pyg-team/pytorch_geometric/pull/7656)) - Added `Data.sort()` and `HeteroData.sort()` functionalities ([#7649](https://github.com/pyg-team/pytorch_geometric/pull/7649)) - Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643), [#7647](https://github.com/pyg-team/pytorch_geometric/pull/7647)) -- Added `interval` argument to `Cartesian` and `LocalCartesian` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614)) +- Added `interval` argument to `Cartesian`, `LocalCartesian` and `Distance` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614), [#7700](https://github.com/pyg-team/pytorch_geometric/pull/7700)) - Added a `LightGCN` example on the `AmazonBook` dataset ([7603](https://github.com/pyg-team/pytorch_geometric/pull/7603)) - Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594)) - Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601)) diff --git a/torch_geometric/transforms/distance.py b/torch_geometric/transforms/distance.py index 70404da7b4a5..d012c4d2fffe 100644 --- a/torch_geometric/transforms/distance.py +++ b/torch_geometric/transforms/distance.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Tuple import torch @@ -10,22 +10,31 @@ @functional_transform('distance') class Distance(BaseTransform): r"""Saves the Euclidean distance of linked nodes in its edge attributes - (functional name: :obj:`distance`). + (functional name: :obj:`distance`). Each distance gets globally normalized + to a specified interval (:math:`[0, 1]` by default). Args: norm (bool, optional): If set to :obj:`False`, the output will not be - normalized to the interval :math:`[0, 1]`. (default: :obj:`True`) + normalized. (default: :obj:`True`) max_value (float, optional): If set and :obj:`norm=True`, normalization will be performed based on this value instead of the maximum value found in the data. (default: :obj:`None`) cat (bool, optional): If set to :obj:`False`, all existing edge attributes will be replaced. (default: :obj:`True`) + interval ((float, float), optional): A tuple specifying the lower and + upper bound for normalization. 
(default: :obj:`(0.0, 1.0)`) """ - def __init__(self, norm: bool = True, max_value: Optional[float] = None, - cat: bool = True): + def __init__( + self, + norm: bool = True, + max_value: Optional[float] = None, + cat: bool = True, + interval: Tuple[float, float] = (0.0, 1.0), + ): self.norm = norm self.max = max_value self.cat = cat + self.interval = interval def forward(self, data: Data) -> Data: (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr @@ -33,7 +42,10 @@ def forward(self, data: Data) -> Data: dist = torch.norm(pos[col] - pos[row], p=2, dim=-1).view(-1, 1) if self.norm and dist.numel() > 0: - dist = dist / (dist.max() if self.max is None else self.max) + max_value = dist.max() if self.max is None else self.max + + length = self.interval[1] - self.interval[0] + dist = length * (dist / max_value) + self.interval[0] if pseudo is not None and self.cat: pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo From a4447449a7ab185a643dba878e8548c452a62a26 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 8 Jul 2023 16:56:44 +0700 Subject: [PATCH 1345/2432] `HeteroData` support in `to_networkx` (#7713) --- CHANGELOG.md | 1 + test/utils/test_convert.py | 39 ++++++++++++- torch_geometric/data/data.py | 9 +++ torch_geometric/utils/convert.py | 97 +++++++++++++++++-------------- torch_geometric/utils/geodesic.py | 6 +- 5 files changed, 105 insertions(+), 47 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e17dd25a992..f0b0f6bba85c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `HeteroData` support in `to_networkx` ([#7713](https://github.com/pyg-team/pytorch_geometric/pull/7713)) - Added `FlopsCount` support via `fvcore` ([#7693](https://github.com/pyg-team/pytorch_geometric/pull/7693)) - Added back support for PyTorch >= 1.11.0 ([#7656](https://github.com/pyg-team/pytorch_geometric/pull/7656)) - Added `Data.sort()` and `HeteroData.sort()` functionalities ([#7649](https://github.com/pyg-team/pytorch_geometric/pull/7649)) diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index f5f58a325f9d..93ea55d84586 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -3,7 +3,7 @@ import torch from torch_geometric.data import Data, HeteroData -from torch_geometric.testing import withPackage +from torch_geometric.testing import get_random_edge_index, withPackage from torch_geometric.utils import ( from_cugraph, from_dgl, @@ -131,6 +131,43 @@ def test_to_networkx_undirected(): assert nx.to_numpy_array(G).tolist() == [[3, 2], [2, 0]] +@withPackage('networkx') +def test_to_networkx_hetero(): + edge_index = get_random_edge_index(5, 10, 20, coalesce=True) + + data = HeteroData() + data['global_id'] = 0 + data['author'].x = torch.arange(5) + data['paper'].x = torch.arange(10) + data['author', 'paper'].edge_index = edge_index + data['author', 'paper'].edge_attr = torch.arange(edge_index.size(1)) + + G = to_networkx(data, node_attrs=['x'], edge_attrs=['edge_attr'], + graph_attrs=['global_id']) + + assert G.number_of_nodes() == 15 + assert G.number_of_edges() == edge_index.size(1) + + assert G.graph == {'global_id': 0} + + for i, (v, data) in enumerate(G.nodes(data=True)): + assert i == v + assert len(data) == 2 + if i < 5: + assert data['x'] == i + assert data['type'] == 'author' + else: + assert data['x'] == i - 5 + assert data['type'] == 'paper' + + for i, (v, w, data) in enumerate(G.edges(data=True)): + assert v == 
int(edge_index[0, i]) + assert w == int(edge_index[1, i]) + 5 + assert len(data) == 2 + assert data['type'] == ('author', 'to', 'paper') + assert data['edge_attr'] == i + + @withPackage('networkx') def test_from_networkx(): x = torch.randn(2, 8) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index c6d36d23df3a..fd19b16dbbd3 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -194,6 +194,15 @@ def edge_attrs(self) -> List[str]: r"""Returns all edge-level tensor attribute names.""" return list(set(chain(*[s.edge_attrs() for s in self.edge_stores]))) + @property + def node_offsets(self) -> Dict[NodeType, int]: + out: Dict[NodeType, int] = {} + offset: int = 0 + for store in self.node_stores: + out[store._key] = offset + offset += store.num_nodes + return out + def generate_ids(self): r"""Generates and sets :obj:`n_id` and :obj:`e_id` attributes to assign each node and edge to a continuously ascending and unique ID.""" diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index 946d344127a4..eccb23051bbb 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -1,5 +1,5 @@ from collections import defaultdict -from typing import Any, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import scipy.sparse import torch @@ -78,7 +78,10 @@ def from_scipy_sparse_matrix( def to_networkx( - data: 'torch_geometric.data.Data', + data: Union[ + 'torch_geometric.data.Data', + 'torch_geometric.data.HeteroData', + ], node_attrs: Optional[Iterable[str]] = None, edge_attrs: Optional[Iterable[str]] = None, graph_attrs: Optional[Iterable[str]] = None, @@ -90,7 +93,8 @@ def to_networkx( a directed :obj:`networkx.DiGraph` otherwise. Args: - data (torch_geometric.data.Data): The data object. + data (torch_geometric.data.Data or torch_geometric.data.HeteroData): A + homogeneous or heterogeneous data object. node_attrs (iterable of str, optional): The node attributes to be copied. (default: :obj:`None`) edge_attrs (iterable of str, optional): The edge attributes to be @@ -98,14 +102,15 @@ def to_networkx( graph_attrs (iterable of str, optional): The graph attributes to be copied. (default: :obj:`None`) to_undirected (bool or str, optional): If set to :obj:`True` or - "upper", will return a :obj:`networkx.Graph` instead of a + :obj:`"upper"`, will return a :obj:`networkx.Graph` instead of a :obj:`networkx.DiGraph`. The undirected graph will correspond to the upper triangle of the corresponding adjacency matrix. - Similarly, if set to "lower", the undirected graph will correspond - to the lower triangle of the adjacency matrix. (default: - :obj:`False`) + Similarly, if set to :obj:`"lower"`, the undirected graph will + correspond to the lower triangle of the adjacency matrix. + Only applicable in case the :obj:`data` object holds a homogeneous + graph. (default: :obj:`False`) remove_self_loops (bool, optional): If set to :obj:`True`, will not - include self loops in the resulting graph. (default: :obj:`False`) + include self-loops in the resulting graph. 
(default: :obj:`False`) Examples: @@ -120,47 +125,53 @@ def to_networkx( """ import networkx as nx - G = nx.Graph() if to_undirected else nx.DiGraph() - - G.add_nodes_from(range(data.num_nodes)) - - node_attrs = node_attrs or [] - edge_attrs = edge_attrs or [] - graph_attrs = graph_attrs or [] - - values = {} - for key, value in data(*(node_attrs + edge_attrs + graph_attrs)): - if torch.is_tensor(value): - value = value if value.dim() <= 1 else value.squeeze(-1) - values[key] = value.tolist() - else: - values[key] = value - - to_undirected = "upper" if to_undirected is True else to_undirected - to_undirected_upper = True if to_undirected == "upper" else False - to_undirected_lower = True if to_undirected == "lower" else False - - for i, (u, v) in enumerate(data.edge_index.t().tolist()): + from torch_geometric.data import HeteroData - if to_undirected_upper and u > v: - continue - elif to_undirected_lower and u < v: - continue + to_undirected_upper: bool = to_undirected in {'upper', True} + to_undirected_lower: bool = to_undirected == 'lower' + to_undirected: bool = to_undirected_upper or to_undirected_lower - if remove_self_loops and u == v: - continue + if isinstance(data, HeteroData) and to_undirected: + raise ValueError("'to_undirected' is not supported in " + "'to_networkx' for heterogeneous graphs") - G.add_edge(u, v) + G = nx.Graph() if to_undirected else nx.DiGraph() - for key in edge_attrs: - G[u][v][key] = values[key][i] + def to_networkx_value(value: Any) -> Any: + return value.tolist() if isinstance(value, Tensor) else value + + for key in graph_attrs or []: + G.graph[key] = to_networkx_value(data[key]) + + node_offsets = data.node_offsets + for store in data.node_stores: + start = node_offsets[store._key] + for i in range(store.num_nodes): + attr: Dict[str, Any] = {} + if isinstance(data, HeteroData): + attr['type'] = store._key + for key in node_attrs or []: + attr[key] = to_networkx_value(store[key][i]) + G.add_node(start + i, **attr) + + for store in data.edge_stores: + for i, (v, w) in enumerate(store.edge_index.t().tolist()): + if to_undirected_upper and v > w: + continue + elif to_undirected_lower and v < w: + continue + elif remove_self_loops and v == w and not store.is_bipartite(): + continue - for key in node_attrs: - for i, feat_dict in G.nodes(data=True): - feat_dict.update({key: values[key][i]}) + attr: Dict[str, Any] = {} + if isinstance(data, HeteroData): + v = v + node_offsets[store._key[0]] + w = w + node_offsets[store._key[-1]] + attr['type'] = store._key + for key in edge_attrs or []: + attr[key] = to_networkx_value(store[key][i]) - for key in graph_attrs: - G.graph[key] = values[key] + G.add_edge(v, w, **attr) return G diff --git a/torch_geometric/utils/geodesic.py b/torch_geometric/utils/geodesic.py index 889f805b2378..e452984d6cc5 100644 --- a/torch_geometric/utils/geodesic.py +++ b/torch_geometric/utils/geodesic.py @@ -66,9 +66,9 @@ def geodesic_distance( if 'dest' in kwargs: dst = kwargs['dest'] - warnings.warn( - "'dest' attribute in 'geodesic_distance' is deprecated and will " - "be removed in a future release. Use 'dst' argument instead.") + warnings.warn("'dest' attribute in 'geodesic_distance' is deprecated " + "and will be removed in a future release. 
Use the 'dst' " + "argument instead.") max_distance = float('inf') if max_distance is None else max_distance From 9bc70173159b44502d6908c1563cf8a72cac5cdb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 9 Jul 2023 11:04:06 +0700 Subject: [PATCH 1346/2432] Raise error when collecting non-existing attributes in `HeteroData` (#7714) --- CHANGELOG.md | 1 + benchmark/training/training_benchmark.py | 4 ++-- benchmark/utils/hetero_gat.py | 4 ++-- benchmark/utils/hetero_sage.py | 4 ++-- benchmark/utils/utils.py | 4 ++-- test/data/test_hetero_data.py | 2 ++ test/explain/test_hetero_explanation.py | 2 +- torch_geometric/data/hetero_data.py | 19 ++++++++++++++++--- torch_geometric/explain/explanation.py | 12 ++++-------- 9 files changed, 32 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f0b0f6bba85c..c7029a2e5165 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714)) - Renamed `dest` argument to `dst` in `utils.geodesic_distance` ([#7708](https://github.com/pyg-team/pytorch_geometric/pull/7708)) - Changed `add_random_edge` to only add true negative edges ([#7654](https://github.com/pyg-team/pytorch_geometric/pull/7654)) - Allowed the usage of `BasicGNN` models in `DeepGraphInfomax` ([#7648](https://github.com/pyg-team/pytorch_geometric/pull/7648)) diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index e17fd0368d75..0abc5cdab82e 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -36,7 +36,7 @@ def train_homo(model, loader, optimizer, device, progress_bar=True, desc="", for batch in loader: optimizer.zero_grad() batch = batch.to(device) - if hasattr(batch, 'adj_t'): + if 'adj_t' in batch: edge_index = batch.adj_t else: edge_index = batch.edge_index @@ -67,7 +67,7 @@ def train_hetero(model, loader, optimizer, device, progress_bar=True, desc="", for batch in loader: optimizer.zero_grad() batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: edge_index_dict = batch.adj_t_dict else: edge_index_dict = batch.edge_index_dict diff --git a/benchmark/utils/hetero_gat.py b/benchmark/utils/hetero_gat.py index b1bda5c5e367..d3483e4ecd94 100644 --- a/benchmark/utils/hetero_gat.py +++ b/benchmark/utils/hetero_gat.py @@ -22,7 +22,7 @@ def inference(self, loader, device, progress_bar=False): loader = tqdm(loader, desc="Inference") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: self.model(batch.x_dict, batch.adj_t_dict) else: self.model(batch.x_dict, batch.edge_index_dict) @@ -35,7 +35,7 @@ def test(self, x, loader, device, progress_bar=False): loader = tqdm(loader, desc="Evaluate") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: out = self.model(batch.x_dict, batch.adj_t_dict) else: out = self.model(batch.x_dict, batch.edge_index_dict) diff --git a/benchmark/utils/hetero_sage.py b/benchmark/utils/hetero_sage.py index 8b71b15cd586..1445ef6146bc 100644 --- a/benchmark/utils/hetero_sage.py +++ b/benchmark/utils/hetero_sage.py @@ -21,7 +21,7 @@ def inference(self, loader, device, progress_bar=False): loader = tqdm(loader, desc="Inference") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 
'adj_t' in batch: self.model(batch.x_dict, batch.adj_t_dict) else: self.model(batch.x_dict, batch.edge_index_dict) @@ -34,7 +34,7 @@ def test(self, loader, device, progress_bar=False): loader = tqdm(loader, desc="Evaluate") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: out = self.model(batch.x_dict, batch.adj_t_dict) else: out = self.model(batch.x_dict, batch.edge_index_dict) diff --git a/benchmark/utils/utils.py b/benchmark/utils/utils.py index ecb5d16aa5fc..5b2dd3d9f650 100644 --- a/benchmark/utils/utils.py +++ b/benchmark/utils/utils.py @@ -168,7 +168,7 @@ def test(model, loader, device, hetero, progress_bar=True, if hetero: for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: edge_index_dict = batch.adj_t_dict else: edge_index_dict = batch.edge_index_dict @@ -182,7 +182,7 @@ def test(model, loader, device, hetero, progress_bar=True, else: for batch in loader: batch = batch.to(device) - if hasattr(batch, 'adj_t'): + if 'adj_t' in batch: edge_index = batch.adj_t else: edge_index = batch.edge_index diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 75a37ee7789c..bea01b6ad99f 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -106,6 +106,8 @@ def test_hetero_data_to_from_dict(): def test_hetero_data_functions(): data = HeteroData() + with pytest.raises(KeyError, match="did not find any occurrences of it"): + data.collect('x') data['paper'].x = x_paper data['author'].x = x_author data['paper', 'paper'].edge_index = edge_index_paper_paper diff --git a/test/explain/test_hetero_explanation.py b/test/explain/test_hetero_explanation.py index f492407ca5ba..ebbb3f0981b9 100644 --- a/test/explain/test_hetero_explanation.py +++ b/test/explain/test_hetero_explanation.py @@ -136,7 +136,7 @@ def test_visualize_feature_importance( path = osp.join(tmp_path, 'feature_importance.png') if node_mask_type is None: - with pytest.raises(ValueError, match="node_mask' is not"): + with pytest.raises(KeyError, match="Tried to collect 'node_mask'"): explanation.visualize_feature_importance(path, top_k=top_k) else: explanation.visualize_feature_importance(path, top_k=top_k) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 16c4320f50b6..cfe58a3b2683 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -502,7 +502,11 @@ def metadata(self) -> Tuple[List[NodeType], List[EdgeType]]: """ return self.node_types, self.edge_types - def collect(self, key: str) -> Dict[NodeOrEdgeType, Any]: + def collect( + self, + key: str, + allow_empty: bool = False, + ) -> Dict[NodeOrEdgeType, Any]: r"""Collects the attribute :attr:`key` from all node and edge types. .. code-block:: python @@ -517,12 +521,21 @@ def collect(self, key: str) -> Dict[NodeOrEdgeType, Any]: .. note:: This is equivalent to writing :obj:`data.x_dict`. + + Args: + key (str): The attribute to collect from all node and ege types. + allow_empty (bool, optional): If set to :obj:`True`, will not raise + an error in case the attribute does not exit in any node or + edge type. 
(default: :obj:`False`) """ mapping = {} for subtype, store in chain(self._node_store_dict.items(), self._edge_store_dict.items()): if hasattr(store, key): mapping[subtype] = getattr(store, key) + if not allow_empty and len(mapping) == 0: + raise KeyError(f"Tried to collect '{key}' but did not find any " + f"occurrences of it in any node and/or edge type") return mapping def _check_type_name(self, name: str): @@ -888,7 +901,7 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: value = torch.cat(values, dim) if len(values) > 1 else values[0] data[key] = value - if len(self.edge_label_index_dict) > 0: + if 'edge_label_index' in self: edge_label_index_dict = self.edge_label_index_dict for edge_type, edge_label_index in edge_label_index_dict.items(): edge_label_index = edge_label_index.clone() @@ -1093,7 +1106,7 @@ def to_homogeneous_edge_index( cumsum = 0 edge_indices: List[Tensor] = [] edge_slices: Dict[EdgeType, Tuple[int, int]] = {} - for edge_type, edge_index in data.edge_index_dict.items(): + for edge_type, edge_index in data.collect('edge_index', True).items(): edge_index = offset_edge_index(node_slices, edge_type, edge_index) edge_indices.append(edge_index) edge_slices[edge_type] = (cumsum, cumsum + edge_index.size(1)) diff --git a/torch_geometric/explain/explanation.py b/torch_geometric/explain/explanation.py index 57e4b98ed7ce..049c618a126a 100644 --- a/torch_geometric/explain/explanation.py +++ b/torch_geometric/explain/explanation.py @@ -271,11 +271,11 @@ def get_explanation_subgraph(self) -> 'HeteroExplanation': return self._apply_masks( node_mask_dict={ key: mask.sum(dim=-1) > 0 - for key, mask in self.node_mask_dict.items() + for key, mask in self.collect('node_mask', True).items() }, edge_mask_dict={ key: mask > 0 - for key, mask in self.edge_mask_dict.items() + for key, mask in self.collect('edge_mask', True).items() }, ) @@ -285,11 +285,11 @@ def get_complement_subgraph(self) -> 'HeteroExplanation': return self._apply_masks( node_mask_dict={ key: mask.sum(dim=-1) == 0 - for key, mask in self.node_mask_dict.items() + for key, mask in self.collect('node_mask', True).items() }, edge_mask_dict={ key: mask == 0 - for key, mask in self.edge_mask_dict.items() + for key, mask in self.collect('edge_mask', True).items() }, ) @@ -328,10 +328,6 @@ def visualize_feature_importance( plots all features. 
(default: :obj:`None`) """ node_mask_dict = self.node_mask_dict - if len(node_mask_dict) == 0: - raise ValueError(f"The attribute 'node_mask' is not available " - f"in '{self.__class__.__name__}' " - f"(got {self.available_explanations})") for node_mask in node_mask_dict.values(): if node_mask.dim() != 2 or node_mask.size(1) <= 1: raise ValueError(f"Cannot compute feature importance for " From cd725577dbdcbf5883d0c85b7f3637799188919b Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Thu, 13 Jul 2023 02:28:42 +0100 Subject: [PATCH 1347/2432] Don't implicitly chain exceptions when accessing non-existent storage attributes (#7734) This PR changes the traceback when a user tries to access attributes on `Data` that doesn't exist: ```python import torch_geometric data = torch_geometric.data.Data() data.asdf ``` ## Before ```diff Traceback (most recent call last): File "/workspaces/pytorch_geometric/torch_geometric/data/storage.py", line 80, in __getattr__ return self[key] File "/workspaces/pytorch_geometric/torch_geometric/data/storage.py", line 105, in __getitem__ return self._mapping[key] KeyError: 'asdf' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/workspaces/pytorch_geometric/test.py", line 4, in data.asdf File "/workspaces/pytorch_geometric/torch_geometric/data/data.py", line 475, in __getattr__ return getattr(self._store, key) File "/workspaces/pytorch_geometric/torch_geometric/data/storage.py", line 82, in __getattr__ raise AttributeError( AttributeError: 'GlobalStorage' object has no attribute 'asdf' ``` ## After ``` Traceback (most recent call last): File "/workspaces/pytorch_geometric/test.py", line 4, in data.asdf File "/workspaces/pytorch_geometric/torch_geometric/data/data.py", line 475, in __getattr__ return getattr(self._store, key) File "/workspaces/pytorch_geometric/torch_geometric/data/storage.py", line 82, in __getattr__ raise AttributeError( AttributeError: 'GlobalStorage' object has no attribute 'asdf' ``` ## Alternative By explicitly chaining exceptions, one of the lines in the error changes: ```diff - except KeyError: + except KeyError as e: raise AttributeError( - f"'{self.__class__.__name__}' object has no attribute '{key}'") + f"'{self.__class__.__name__}' object has no attribute '{key}'") from e ``` ```diff - During handling of the above exception, another exception occurred: + The above exception was the direct cause of the following exception: ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/data/test_storage.py | 4 ++++ torch_geometric/data/storage.py | 3 ++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7029a2e5165..e9eaa0d292bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734)) - Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714)) - Renamed `dest` argument to `dst` in `utils.geodesic_distance` ([#7708](https://github.com/pyg-team/pytorch_geometric/pull/7708)) - Changed `add_random_edge` to only add true negative edges ([#7654](https://github.com/pyg-team/pytorch_geometric/pull/7654)) diff --git a/test/data/test_storage.py b/test/data/test_storage.py index c7564e5d153f..7adac31be6eb 100644 --- a/test/data/test_storage.py +++ b/test/data/test_storage.py @@ -1,6 +1,7 @@ import copy from typing import Any +import pytest import torch from torch_geometric.data.storage import BaseStorage @@ -52,6 +53,9 @@ def test_base_storage(): assert int(storage.x) == 0 assert int(deepcopied_storage.x) == 0 + with pytest.raises(AttributeError, match="has no attribute 'asdf'"): + storage.asdf + def test_storage_tensor_methods(): x = torch.randn(5) diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index 716d00d92595..3de611967fc6 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -80,7 +80,8 @@ def __getattr__(self, key: str) -> Any: return self[key] except KeyError: raise AttributeError( - f"'{self.__class__.__name__}' object has no attribute '{key}'") + f"'{self.__class__.__name__}' object has no attribute '{key}'" + ) from None def __setattr__(self, key: str, value: Any): propobj = getattr(self.__class__, key, None) From 9c79ce8570df194c15e4e6857c8b86d757dbbcb7 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 12 Jul 2023 18:30:26 -0700 Subject: [PATCH 1348/2432] Modify `dtype` in `ogbn-products` GAT example #7703 (#7735) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` Traceback (most recent call last): File "ogbn_products_gat.py", line 185, in print(f'Final Test: {test_acc.mean():.4f} ± {test_acc.std():.4f}') RuntimeError: mean(): could not infer output dtype. Input dtype must be either a floating point or complex dtype. Got: Long ``` --- examples/ogbn_products_gat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ogbn_products_gat.py b/examples/ogbn_products_gat.py index 93a596178abc..06d01e94b1f8 100644 --- a/examples/ogbn_products_gat.py +++ b/examples/ogbn_products_gat.py @@ -165,7 +165,7 @@ def test(): model.reset_parameters() optimizer = torch.optim.Adam(model.parameters(), lr=0.001) - best_val_acc = final_test_acc = 0 + best_val_acc = final_test_acc = 0.0 for epoch in range(1, 101): loss, acc = train(epoch) print(f'Epoch {epoch:02d}, Loss: {loss:.4f}, Approx. 
Train: {acc:.4f}') From 165b68eebdc4d5bf44e56ca12f081aa55020a883 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 15 Jul 2023 14:45:02 +0700 Subject: [PATCH 1349/2432] Prevent commits to master branch (#7749) --- .pre-commit-config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b48bce0a0aca..3e932718180e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,12 @@ ci: autoupdate_schedule: quarterly repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: no-commit-to-branch + name: No commits to master + - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: From 6995c54eb22195bc1771df2ac6c7e639e27c1c6a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 17 Jul 2023 08:32:07 +0700 Subject: [PATCH 1350/2432] Test `ImbalancedSampler` with `HeteroData` (#7753) --- test/loader/test_imbalanced_sampler.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/test/loader/test_imbalanced_sampler.py b/test/loader/test_imbalanced_sampler.py index 28fa7c50d998..eb042765fb4a 100644 --- a/test/loader/test_imbalanced_sampler.py +++ b/test/loader/test_imbalanced_sampler.py @@ -3,7 +3,7 @@ import torch from torch_geometric.data import Data -from torch_geometric.datasets import FakeDataset +from torch_geometric.datasets import FakeDataset, FakeHeteroDataset from torch_geometric.loader import ( DataLoader, ImbalancedSampler, @@ -92,3 +92,25 @@ def test_neighbor_loader_with_imbalanced_sampler(): num_neighbors=[-1]) assert torch.allclose(y, torch.cat([batch.y for batch in loader])) + + +@onlyNeighborSampler +def test_hetero_neighbor_loader_with_imbalanced_sampler(): + torch.manual_seed(12345) + data = FakeHeteroDataset(num_classes=2)[0] + + loader = NeighborLoader( + data, + batch_size=100, + input_nodes='v0', + num_neighbors=[-1], + sampler=ImbalancedSampler(data['v0'].y), + ) + + y = torch.cat([batch['v0'].y[:batch['v0'].batch_size] for batch in loader]) + + histogram = y.bincount() + prob = histogram / histogram.sum() + + assert histogram.sum() == data['v0'].num_nodes + assert prob.min() > 0.4 and prob.max() < 0.6 From c6f3a55dff40992947ba15e34925cb3333a7c351 Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Mon, 17 Jul 2023 19:46:27 +0200 Subject: [PATCH 1351/2432] Add possibility to run inference benchmarks on XPU device (#7705) This PR adds possibility to run inference benchmarks on XPU device. Exemplary CMD: `python inference_benchmark.py --device xpu --datasets Reddit --models sage --eval-batch-sizes 1024 --num-layers 2 --num-hidden-channels 64 --profile` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: kgajdamo --- CHANGELOG.md | 1 + benchmark/inference/inference_benchmark.py | 47 ++++++++++++++++++---- test/profile/test_profile.py | 31 +++++++++++++- torch_geometric/profile/__init__.py | 2 + torch_geometric/profile/profile.py | 9 +++++ torch_geometric/testing/__init__.py | 2 + torch_geometric/testing/decorators.py | 14 +++++++ 7 files changed, 97 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9eaa0d292bd..9bcc1c405979 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added possibility to run inference benchmarks on XPU device ([#7705](https://github.com/pyg-team/pytorch_geometric/pull/7705)) - Added `HeteroData` support in `to_networkx` ([#7713](https://github.com/pyg-team/pytorch_geometric/pull/7713)) - Added `FlopsCount` support via `fvcore` ([#7693](https://github.com/pyg-team/pytorch_geometric/pull/7693)) - Added back support for PyTorch >= 1.11.0 ([#7656](https://github.com/pyg-team/pytorch_geometric/pull/7656)) diff --git a/benchmark/inference/inference_benchmark.py b/benchmark/inference/inference_benchmark.py index 380c1d53f5fa..22d6dd7bf64f 100644 --- a/benchmark/inference/inference_benchmark.py +++ b/benchmark/inference/inference_benchmark.py @@ -16,7 +16,12 @@ ) from torch_geometric.loader import NeighborLoader from torch_geometric.nn import PNAConv -from torch_geometric.profile import rename_profile_file, timeit, torch_profile +from torch_geometric.profile import ( + rename_profile_file, + timeit, + torch_profile, + xpu_profile, +) supported_sets = { 'ogbn-mag': ['rgat', 'rgcn'], @@ -42,11 +47,23 @@ def run(args: argparse.ArgumentParser): warnings.warn("Cannot write profile data to CSV because profiling is " "disabled") - # cuda device is not suitable for full batch mode - device = torch.device( - 'cuda' if not args.full_batch and torch.cuda.is_available() else 'cpu') + if args.device == 'xpu': + try: + import intel_extension_for_pytorch as ipex + except ImportError: + raise RuntimeError('XPU device requires IPEX to be installed') + + if ((args.device == 'cuda' and not torch.cuda.is_available()) + or (args.device == 'xpu' and not torch.xpu.is_available())): + raise RuntimeError(f'{args.device.upper()} is not available') + + if args.device == 'cuda' and args.full_batch: + raise RuntimeError('CUDA device is not suitable for full batch mode') + + device = torch.device(args.device) print('BENCHMARK STARTS') + print(f'Running on {args.device.upper()}') for dataset_name in args.datasets: assert dataset_name in supported_sets.keys( ), f"Dataset {dataset_name} isn't supported." 
@@ -66,11 +83,17 @@ def run(args: argparse.ArgumentParser): if args.num_layers != [1] and not hetero and args.num_steps != -1: raise ValueError("Layer-wise inference requires `steps=-1`") - if torch.cuda.is_available(): + if args.device == 'cuda': amp = torch.cuda.amp.autocast(enabled=False) + elif args.device == 'xpu': + amp = torch.xpu.amp.autocast(enabled=False) else: amp = torch.cpu.amp.autocast(enabled=args.bf16) + if args.device == 'xpu' and args.warmup < 1: + print('XPU device requires warmup - setting warmup=1') + args.warmup = 1 + inputs_channels = data[ 'paper'].num_features if dataset_name == 'ogbn-mag' \ else dataset.num_features @@ -163,6 +186,8 @@ def run(args: argparse.ArgumentParser): state_dict = torch.load(args.ckpt_path) model.load_state_dict(state_dict) model.eval() + if args.device == 'xpu': + model = ipex.optimize(model) # Define context manager parameters: if args.cpu_affinity and with_loader: @@ -170,9 +195,13 @@ def run(args: argparse.ArgumentParser): args.loader_cores) else: cpu_affinity = nullcontext() - profile = torch_profile( - args.export_chrome_trace, csv_data, - args.write_csv) if args.profile else nullcontext() + if args.profile and args.device == 'xpu': + profile = xpu_profile(args.export_chrome_trace) + elif args.profile: + profile = torch_profile(args.export_chrome_trace, + csv_data, args.write_csv) + else: + profile = nullcontext() itt = emit_itt( ) if args.vtune_profile else nullcontext() @@ -256,6 +285,8 @@ def run(args: argparse.ArgumentParser): argparser = argparse.ArgumentParser('GNN inference benchmark') add = argparser.add_argument + add('--device', choices=['cpu', 'cuda', 'xpu'], default='cpu', + help='Device to run benchmark on') add('--datasets', nargs='+', default=['ogbn-mag', 'ogbn-products', 'Reddit'], type=str) add('--use-sparse-tensor', action='http://github.com/store_true', diff --git a/test/profile/test_profile.py b/test/profile/test_profile.py index c4b5c38f3f65..b64d1125f9da 100644 --- a/test/profile/test_profile.py +++ b/test/profile/test_profile.py @@ -1,6 +1,7 @@ import os.path import warnings +import pytest import torch import torch.nn.functional as F @@ -11,11 +12,12 @@ rename_profile_file, timeit, ) -from torch_geometric.profile.profile import torch_profile +from torch_geometric.profile.profile import torch_profile, xpu_profile from torch_geometric.testing import ( onlyCUDA, onlyLinux, onlyOnline, + onlyXPU, withCUDA, withPackage, ) @@ -105,3 +107,30 @@ def test_torch_profile(capfd, get_dataset, device): rename_profile_file('test_profile') assert os.path.exists('profile-test_profile.json') os.remove('profile-test_profile.json') + + +@onlyXPU +@onlyOnline +@pytest.mark.parametrize('export_chrome_trace', [False, True]) +def test_xpu_profile(capfd, get_dataset, export_chrome_trace): + dataset = get_dataset(name='Cora') + device = torch.device('xpu') + data = dataset[0].to(device) + model = GraphSAGE(dataset.num_features, hidden_channels=64, num_layers=3, + out_channels=dataset.num_classes).to(device) + + with xpu_profile(export_chrome_trace): + model(data.x, data.edge_index) + + out, _ = capfd.readouterr() + assert 'Self CPU' in out + if data.x.is_xpu: + assert 'Self XPU' in out + + f_name = 'timeline.json' + f_exists = os.path.exists(f_name) + if not export_chrome_trace: + assert not f_exists + else: + assert f_exists + os.remove(f_name) diff --git a/torch_geometric/profile/__init__.py b/torch_geometric/profile/__init__.py index 9f7340979181..a72d953e6cf4 100644 --- a/torch_geometric/profile/__init__.py +++ 
b/torch_geometric/profile/__init__.py @@ -4,6 +4,7 @@ print_time_total, rename_profile_file, torch_profile, + xpu_profile, ) from .utils import count_parameters from .utils import get_model_size @@ -21,6 +22,7 @@ 'print_time_total', 'rename_profile_file', 'torch_profile', + 'xpu_profile', 'count_parameters', 'get_model_size', 'get_data_size', diff --git a/torch_geometric/profile/profile.py b/torch_geometric/profile/profile.py index fc8f0a887914..748d7f57b7ad 100644 --- a/torch_geometric/profile/profile.py +++ b/torch_geometric/profile/profile.py @@ -265,6 +265,15 @@ def torch_profile(export_chrome_trace=True, csv_data=None, write_csv=None): save_profile_data(csv_data, events, use_cuda) +@contextmanager +def xpu_profile(export_chrome_trace=True): + with torch.autograd.profiler_legacy.profile(use_xpu=True) as profile: + yield + print(profile.key_averages().table(sort_by='self_xpu_time_total')) + if export_chrome_trace: + profile.export_chrome_trace('timeline.json') + + def format_prof_time(time): # Profile time is in micro seconds, so format it appropriately: return round(time / 1e6, 3) diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py index 83f9820416b3..6a108d42fc04 100644 --- a/torch_geometric/testing/__init__.py +++ b/torch_geometric/testing/__init__.py @@ -4,6 +4,7 @@ onlyLinux, onlyPython, onlyCUDA, + onlyXPU, onlyOnline, onlyGraphviz, onlyNeighborSampler, @@ -22,6 +23,7 @@ 'onlyLinux', 'onlyPython', 'onlyCUDA', + 'onlyXPU', 'onlyOnline', 'onlyGraphviz', 'onlyNeighborSampler', diff --git a/torch_geometric/testing/decorators.py b/torch_geometric/testing/decorators.py index b62625fa6a3f..8c5e35c16e41 100644 --- a/torch_geometric/testing/decorators.py +++ b/torch_geometric/testing/decorators.py @@ -59,6 +59,20 @@ def onlyCUDA(func: Callable) -> Callable: )(func) +def onlyXPU(func: Callable) -> Callable: + r"""A decorator to skip tests if XPU is not found.""" + import pytest + try: + import intel_extension_for_pytorch as ipex + xpu_available = ipex.xpu.is_available() + except ImportError: + xpu_available = False + return pytest.mark.skipif( + not xpu_available, + reason="XPU not available", + )(func) + + def onlyOnline(func: Callable): r"""A decorator to skip tests if there exists no connection to the internet.""" From 6537da82a63bb6aeb69d7fa0098ab446b6659094 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Jul 2023 08:33:32 +0200 Subject: [PATCH 1352/2432] Fix flaky shared memory test (#7769) Fix https://github.com/pyg-team/pytorch_geometric/issues/7730 --- test/data/test_data.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/data/test_data.py b/test/data/test_data.py index 4b4fd3ee1ac3..2271fb59d7da 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -339,12 +339,13 @@ def test_data_share_memory(): for data in data_list: assert not data.x.is_shared() + assert torch.all(data.x == 0.0) mp.spawn(run, args=(data_list, ), nprocs=4, join=True) for data in data_list: assert data.x.is_shared() - assert torch.allclose(data.x, torch.full((8, ), 4.)) + assert torch.all(data.x > 0.0) def test_data_setter_properties(): From 48279b45ac0b3bdea68434eb3b61580cbb820825 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Wed, 19 Jul 2023 07:59:00 +0100 Subject: [PATCH 1353/2432] Replace `F.dropout` with `nn.Dropout` when used with `to_hetero` (#7750) Contributes to #7745. 
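A minimal sketch of the pattern applied here (the classes below are illustrative placeholders; the assumption is that dropout held as a submodule survives `to_hetero`'s module transformation more robustly than a functional call that reads `self.training` inside `forward`):

```python
import torch
import torch.nn.functional as F


class WithFunctionalDropout(torch.nn.Module):
    def __init__(self, p: float = 0.5):
        super().__init__()
        self.p = p

    def forward(self, x):
        # Functional form: reads `self.training` at call time inside `forward`.
        return F.dropout(x, p=self.p, training=self.training)


class WithModuleDropout(torch.nn.Module):
    def __init__(self, p: float = 0.5):
        super().__init__()
        # Module form: train/eval switching is handled by the submodule itself.
        self.dropout = torch.nn.Dropout(p=p)

    def forward(self, x):
        return self.dropout(x)
```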
--- examples/pytorch_lightning/relational_gnn.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/pytorch_lightning/relational_gnn.py b/examples/pytorch_lightning/relational_gnn.py index 042ebdf60ef2..fa7b264c7434 100644 --- a/examples/pytorch_lightning/relational_gnn.py +++ b/examples/pytorch_lightning/relational_gnn.py @@ -21,7 +21,7 @@ class GNN(torch.nn.Module): def __init__(self, hidden_channels: int, out_channels: int, dropout: float): super().__init__() - self.dropout = dropout + self.dropout = torch.nn.Dropout(p=dropout) self.conv1 = SAGEConv((-1, -1), hidden_channels) self.conv2 = SAGEConv((-1, -1), hidden_channels) @@ -29,9 +29,9 @@ def __init__(self, hidden_channels: int, out_channels: int, def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: x = self.conv1(x, edge_index).relu() - x = F.dropout(x, p=self.dropout, training=self.training) + x = self.dropout(x) x = self.conv2(x, edge_index).relu() - x = F.dropout(x, p=self.dropout, training=self.training) + x = self.dropout(x) return self.lin(x) From 01814681d1256c8ce1b03b1f4be76ccc5c6a651a Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Wed, 19 Jul 2023 02:20:00 -0700 Subject: [PATCH 1354/2432] Add `device` in `rgcn_link_pred` example (#7739) Signed-off-by: Serge Panev Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- examples/rgcn_link_pred.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/examples/rgcn_link_pred.py b/examples/rgcn_link_pred.py index 71c768bfbcf0..fd47d254cb15 100644 --- a/examples/rgcn_link_pred.py +++ b/examples/rgcn_link_pred.py @@ -16,9 +16,11 @@ from torch_geometric.datasets import RelLinkPredDataset from torch_geometric.nn import GAE, RGCNConv +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'RLPD') dataset = RelLinkPredDataset(path, 'FB15k-237') -data = dataset[0] +data = dataset[0].to(device) class RGCNEncoder(torch.nn.Module): @@ -60,10 +62,10 @@ def forward(self, z, edge_index, edge_type): model = GAE( - RGCNEncoder(data.num_nodes, hidden_channels=500, - num_relations=dataset.num_relations), - DistMultDecoder(dataset.num_relations // 2, hidden_channels=500), -) + RGCNEncoder(data.num_nodes, 500, dataset.num_relations), + DistMultDecoder(dataset.num_relations // 2, 500), +).to(device) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) @@ -73,8 +75,10 @@ def negative_sampling(edge_index, num_nodes): mask_2 = ~mask_1 neg_edge_index = edge_index.clone() - neg_edge_index[0, mask_1] = torch.randint(num_nodes, (mask_1.sum(), )) - neg_edge_index[1, mask_2] = torch.randint(num_nodes, (mask_2.sum(), )) + neg_edge_index[0, mask_1] = torch.randint(num_nodes, (mask_1.sum(), ), + device=neg_edge_index.device) + neg_edge_index[1, mask_2] = torch.randint(num_nodes, (mask_2.sum(), ), + device=neg_edge_index.device) return neg_edge_index From b344d4179642b84296179c8b934df66d4a2541a5 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Wed, 19 Jul 2023 10:26:04 +0100 Subject: [PATCH 1355/2432] Add `.gitattributes` for consistent line endings in the repo (#7759) No more weird diffs, e.g. https://github.com/pyg-team/pytorch_geometric/pull/7717#issuecomment-1635607258, https://github.com/pyg-team/pytorch_geometric/pull/7503#issuecomment-1575475212. 
See also: https://docs.github.com/en/get-started/getting-started-with-git/configuring-git-to-handle-line-endings#per-repository-settings --------- Co-authored-by: Matthias Fey --- .gitattributes | 1 + examples/rect.py | 132 +++---- test/nn/conv/test_han_conv.py | 272 ++++++------- test/utils/test_get_mesh_laplacian.py | 202 +++++----- torch_geometric/datasets/movie_lens.py | 198 +++++----- torch_geometric/datasets/movie_lens_100k.py | 358 ++++++++--------- torch_geometric/nn/conv/han_conv.py | 366 +++++++++--------- torch_geometric/nn/models/rect.py | 306 +++++++-------- .../transforms/remove_training_classes.py | 54 +-- .../transforms/svd_feature_reduction.py | 56 +-- torch_geometric/utils/get_mesh_laplacian.py | 218 +++++------ 11 files changed, 1082 insertions(+), 1081 deletions(-) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000000..d9bd16b0923e --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.py text eol=lf diff --git a/examples/rect.py b/examples/rect.py index 6d59991c6b10..a89e6ff1f3e7 100644 --- a/examples/rect.py +++ b/examples/rect.py @@ -1,66 +1,66 @@ -import argparse -import copy -import os.path as osp - -import torch -from sklearn.linear_model import LogisticRegression - -import torch_geometric.transforms as T -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import RECT_L - -# RECT focuses on the zero-shot, i.e. completely-imbalanced label setting: -# For this, we first remove "unseen" classes from the training set and train a -# RECT (or more specifically its supervised part RECT-L) model in the zero-shot -# label scenario. Lastly, we train a simple classifier to evaluate the final -# performance of the embeddings based on the original labels. 
- -# Datasets Citeseer Cora Pubmed -# Unseen Classes [1, 2, 5] [3, 4] [1, 2, 3] [3, 4, 6] [2] -# RECT-L 66.30 68.20 74.60 71.20 75.30 -# GCN 51.80 55.70 55.80 57.10 59.80 -# NodeFeats 61.40 61.40 57.50 57.50 73.10 - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, default='Cora', - choices=['Cora', 'CiteSeer', 'PubMed']) -parser.add_argument('--unseen-classes', type=int, nargs='*', default=[1, 2, 3]) -args = parser.parse_args() - -path = osp.join(osp.dirname(osp.realpath(__file__)), '../data/Planetoid') -train_mask_original = Planetoid(path, args.dataset)[0].train_mask.clone() -transform = T.Compose([ - T.NormalizeFeatures(), - T.SVDFeatureReduction(200), - T.GDC(), -]) -dataset = Planetoid(path, args.dataset, transform=transform) -data = dataset[0] -zs_data = T.RemoveTrainingClasses(args.unseen_classes)(copy.copy(data)) - -model = RECT_L(200, 200, normalize=False, dropout=0.0) -zs_data.y = model.get_semantic_labels(zs_data.x, zs_data.y, zs_data.train_mask) - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model, zs_data = model.to(device), zs_data.to(device) - -criterion = torch.nn.MSELoss(reduction='sum') -optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) - -model.train() -for epoch in range(1, 201): - optimizer.zero_grad() - out = model(zs_data.x, zs_data.edge_index, zs_data.edge_attr) - loss = criterion(out[zs_data.train_mask], zs_data.y) - loss.backward() - optimizer.step() - print(f'Epoch {epoch:03d}, Loss {loss:.4f}') - -model.eval() -with torch.no_grad(): - h = model.embed(zs_data.x, zs_data.edge_index, zs_data.edge_attr).cpu() - -reg = LogisticRegression() -reg.fit(h[data.train_mask].numpy(), data.y[data.train_mask].numpy()) -test_acc = reg.score(h[data.test_mask].numpy(), data.y[data.test_mask].numpy()) -print(f'Test Acc: {test_acc:.4f}') +import argparse +import copy +import os.path as osp + +import torch +from sklearn.linear_model import LogisticRegression + +import torch_geometric.transforms as T +from torch_geometric.datasets import Planetoid +from torch_geometric.nn import RECT_L + +# RECT focuses on the zero-shot, i.e. completely-imbalanced label setting: +# For this, we first remove "unseen" classes from the training set and train a +# RECT (or more specifically its supervised part RECT-L) model in the zero-shot +# label scenario. Lastly, we train a simple classifier to evaluate the final +# performance of the embeddings based on the original labels. 
+ +# Datasets Citeseer Cora Pubmed +# Unseen Classes [1, 2, 5] [3, 4] [1, 2, 3] [3, 4, 6] [2] +# RECT-L 66.30 68.20 74.60 71.20 75.30 +# GCN 51.80 55.70 55.80 57.10 59.80 +# NodeFeats 61.40 61.40 57.50 57.50 73.10 + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, default='Cora', + choices=['Cora', 'CiteSeer', 'PubMed']) +parser.add_argument('--unseen-classes', type=int, nargs='*', default=[1, 2, 3]) +args = parser.parse_args() + +path = osp.join(osp.dirname(osp.realpath(__file__)), '../data/Planetoid') +train_mask_original = Planetoid(path, args.dataset)[0].train_mask.clone() +transform = T.Compose([ + T.NormalizeFeatures(), + T.SVDFeatureReduction(200), + T.GDC(), +]) +dataset = Planetoid(path, args.dataset, transform=transform) +data = dataset[0] +zs_data = T.RemoveTrainingClasses(args.unseen_classes)(copy.copy(data)) + +model = RECT_L(200, 200, normalize=False, dropout=0.0) +zs_data.y = model.get_semantic_labels(zs_data.x, zs_data.y, zs_data.train_mask) + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +model, zs_data = model.to(device), zs_data.to(device) + +criterion = torch.nn.MSELoss(reduction='sum') +optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) + +model.train() +for epoch in range(1, 201): + optimizer.zero_grad() + out = model(zs_data.x, zs_data.edge_index, zs_data.edge_attr) + loss = criterion(out[zs_data.train_mask], zs_data.y) + loss.backward() + optimizer.step() + print(f'Epoch {epoch:03d}, Loss {loss:.4f}') + +model.eval() +with torch.no_grad(): + h = model.embed(zs_data.x, zs_data.edge_index, zs_data.edge_attr).cpu() + +reg = LogisticRegression() +reg.fit(h[data.train_mask].numpy(), data.y[data.train_mask].numpy()) +test_acc = reg.score(h[data.test_mask].numpy(), data.y[data.test_mask].numpy()) +print(f'Test Acc: {test_acc:.4f}') diff --git a/test/nn/conv/test_han_conv.py b/test/nn/conv/test_han_conv.py index 299d7e11fc41..caa7342d1e09 100644 --- a/test/nn/conv/test_han_conv.py +++ b/test/nn/conv/test_han_conv.py @@ -1,136 +1,136 @@ -import torch - -import torch_geometric.typing -from torch_geometric.nn import HANConv -from torch_geometric.typing import SparseTensor -from torch_geometric.utils import coalesce, to_torch_csc_tensor - - -def test_han_conv(): - x_dict = { - 'author': torch.randn(6, 16), - 'paper': torch.randn(5, 12), - 'term': torch.randn(4, 3) - } - edge_index1 = coalesce(torch.randint(0, 6, (2, 7))) - edge_index2 = coalesce(torch.randint(0, 5, (2, 4))) - edge_index3 = coalesce(torch.randint(0, 3, (2, 5))) - edge_index_dict = { - ('author', 'metapath0', 'author'): edge_index1, - ('paper', 'metapath1', 'paper'): edge_index2, - ('paper', 'metapath2', 'paper'): edge_index3, - } - - adj_t_dict1 = {} - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = to_torch_csc_tensor( - edge_index, - size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), - ).t() - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - in_channels = {'author': 16, 'paper': 12, 'term': 3} - - conv = HANConv(in_channels, 16, metadata, heads=2) - assert str(conv) == 'HANConv(16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 3 - assert out_dict1['author'].size() == (6, 16) - assert out_dict1['paper'].size() == (5, 16) - assert out_dict1['term'] is None - del out_dict1['term'] - del x_dict['term'] - - out_dict2 = conv(x_dict, adj_t_dict1) - assert len(out_dict1) == len(out_dict2) - for key in 
out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj_t_dict2 = {} - for edge_type, edge_index in edge_index_dict.items(): - adj_t_dict2[edge_type] = SparseTensor.from_edge_index( - edge_index, - sparse_sizes=adj_t_dict1[edge_type].size()[::-1], - ).t() - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict3.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - - # Test non-zero dropout: - conv = HANConv(in_channels, 16, metadata, heads=2, dropout=0.1) - assert str(conv) == 'HANConv(16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 2 - assert out_dict1['author'].size() == (6, 16) - assert out_dict1['paper'].size() == (5, 16) - - -def test_han_conv_lazy(): - x_dict = { - 'author': torch.randn(6, 16), - 'paper': torch.randn(5, 12), - } - edge_index1 = coalesce(torch.randint(0, 6, (2, 8))) - edge_index2 = coalesce(torch.randint(0, 5, (2, 6))) - edge_index_dict = { - ('author', 'to', 'author'): edge_index1, - ('paper', 'to', 'paper'): edge_index2, - } - - adj_t_dict1 = {} - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = to_torch_csc_tensor( - edge_index, - size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), - ).t() - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - conv = HANConv(-1, 16, metadata, heads=2) - assert str(conv) == 'HANConv(16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 2 - assert out_dict1['author'].size() == (6, 16) - assert out_dict1['paper'].size() == (5, 16) - - out_dict2 = conv(x_dict, adj_t_dict1) - assert len(out_dict1) == len(out_dict2) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj_t_dict2 = {} - for edge_type, edge_index in edge_index_dict.items(): - adj_t_dict2[edge_type] = SparseTensor.from_edge_index( - edge_index, - sparse_sizes=adj_t_dict1[edge_type].size()[::-1], - ).t() - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - - -def test_han_conv_empty_tensor(): - x_dict = { - 'author': torch.randn(6, 16), - 'paper': torch.empty(0, 12), - } - edge_index_dict = { - ('paper', 'to', 'author'): torch.empty((2, 0), dtype=torch.long), - ('author', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), - ('paper', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), - } - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - in_channels = {'author': 16, 'paper': 12} - conv = HANConv(in_channels, 16, metadata, heads=2) - - out_dict = conv(x_dict, edge_index_dict) - assert len(out_dict) == 2 - assert out_dict['author'].size() == (6, 16) - assert torch.all(out_dict['author'] == 0) - assert out_dict['paper'].size() == (0, 16) +import torch + +import torch_geometric.typing +from torch_geometric.nn import HANConv +from torch_geometric.typing import SparseTensor +from torch_geometric.utils import coalesce, to_torch_csc_tensor + + +def test_han_conv(): + x_dict = { + 'author': torch.randn(6, 16), + 'paper': torch.randn(5, 12), + 'term': torch.randn(4, 3) + } + edge_index1 = coalesce(torch.randint(0, 6, (2, 7))) + edge_index2 = coalesce(torch.randint(0, 5, (2, 4))) + edge_index3 = coalesce(torch.randint(0, 3, (2, 5))) 
+ edge_index_dict = { + ('author', 'metapath0', 'author'): edge_index1, + ('paper', 'metapath1', 'paper'): edge_index2, + ('paper', 'metapath2', 'paper'): edge_index3, + } + + adj_t_dict1 = {} + for edge_type, edge_index in edge_index_dict.items(): + src_type, _, dst_type = edge_type + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() + + metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) + in_channels = {'author': 16, 'paper': 12, 'term': 3} + + conv = HANConv(in_channels, 16, metadata, heads=2) + assert str(conv) == 'HANConv(16, heads=2)' + out_dict1 = conv(x_dict, edge_index_dict) + assert len(out_dict1) == 3 + assert out_dict1['author'].size() == (6, 16) + assert out_dict1['paper'].size() == (5, 16) + assert out_dict1['term'] is None + del out_dict1['term'] + del x_dict['term'] + + out_dict2 = conv(x_dict, adj_t_dict1) + assert len(out_dict1) == len(out_dict2) + for key in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for key in out_dict3.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + + # Test non-zero dropout: + conv = HANConv(in_channels, 16, metadata, heads=2, dropout=0.1) + assert str(conv) == 'HANConv(16, heads=2)' + out_dict1 = conv(x_dict, edge_index_dict) + assert len(out_dict1) == 2 + assert out_dict1['author'].size() == (6, 16) + assert out_dict1['paper'].size() == (5, 16) + + +def test_han_conv_lazy(): + x_dict = { + 'author': torch.randn(6, 16), + 'paper': torch.randn(5, 12), + } + edge_index1 = coalesce(torch.randint(0, 6, (2, 8))) + edge_index2 = coalesce(torch.randint(0, 5, (2, 6))) + edge_index_dict = { + ('author', 'to', 'author'): edge_index1, + ('paper', 'to', 'paper'): edge_index2, + } + + adj_t_dict1 = {} + for edge_type, edge_index in edge_index_dict.items(): + src_type, _, dst_type = edge_type + adj_t_dict1[edge_type] = to_torch_csc_tensor( + edge_index, + size=(x_dict[src_type].size(0), x_dict[dst_type].size(0)), + ).t() + + metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) + conv = HANConv(-1, 16, metadata, heads=2) + assert str(conv) == 'HANConv(16, heads=2)' + out_dict1 = conv(x_dict, edge_index_dict) + assert len(out_dict1) == 2 + assert out_dict1['author'].size() == (6, 16) + assert out_dict1['paper'].size() == (5, 16) + + out_dict2 = conv(x_dict, adj_t_dict1) + assert len(out_dict1) == len(out_dict2) + for key in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj_t_dict2 = {} + for edge_type, edge_index in edge_index_dict.items(): + adj_t_dict2[edge_type] = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=adj_t_dict1[edge_type].size()[::-1], + ).t() + out_dict3 = conv(x_dict, adj_t_dict2) + assert len(out_dict1) == len(out_dict3) + for key in out_dict1.keys(): + assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) + + +def test_han_conv_empty_tensor(): + x_dict = { + 'author': torch.randn(6, 16), + 'paper': torch.empty(0, 12), + } + edge_index_dict = { + ('paper', 'to', 'author'): torch.empty((2, 0), dtype=torch.long), + ('author', 'to', 
'paper'): torch.empty((2, 0), dtype=torch.long), + ('paper', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), + } + + metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) + in_channels = {'author': 16, 'paper': 12} + conv = HANConv(in_channels, 16, metadata, heads=2) + + out_dict = conv(x_dict, edge_index_dict) + assert len(out_dict) == 2 + assert out_dict['author'].size() == (6, 16) + assert torch.all(out_dict['author'] == 0) + assert out_dict['paper'].size() == (0, 16) diff --git a/test/utils/test_get_mesh_laplacian.py b/test/utils/test_get_mesh_laplacian.py index 04a6bdb83082..de60ee04f8d0 100644 --- a/test/utils/test_get_mesh_laplacian.py +++ b/test/utils/test_get_mesh_laplacian.py @@ -1,101 +1,101 @@ -import torch - -from torch_geometric.utils import get_mesh_laplacian - - -def test_get_mesh_laplacian_of_cube(): - pos = torch.tensor([ - [1.0, 1.0, 1.0], - [1.0, -1.0, 1.0], - [-1.0, -1.0, 1.0], - [-1.0, 1.0, 1.0], - [1.0, 1.0, -1.0], - [1.0, -1.0, -1.0], - [-1.0, -1.0, -1.0], - [-1.0, 1.0, -1.0], - ]) - - face = torch.tensor([ - [0, 1, 2], - [0, 3, 2], - [4, 5, 1], - [4, 0, 1], - [7, 6, 5], - [7, 4, 5], - [3, 2, 6], - [3, 7, 6], - [4, 0, 3], - [4, 7, 3], - [1, 5, 6], - [1, 2, 6], - ]) - - edge_index, edge_weight = get_mesh_laplacian(pos, face.t(), - normalization='rw') - - assert edge_index.tolist() == [ - [ - 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, - 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 0, 1, 2, 3, 4, 5, 6, 7 - ], - [ - 1, 2, 3, 4, 0, 2, 4, 5, 6, 0, 1, 3, 6, 0, 2, 4, 6, 7, 0, 1, 3, 5, - 7, 1, 4, 6, 7, 1, 2, 3, 5, 7, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 7 - ], - ] - - assert torch.allclose( - edge_weight, - torch.tensor([ - 0.375, 0.0, 0.375, 0.375, 0.3, 0.3, 0.0, 0.3, 0.0, 0.0, 0.375, - 0.375, 0.375, 0.3, 0.3, 0.0, 0.0, 0.3, 0.3, 0.0, 0.0, 0.3, 0.3, - 0.375, 0.375, 0.375, 0.0, 0.0, 0.3, 0.0, 0.3, 0.3, 0.375, 0.375, - 0.0, 0.375, -1.125, -0.9, -1.125, -0.9, -0.9, -1.125, -0.9, -1.125 - ])) - - -def test_get_mesh_laplacian_of_irregular_triangular_prism(): - pos = torch.tensor([ - [0.0, 0.0, 0.0], - [4.0, 0.0, 0.0], - [0.0, 0.0, -3.0], - [1.0, 5.0, -1.0], - [3.0, 5.0, -1.0], - [2.0, 5.0, -2.0], - ]) - - face = torch.tensor([ - [0, 1, 2], - [3, 4, 5], - [0, 1, 4], - [0, 3, 4], - [1, 2, 5], - [1, 4, 5], - [2, 0, 3], - [2, 5, 3], - ]) - - edge_index, edge_weight = get_mesh_laplacian(pos, face.t(), - normalization='rw') - - assert edge_index.tolist() == [ - [ - 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, - 5, 5, 0, 1, 2, 3, 4, 5 - ], - [ - 1, 2, 3, 4, 0, 2, 4, 5, 0, 1, 3, 5, 0, 2, 4, 5, 0, 1, 3, 5, 1, 2, - 3, 4, 0, 1, 2, 3, 4, 5 - ], - ] - - assert torch.allclose( - edge_weight, - torch.tensor([ - 0.09730332, 0.15039921, 0.05081503, 0.00000000, 0.08726977, - 0.03521059, 0.05363689, 0.00723919, 0.14497279, 0.03784235, - 0.01629947, 0.03438699, 0.08362866, 0.02782887, 0.24252312, - 0.40727590, 0.00000000, 0.08728313, 0.21507657, 0.38582093, - 0.01117009, 0.04936920, 0.34247482, 0.36583540, -0.29851755, - -0.18335645, -0.23350160, -0.76125660, -0.68818060, -0.76884955 - ])) +import torch + +from torch_geometric.utils import get_mesh_laplacian + + +def test_get_mesh_laplacian_of_cube(): + pos = torch.tensor([ + [1.0, 1.0, 1.0], + [1.0, -1.0, 1.0], + [-1.0, -1.0, 1.0], + [-1.0, 1.0, 1.0], + [1.0, 1.0, -1.0], + [1.0, -1.0, -1.0], + [-1.0, -1.0, -1.0], + [-1.0, 1.0, -1.0], + ]) + + face = torch.tensor([ + [0, 1, 2], + [0, 3, 2], + [4, 5, 1], + [4, 0, 1], + [7, 6, 5], + [7, 4, 5], + [3, 2, 6], + [3, 7, 6], + [4, 0, 3], + [4, 7, 3], + 
[1, 5, 6], + [1, 2, 6], + ]) + + edge_index, edge_weight = get_mesh_laplacian(pos, face.t(), + normalization='rw') + + assert edge_index.tolist() == [ + [ + 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, + 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7, 0, 1, 2, 3, 4, 5, 6, 7 + ], + [ + 1, 2, 3, 4, 0, 2, 4, 5, 6, 0, 1, 3, 6, 0, 2, 4, 6, 7, 0, 1, 3, 5, + 7, 1, 4, 6, 7, 1, 2, 3, 5, 7, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 7 + ], + ] + + assert torch.allclose( + edge_weight, + torch.tensor([ + 0.375, 0.0, 0.375, 0.375, 0.3, 0.3, 0.0, 0.3, 0.0, 0.0, 0.375, + 0.375, 0.375, 0.3, 0.3, 0.0, 0.0, 0.3, 0.3, 0.0, 0.0, 0.3, 0.3, + 0.375, 0.375, 0.375, 0.0, 0.0, 0.3, 0.0, 0.3, 0.3, 0.375, 0.375, + 0.0, 0.375, -1.125, -0.9, -1.125, -0.9, -0.9, -1.125, -0.9, -1.125 + ])) + + +def test_get_mesh_laplacian_of_irregular_triangular_prism(): + pos = torch.tensor([ + [0.0, 0.0, 0.0], + [4.0, 0.0, 0.0], + [0.0, 0.0, -3.0], + [1.0, 5.0, -1.0], + [3.0, 5.0, -1.0], + [2.0, 5.0, -2.0], + ]) + + face = torch.tensor([ + [0, 1, 2], + [3, 4, 5], + [0, 1, 4], + [0, 3, 4], + [1, 2, 5], + [1, 4, 5], + [2, 0, 3], + [2, 5, 3], + ]) + + edge_index, edge_weight = get_mesh_laplacian(pos, face.t(), + normalization='rw') + + assert edge_index.tolist() == [ + [ + 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, + 5, 5, 0, 1, 2, 3, 4, 5 + ], + [ + 1, 2, 3, 4, 0, 2, 4, 5, 0, 1, 3, 5, 0, 2, 4, 5, 0, 1, 3, 5, 1, 2, + 3, 4, 0, 1, 2, 3, 4, 5 + ], + ] + + assert torch.allclose( + edge_weight, + torch.tensor([ + 0.09730332, 0.15039921, 0.05081503, 0.00000000, 0.08726977, + 0.03521059, 0.05363689, 0.00723919, 0.14497279, 0.03784235, + 0.01629947, 0.03438699, 0.08362866, 0.02782887, 0.24252312, + 0.40727590, 0.00000000, 0.08728313, 0.21507657, 0.38582093, + 0.01117009, 0.04936920, 0.34247482, 0.36583540, -0.29851755, + -0.18335645, -0.23350160, -0.76125660, -0.68818060, -0.76884955 + ])) diff --git a/torch_geometric/datasets/movie_lens.py b/torch_geometric/datasets/movie_lens.py index bc872625bbe1..9316a784933f 100644 --- a/torch_geometric/datasets/movie_lens.py +++ b/torch_geometric/datasets/movie_lens.py @@ -1,99 +1,99 @@ -import os -import os.path as osp -from typing import Callable, List, Optional - -import torch - -from torch_geometric.data import ( - HeteroData, - InMemoryDataset, - download_url, - extract_zip, -) - - -class MovieLens(InMemoryDataset): - r"""A heterogeneous rating dataset, assembled by GroupLens Research from - the `MovieLens web site `_, consisting of nodes of - type :obj:`"movie"` and :obj:`"user"`. - User ratings for movies are available as ground truth labels for the edges - between the users and the movies :obj:`("user", "rates", "movie")`. - - Args: - root (str): Root directory where the dataset should be saved. - transform (callable, optional): A function/transform that takes in an - :obj:`torch_geometric.data.HeteroData` object and returns a - transformed version. The data object will be transformed before - every access. (default: :obj:`None`) - pre_transform (callable, optional): A function/transform that takes in - an :obj:`torch_geometric.data.HeteroData` object and returns a - transformed version. The data object will be transformed before - being saved to disk. (default: :obj:`None`) - model_name (str): Name of model used to transform movie titles to node - features. The model comes from the`Huggingface SentenceTransformer - `_. 
- """ - url = 'https://files.grouplens.org/datasets/movielens/ml-latest-small.zip' - - def __init__( - self, - root: str, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None, - model_name: Optional[str] = 'all-MiniLM-L6-v2', - ): - self.model_name = model_name - super().__init__(root, transform, pre_transform) - self.data, self.slices = torch.load(self.processed_paths[0]) - - @property - def raw_file_names(self) -> List[str]: - return [ - osp.join('ml-latest-small', 'movies.csv'), - osp.join('ml-latest-small', 'ratings.csv'), - ] - - @property - def processed_file_names(self) -> str: - return f'data_{self.model_name}.pt' - - def download(self): - path = download_url(http://github.com/self.url,%20self.raw_dir) - extract_zip(path, self.raw_dir) - os.remove(path) - - def process(self): - import pandas as pd - from sentence_transformers import SentenceTransformer - - data = HeteroData() - - df = pd.read_csv(self.raw_paths[0], index_col='movieId') - movie_mapping = {idx: i for i, idx in enumerate(df.index)} - - genres = df['genres'].str.get_dummies('|').values - genres = torch.from_numpy(genres).to(torch.float) - - model = SentenceTransformer(self.model_name) - with torch.no_grad(): - emb = model.encode(df['title'].values, show_progress_bar=True, - convert_to_tensor=True).cpu() - - data['movie'].x = torch.cat([emb, genres], dim=-1) - - df = pd.read_csv(self.raw_paths[1]) - user_mapping = {idx: i for i, idx in enumerate(df['userId'].unique())} - data['user'].num_nodes = len(user_mapping) - - src = [user_mapping[idx] for idx in df['userId']] - dst = [movie_mapping[idx] for idx in df['movieId']] - edge_index = torch.tensor([src, dst]) - - rating = torch.from_numpy(df['rating'].values).to(torch.long) - data['user', 'rates', 'movie'].edge_index = edge_index - data['user', 'rates', 'movie'].edge_label = rating - - if self.pre_transform is not None: - data = self.pre_transform(data) - - torch.save(self.collate([data]), self.processed_paths[0]) +import os +import os.path as osp +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + HeteroData, + InMemoryDataset, + download_url, + extract_zip, +) + + +class MovieLens(InMemoryDataset): + r"""A heterogeneous rating dataset, assembled by GroupLens Research from + the `MovieLens web site `_, consisting of nodes of + type :obj:`"movie"` and :obj:`"user"`. + User ratings for movies are available as ground truth labels for the edges + between the users and the movies :obj:`("user", "rates", "movie")`. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + model_name (str): Name of model used to transform movie titles to node + features. The model comes from the`Huggingface SentenceTransformer + `_. 
+ """ + url = 'https://files.grouplens.org/datasets/movielens/ml-latest-small.zip' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + model_name: Optional[str] = 'all-MiniLM-L6-v2', + ): + self.model_name = model_name + super().__init__(root, transform, pre_transform) + self.data, self.slices = torch.load(self.processed_paths[0]) + + @property + def raw_file_names(self) -> List[str]: + return [ + osp.join('ml-latest-small', 'movies.csv'), + osp.join('ml-latest-small', 'ratings.csv'), + ] + + @property + def processed_file_names(self) -> str: + return f'data_{self.model_name}.pt' + + def download(self): + path = download_url(http://github.com/self.url,%20self.raw_dir) + extract_zip(path, self.raw_dir) + os.remove(path) + + def process(self): + import pandas as pd + from sentence_transformers import SentenceTransformer + + data = HeteroData() + + df = pd.read_csv(self.raw_paths[0], index_col='movieId') + movie_mapping = {idx: i for i, idx in enumerate(df.index)} + + genres = df['genres'].str.get_dummies('|').values + genres = torch.from_numpy(genres).to(torch.float) + + model = SentenceTransformer(self.model_name) + with torch.no_grad(): + emb = model.encode(df['title'].values, show_progress_bar=True, + convert_to_tensor=True).cpu() + + data['movie'].x = torch.cat([emb, genres], dim=-1) + + df = pd.read_csv(self.raw_paths[1]) + user_mapping = {idx: i for i, idx in enumerate(df['userId'].unique())} + data['user'].num_nodes = len(user_mapping) + + src = [user_mapping[idx] for idx in df['userId']] + dst = [movie_mapping[idx] for idx in df['movieId']] + edge_index = torch.tensor([src, dst]) + + rating = torch.from_numpy(df['rating'].values).to(torch.long) + data['user', 'rates', 'movie'].edge_index = edge_index + data['user', 'rates', 'movie'].edge_label = rating + + if self.pre_transform is not None: + data = self.pre_transform(data) + + torch.save(self.collate([data]), self.processed_paths[0]) diff --git a/torch_geometric/datasets/movie_lens_100k.py b/torch_geometric/datasets/movie_lens_100k.py index b7b8c600188a..cf83dca5177f 100644 --- a/torch_geometric/datasets/movie_lens_100k.py +++ b/torch_geometric/datasets/movie_lens_100k.py @@ -1,179 +1,179 @@ -import os -import os.path as osp -import shutil -from typing import Callable, List, Optional - -import torch - -from torch_geometric.data import ( - HeteroData, - InMemoryDataset, - download_url, - extract_zip, -) - -MOVIE_HEADERS = [ - "movieId", "title", "releaseDate", "videoReleaseDate", "IMDb URL", - "unknown", "Action", "Adventure", "Animation", "Children's", "Comedy", - "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", - "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" -] -USER_HEADERS = ["userId", "age", "gender", "occupation", "zipCode"] -RATING_HEADERS = ["userId", "movieId", "rating", "timestamp"] - - -class MovieLens100K(InMemoryDataset): - r"""The MovieLens 100K heterogeneous rating dataset, assembled by GroupLens - Research from the `MovieLens web site `__, - consisting of movies (1,682 nodes) and users (943 nodes) with 100K - ratings between them. - User ratings for movies are available as ground truth labels. - Features of users and movies are encoded according to the `"Inductive - Matrix Completion Based on Graph Neural Networks" - `__ paper. - - Args: - root (str): Root directory where the dataset should be saved. 
- transform (callable, optional): A function/transform that takes in an - :obj:`torch_geometric.data.HeteroData` object and returns a - transformed version. The data object will be transformed before - every access. (default: :obj:`None`) - pre_transform (callable, optional): A function/transform that takes in - an :obj:`torch_geometric.data.HeteroData` object and returns a - transformed version. The data object will be transformed before - being saved to disk. (default: :obj:`None`) - - **STATS:** - - .. list-table:: - :widths: 20 10 10 10 - :header-rows: 1 - - * - Node/Edge Type - - #nodes/#edges - - #features - - #tasks - * - Movie - - 1,682 - - 18 - - - * - User - - 943 - - 24 - - - * - User-Movie - - 80,000 - - 1 - - 1 - """ - url = 'https://files.grouplens.org/datasets/movielens/ml-100k.zip' - - def __init__( - self, - root: str, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None, - ): - super().__init__(root, transform, pre_transform) - self.load(self.processed_paths[0], data_cls=HeteroData) - - @property - def raw_file_names(self) -> List[str]: - return ['u.item', 'u.user', 'u1.base', 'u1.test'] - - @property - def processed_file_names(self) -> str: - return 'data.pt' - - def download(self): - path = download_url(http://github.com/self.url,%20self.root) - extract_zip(path, self.root) - os.remove(path) - folder = osp.join(self.root, 'ml-100k') - shutil.rmtree(self.raw_dir) - os.rename(folder, self.raw_dir) - - def process(self): - import pandas as pd - - data = HeteroData() - - # Process movie data: - df = pd.read_csv( - self.raw_paths[0], - sep='|', - header=None, - names=MOVIE_HEADERS, - index_col='movieId', - encoding='ISO-8859-1', - ) - movie_mapping = {idx: i for i, idx in enumerate(df.index)} - - x = df[MOVIE_HEADERS[6:]].values - data['movie'].x = torch.from_numpy(x).to(torch.float) - - # Process user data: - df = pd.read_csv( - self.raw_paths[1], - sep='|', - header=None, - names=USER_HEADERS, - index_col='userId', - encoding='ISO-8859-1', - ) - user_mapping = {idx: i for i, idx in enumerate(df.index)} - - age = df['age'].values / df['age'].values.max() - age = torch.from_numpy(age).to(torch.float).view(-1, 1) - - gender = df['gender'].str.get_dummies().values - gender = torch.from_numpy(gender).to(torch.float) - - occupation = df['occupation'].str.get_dummies().values - occupation = torch.from_numpy(occupation).to(torch.float) - - data['user'].x = torch.cat([age, gender, occupation], dim=-1) - - # Process rating data for training: - df = pd.read_csv( - self.raw_paths[2], - sep='\t', - header=None, - names=RATING_HEADERS, - ) - - src = [user_mapping[idx] for idx in df['userId']] - dst = [movie_mapping[idx] for idx in df['movieId']] - edge_index = torch.tensor([src, dst]) - data['user', 'rates', 'movie'].edge_index = edge_index - - rating = torch.from_numpy(df['rating'].values).to(torch.long) - data['user', 'rates', 'movie'].rating = rating - - time = torch.from_numpy(df['timestamp'].values) - data['user', 'rates', 'movie'].time = time - - data['movie', 'rated_by', 'user'].edge_index = edge_index.flip([0]) - data['movie', 'rated_by', 'user'].rating = rating - data['movie', 'rated_by', 'user'].time = time - - # Process rating data for testing: - df = pd.read_csv( - self.raw_paths[3], - sep='\t', - header=None, - names=RATING_HEADERS, - ) - - src = [user_mapping[idx] for idx in df['userId']] - dst = [movie_mapping[idx] for idx in df['movieId']] - edge_label_index = torch.tensor([src, dst]) - data['user', 'rates', 
'movie'].edge_label_index = edge_label_index - - edge_label = torch.from_numpy(df['rating'].values).to(torch.float) - data['user', 'rates', 'movie'].edge_label = edge_label - - if self.pre_transform is not None: - data = self.pre_transform(data) - - self.save([data], self.processed_paths[0]) +import os +import os.path as osp +import shutil +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + HeteroData, + InMemoryDataset, + download_url, + extract_zip, +) + +MOVIE_HEADERS = [ + "movieId", "title", "releaseDate", "videoReleaseDate", "IMDb URL", + "unknown", "Action", "Adventure", "Animation", "Children's", "Comedy", + "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", + "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" +] +USER_HEADERS = ["userId", "age", "gender", "occupation", "zipCode"] +RATING_HEADERS = ["userId", "movieId", "rating", "timestamp"] + + +class MovieLens100K(InMemoryDataset): + r"""The MovieLens 100K heterogeneous rating dataset, assembled by GroupLens + Research from the `MovieLens web site `__, + consisting of movies (1,682 nodes) and users (943 nodes) with 100K + ratings between them. + User ratings for movies are available as ground truth labels. + Features of users and movies are encoded according to the `"Inductive + Matrix Completion Based on Graph Neural Networks" + `__ paper. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + every access. (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.HeteroData` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + + **STATS:** + + .. 
list-table:: + :widths: 20 10 10 10 + :header-rows: 1 + + * - Node/Edge Type + - #nodes/#edges + - #features + - #tasks + * - Movie + - 1,682 + - 18 + - + * - User + - 943 + - 24 + - + * - User-Movie + - 80,000 + - 1 + - 1 + """ + url = 'https://files.grouplens.org/datasets/movielens/ml-100k.zip' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0], data_cls=HeteroData) + + @property + def raw_file_names(self) -> List[str]: + return ['u.item', 'u.user', 'u1.base', 'u1.test'] + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + path = download_url(http://github.com/self.url,%20self.root) + extract_zip(path, self.root) + os.remove(path) + folder = osp.join(self.root, 'ml-100k') + shutil.rmtree(self.raw_dir) + os.rename(folder, self.raw_dir) + + def process(self): + import pandas as pd + + data = HeteroData() + + # Process movie data: + df = pd.read_csv( + self.raw_paths[0], + sep='|', + header=None, + names=MOVIE_HEADERS, + index_col='movieId', + encoding='ISO-8859-1', + ) + movie_mapping = {idx: i for i, idx in enumerate(df.index)} + + x = df[MOVIE_HEADERS[6:]].values + data['movie'].x = torch.from_numpy(x).to(torch.float) + + # Process user data: + df = pd.read_csv( + self.raw_paths[1], + sep='|', + header=None, + names=USER_HEADERS, + index_col='userId', + encoding='ISO-8859-1', + ) + user_mapping = {idx: i for i, idx in enumerate(df.index)} + + age = df['age'].values / df['age'].values.max() + age = torch.from_numpy(age).to(torch.float).view(-1, 1) + + gender = df['gender'].str.get_dummies().values + gender = torch.from_numpy(gender).to(torch.float) + + occupation = df['occupation'].str.get_dummies().values + occupation = torch.from_numpy(occupation).to(torch.float) + + data['user'].x = torch.cat([age, gender, occupation], dim=-1) + + # Process rating data for training: + df = pd.read_csv( + self.raw_paths[2], + sep='\t', + header=None, + names=RATING_HEADERS, + ) + + src = [user_mapping[idx] for idx in df['userId']] + dst = [movie_mapping[idx] for idx in df['movieId']] + edge_index = torch.tensor([src, dst]) + data['user', 'rates', 'movie'].edge_index = edge_index + + rating = torch.from_numpy(df['rating'].values).to(torch.long) + data['user', 'rates', 'movie'].rating = rating + + time = torch.from_numpy(df['timestamp'].values) + data['user', 'rates', 'movie'].time = time + + data['movie', 'rated_by', 'user'].edge_index = edge_index.flip([0]) + data['movie', 'rated_by', 'user'].rating = rating + data['movie', 'rated_by', 'user'].time = time + + # Process rating data for testing: + df = pd.read_csv( + self.raw_paths[3], + sep='\t', + header=None, + names=RATING_HEADERS, + ) + + src = [user_mapping[idx] for idx in df['userId']] + dst = [movie_mapping[idx] for idx in df['movieId']] + edge_label_index = torch.tensor([src, dst]) + data['user', 'rates', 'movie'].edge_label_index = edge_label_index + + edge_label = torch.from_numpy(df['rating'].values).to(torch.float) + data['user', 'rates', 'movie'].edge_label = edge_label + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) diff --git a/torch_geometric/nn/conv/han_conv.py b/torch_geometric/nn/conv/han_conv.py index e7ba600eb44d..c88bd2e127c7 100644 --- a/torch_geometric/nn/conv/han_conv.py +++ b/torch_geometric/nn/conv/han_conv.py @@ -1,183 +1,183 @@ 
-from typing import Dict, List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from torch import Tensor, nn - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense import Linear -from torch_geometric.nn.inits import glorot, reset -from torch_geometric.typing import Adj, EdgeType, Metadata, NodeType, OptTensor -from torch_geometric.utils import softmax - - -def group( - xs: List[Tensor], - q: nn.Parameter, - k_lin: nn.Module, -) -> Tuple[OptTensor, OptTensor]: - - if len(xs) == 0: - return None, None - else: - num_edge_types = len(xs) - out = torch.stack(xs) - if out.numel() == 0: - return out.view(0, out.size(-1)), None - attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1) - attn = F.softmax(attn_score, dim=0) - out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0) - return out, attn - - -class HANConv(MessagePassing): - r""" - The Heterogenous Graph Attention Operator from the - `"Heterogenous Graph Attention Network" - `_ paper. - - .. note:: - - For an example of using HANConv, see `examples/hetero/han_imdb.py - `_. - - Args: - in_channels (int or Dict[str, int]): Size of each input sample of every - node type, or :obj:`-1` to derive the size from the first input(s) - to the forward method. - out_channels (int): Size of each output sample. - metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata - of the heterogeneous graph, *i.e.* its node and edge types given - by a list of strings and a list of string triplets, respectively. - See :meth:`torch_geometric.data.HeteroData.metadata` for more - information. - heads (int, optional): Number of multi-head-attentions. - (default: :obj:`1`) - negative_slope (float, optional): LeakyReLU angle of the negative - slope. (default: :obj:`0.2`) - dropout (float, optional): Dropout probability of the normalized - attention coefficients which exposes each node to a stochastically - sampled neighborhood during training. (default: :obj:`0`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.MessagePassing`. 
- """ - def __init__( - self, - in_channels: Union[int, Dict[str, int]], - out_channels: int, - metadata: Metadata, - heads: int = 1, - negative_slope=0.2, - dropout: float = 0.0, - **kwargs, - ): - super().__init__(aggr='add', node_dim=0, **kwargs) - - if not isinstance(in_channels, dict): - in_channels = {node_type: in_channels for node_type in metadata[0]} - - self.heads = heads - self.in_channels = in_channels - self.out_channels = out_channels - self.negative_slope = negative_slope - self.metadata = metadata - self.dropout = dropout - self.k_lin = nn.Linear(out_channels, out_channels) - self.q = nn.Parameter(torch.empty(1, out_channels)) - - self.proj = nn.ModuleDict() - for node_type, in_channels in self.in_channels.items(): - self.proj[node_type] = Linear(in_channels, out_channels) - - self.lin_src = nn.ParameterDict() - self.lin_dst = nn.ParameterDict() - dim = out_channels // heads - for edge_type in metadata[1]: - edge_type = '__'.join(edge_type) - self.lin_src[edge_type] = nn.Parameter(torch.empty(1, heads, dim)) - self.lin_dst[edge_type] = nn.Parameter(torch.empty(1, heads, dim)) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - reset(self.proj) - glorot(self.lin_src) - glorot(self.lin_dst) - self.k_lin.reset_parameters() - glorot(self.q) - - def forward( - self, - x_dict: Dict[NodeType, Tensor], - edge_index_dict: Dict[EdgeType, Adj], - return_semantic_attention_weights: bool = False, - ) -> Union[Dict[NodeType, OptTensor], Tuple[Dict[NodeType, OptTensor], - Dict[NodeType, OptTensor]]]: - r"""Runs the forward pass of the module. - - Args: - x_dict (Dict[str, torch.Tensor]): A dictionary holding node feature - information for each individual node type. - edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A - dictionary holding graph connectivity information for each - individual edge type, either as a :class:`torch.Tensor` of - shape :obj:`[2, num_edges]` or a - :class:`torch_sparse.SparseTensor`. - return_semantic_attention_weights (bool, optional): If set to - :obj:`True`, will additionally return the semantic-level - attention weights for each destination node type. 
- (default: :obj:`False`) - """ - H, D = self.heads, self.out_channels // self.heads - x_node_dict, out_dict = {}, {} - - # Iterate over node types: - for node_type, x in x_dict.items(): - x_node_dict[node_type] = self.proj[node_type](x).view(-1, H, D) - out_dict[node_type] = [] - - # Iterate over edge types: - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - edge_type = '__'.join(edge_type) - lin_src = self.lin_src[edge_type] - lin_dst = self.lin_dst[edge_type] - x_src = x_node_dict[src_type] - x_dst = x_node_dict[dst_type] - alpha_src = (x_src * lin_src).sum(dim=-1) - alpha_dst = (x_dst * lin_dst).sum(dim=-1) - # propagate_type: (x_dst: PairTensor, alpha: PairTensor) - out = self.propagate(edge_index, x=(x_src, x_dst), - alpha=(alpha_src, alpha_dst), size=None) - - out = F.relu(out) - out_dict[dst_type].append(out) - - # iterate over node types: - semantic_attn_dict = {} - for node_type, outs in out_dict.items(): - out, attn = group(outs, self.q, self.k_lin) - out_dict[node_type] = out - semantic_attn_dict[node_type] = attn - - if return_semantic_attention_weights: - return out_dict, semantic_attn_dict - - return out_dict - - def message(self, x_j: Tensor, alpha_i: Tensor, alpha_j: Tensor, - index: Tensor, ptr: Optional[Tensor], - size_i: Optional[int]) -> Tensor: - - alpha = alpha_j + alpha_i - alpha = F.leaky_relu(alpha, self.negative_slope) - alpha = softmax(alpha, index, ptr, size_i) - alpha = F.dropout(alpha, p=self.dropout, training=self.training) - out = x_j * alpha.view(-1, self.heads, 1) - return out.view(-1, self.out_channels) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.out_channels}, ' - f'heads={self.heads})') +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import Tensor, nn + +from torch_geometric.nn.conv import MessagePassing +from torch_geometric.nn.dense import Linear +from torch_geometric.nn.inits import glorot, reset +from torch_geometric.typing import Adj, EdgeType, Metadata, NodeType, OptTensor +from torch_geometric.utils import softmax + + +def group( + xs: List[Tensor], + q: nn.Parameter, + k_lin: nn.Module, +) -> Tuple[OptTensor, OptTensor]: + + if len(xs) == 0: + return None, None + else: + num_edge_types = len(xs) + out = torch.stack(xs) + if out.numel() == 0: + return out.view(0, out.size(-1)), None + attn_score = (q * torch.tanh(k_lin(out)).mean(1)).sum(-1) + attn = F.softmax(attn_score, dim=0) + out = torch.sum(attn.view(num_edge_types, 1, -1) * out, dim=0) + return out, attn + + +class HANConv(MessagePassing): + r""" + The Heterogenous Graph Attention Operator from the + `"Heterogenous Graph Attention Network" + `_ paper. + + .. note:: + + For an example of using HANConv, see `examples/hetero/han_imdb.py + `_. + + Args: + in_channels (int or Dict[str, int]): Size of each input sample of every + node type, or :obj:`-1` to derive the size from the first input(s) + to the forward method. + out_channels (int): Size of each output sample. + metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata + of the heterogeneous graph, *i.e.* its node and edge types given + by a list of strings and a list of string triplets, respectively. + See :meth:`torch_geometric.data.HeteroData.metadata` for more + information. + heads (int, optional): Number of multi-head-attentions. + (default: :obj:`1`) + negative_slope (float, optional): LeakyReLU angle of the negative + slope. 
(default: :obj:`0.2`) + dropout (float, optional): Dropout probability of the normalized + attention coefficients which exposes each node to a stochastically + sampled neighborhood during training. (default: :obj:`0`) + **kwargs (optional): Additional arguments of + :class:`torch_geometric.nn.conv.MessagePassing`. + """ + def __init__( + self, + in_channels: Union[int, Dict[str, int]], + out_channels: int, + metadata: Metadata, + heads: int = 1, + negative_slope=0.2, + dropout: float = 0.0, + **kwargs, + ): + super().__init__(aggr='add', node_dim=0, **kwargs) + + if not isinstance(in_channels, dict): + in_channels = {node_type: in_channels for node_type in metadata[0]} + + self.heads = heads + self.in_channels = in_channels + self.out_channels = out_channels + self.negative_slope = negative_slope + self.metadata = metadata + self.dropout = dropout + self.k_lin = nn.Linear(out_channels, out_channels) + self.q = nn.Parameter(torch.empty(1, out_channels)) + + self.proj = nn.ModuleDict() + for node_type, in_channels in self.in_channels.items(): + self.proj[node_type] = Linear(in_channels, out_channels) + + self.lin_src = nn.ParameterDict() + self.lin_dst = nn.ParameterDict() + dim = out_channels // heads + for edge_type in metadata[1]: + edge_type = '__'.join(edge_type) + self.lin_src[edge_type] = nn.Parameter(torch.empty(1, heads, dim)) + self.lin_dst[edge_type] = nn.Parameter(torch.empty(1, heads, dim)) + + self.reset_parameters() + + def reset_parameters(self): + super().reset_parameters() + reset(self.proj) + glorot(self.lin_src) + glorot(self.lin_dst) + self.k_lin.reset_parameters() + glorot(self.q) + + def forward( + self, + x_dict: Dict[NodeType, Tensor], + edge_index_dict: Dict[EdgeType, Adj], + return_semantic_attention_weights: bool = False, + ) -> Union[Dict[NodeType, OptTensor], Tuple[Dict[NodeType, OptTensor], + Dict[NodeType, OptTensor]]]: + r"""Runs the forward pass of the module. + + Args: + x_dict (Dict[str, torch.Tensor]): A dictionary holding node feature + information for each individual node type. + edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A + dictionary holding graph connectivity information for each + individual edge type, either as a :class:`torch.Tensor` of + shape :obj:`[2, num_edges]` or a + :class:`torch_sparse.SparseTensor`. + return_semantic_attention_weights (bool, optional): If set to + :obj:`True`, will additionally return the semantic-level + attention weights for each destination node type. 
+ (default: :obj:`False`) + """ + H, D = self.heads, self.out_channels // self.heads + x_node_dict, out_dict = {}, {} + + # Iterate over node types: + for node_type, x in x_dict.items(): + x_node_dict[node_type] = self.proj[node_type](x).view(-1, H, D) + out_dict[node_type] = [] + + # Iterate over edge types: + for edge_type, edge_index in edge_index_dict.items(): + src_type, _, dst_type = edge_type + edge_type = '__'.join(edge_type) + lin_src = self.lin_src[edge_type] + lin_dst = self.lin_dst[edge_type] + x_src = x_node_dict[src_type] + x_dst = x_node_dict[dst_type] + alpha_src = (x_src * lin_src).sum(dim=-1) + alpha_dst = (x_dst * lin_dst).sum(dim=-1) + # propagate_type: (x_dst: PairTensor, alpha: PairTensor) + out = self.propagate(edge_index, x=(x_src, x_dst), + alpha=(alpha_src, alpha_dst), size=None) + + out = F.relu(out) + out_dict[dst_type].append(out) + + # iterate over node types: + semantic_attn_dict = {} + for node_type, outs in out_dict.items(): + out, attn = group(outs, self.q, self.k_lin) + out_dict[node_type] = out + semantic_attn_dict[node_type] = attn + + if return_semantic_attention_weights: + return out_dict, semantic_attn_dict + + return out_dict + + def message(self, x_j: Tensor, alpha_i: Tensor, alpha_j: Tensor, + index: Tensor, ptr: Optional[Tensor], + size_i: Optional[int]) -> Tensor: + + alpha = alpha_j + alpha_i + alpha = F.leaky_relu(alpha, self.negative_slope) + alpha = softmax(alpha, index, ptr, size_i) + alpha = F.dropout(alpha, p=self.dropout, training=self.training) + out = x_j * alpha.view(-1, self.heads, 1) + return out.view(-1, self.out_channels) + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.out_channels}, ' + f'heads={self.heads})') diff --git a/torch_geometric/nn/models/rect.py b/torch_geometric/nn/models/rect.py index 96d8f9cfea87..d4e70c81d245 100644 --- a/torch_geometric/nn/models/rect.py +++ b/torch_geometric/nn/models/rect.py @@ -1,153 +1,153 @@ -import copy - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Linear - -from torch_geometric.nn import GCNConv -from torch_geometric.typing import Adj, OptTensor, SparseTensor -from torch_geometric.utils import scatter - - -class RECT_L(torch.nn.Module): - r"""The RECT model, *i.e.* its supervised RECT-L part, from the - `"Network Embedding with Completely-imbalanced Labels" - `_ paper. - In particular, a GCN model is trained that reconstructs semantic class - knowledge. - - .. note:: - - For an example of using RECT, see `examples/rect.py - `_. - - Args: - in_channels (int): Size of each input sample. - hidden_channels (int): Intermediate size of each sample. - normalize (bool, optional): Whether to add self-loops and compute - symmetric normalization coefficients on-the-fly. - (default: :obj:`True`) - dropout (float, optional): The dropout probability. 
- (default: :obj:`0.0`) - """ - def __init__(self, in_channels: int, hidden_channels: int, - normalize: bool = True, dropout: float = 0.0): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.dropout = dropout - - self.conv = GCNConv(in_channels, hidden_channels, normalize=normalize) - self.lin = Linear(hidden_channels, in_channels) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.conv.reset_parameters() - self.lin.reset_parameters() - torch.nn.init.xavier_uniform_(self.lin.weight.data) - - @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (Tensor, SparseTensor, OptTensor) -> Tensor - pass - - @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (Tensor, Tensor, OptTensor) -> Tensor - pass - - def forward(self, x: Tensor, edge_index: Adj, - edge_weight: OptTensor = None) -> Tensor: - """""" - x = self.conv(x, edge_index, edge_weight) - x = F.dropout(x, p=self.dropout, training=self.training) - return self.lin(x) - - @torch.jit._overload_method - def embed(self, x, edge_index, edge_weight=None): - # type: (Tensor, SparseTensor, OptTensor) -> Tensor - pass - - @torch.jit._overload_method - def embed(self, x, edge_index, edge_weight=None): - # type: (Tensor, Tensor, OptTensor) -> Tensor - pass - - def embed(self, x: Tensor, edge_index: Adj, - edge_weight: OptTensor = None) -> Tensor: - with torch.no_grad(): - return self.conv(x, edge_index, edge_weight) - - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: - r"""Replaces the original labels by their class-centers.""" - with torch.no_grad(): - y = y[mask] - mean = scatter(x[mask], y, dim=0, reduce='mean') - return mean[y] - - def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover - edge_index_type = typing.split(',')[1].strip() - - class EdgeIndexJittable(torch.nn.Module): - def __init__(self, child: RECT_L): - super().__init__() - self.child = copy.deepcopy(child) - self.child.conv = self.child.conv.jittable() - - def reset_parameters(self): - self.child.reset_parameters() - - def forward(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child(x, edge_index, edge_weight) - - @torch.jit.export - def embed(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child.embed(x, edge_index, edge_weight) - - @torch.jit.export - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: - return self.child.get_semantic_labels(x, y, mask) - - class SparseTensorJittable(torch.nn.Module): - def __init__(self, child: RECT_L): - super().__init__() - self.child = copy.deepcopy(child) - self.child.conv = self.child.conv.jittable() - - def reset_parameters(self): - self.child.reset_parameters() - - def forward(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None): - return self.child(x, edge_index, edge_weight) - - @torch.jit.export - def embed(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child.embed(x, edge_index, edge_weight) - - @torch.jit.export - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: - return self.child.get_semantic_labels(x, y, mask) - - if 'Tensor' == edge_index_type: - jittable_module = EdgeIndexJittable(self) - elif 'SparseTensor' == edge_index_type: - jittable_module = 
SparseTensorJittable(self) - else: - raise ValueError(f"Could not parse types '{typing}'") - - return jittable_module - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.hidden_channels})') +import copy + +import torch +import torch.nn.functional as F +from torch import Tensor +from torch.nn import Linear + +from torch_geometric.nn import GCNConv +from torch_geometric.typing import Adj, OptTensor, SparseTensor +from torch_geometric.utils import scatter + + +class RECT_L(torch.nn.Module): + r"""The RECT model, *i.e.* its supervised RECT-L part, from the + `"Network Embedding with Completely-imbalanced Labels" + `_ paper. + In particular, a GCN model is trained that reconstructs semantic class + knowledge. + + .. note:: + + For an example of using RECT, see `examples/rect.py + `_. + + Args: + in_channels (int): Size of each input sample. + hidden_channels (int): Intermediate size of each sample. + normalize (bool, optional): Whether to add self-loops and compute + symmetric normalization coefficients on-the-fly. + (default: :obj:`True`) + dropout (float, optional): The dropout probability. + (default: :obj:`0.0`) + """ + def __init__(self, in_channels: int, hidden_channels: int, + normalize: bool = True, dropout: float = 0.0): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.dropout = dropout + + self.conv = GCNConv(in_channels, hidden_channels, normalize=normalize) + self.lin = Linear(hidden_channels, in_channels) + + self.reset_parameters() + + def reset_parameters(self): + r"""Resets all learnable parameters of the module.""" + self.conv.reset_parameters() + self.lin.reset_parameters() + torch.nn.init.xavier_uniform_(self.lin.weight.data) + + @torch.jit._overload_method + def forward(self, x, edge_index, edge_weight=None): + # type: (Tensor, SparseTensor, OptTensor) -> Tensor + pass + + @torch.jit._overload_method + def forward(self, x, edge_index, edge_weight=None): + # type: (Tensor, Tensor, OptTensor) -> Tensor + pass + + def forward(self, x: Tensor, edge_index: Adj, + edge_weight: OptTensor = None) -> Tensor: + """""" + x = self.conv(x, edge_index, edge_weight) + x = F.dropout(x, p=self.dropout, training=self.training) + return self.lin(x) + + @torch.jit._overload_method + def embed(self, x, edge_index, edge_weight=None): + # type: (Tensor, SparseTensor, OptTensor) -> Tensor + pass + + @torch.jit._overload_method + def embed(self, x, edge_index, edge_weight=None): + # type: (Tensor, Tensor, OptTensor) -> Tensor + pass + + def embed(self, x: Tensor, edge_index: Adj, + edge_weight: OptTensor = None) -> Tensor: + with torch.no_grad(): + return self.conv(x, edge_index, edge_weight) + + def get_semantic_labels(self, x: Tensor, y: Tensor, + mask: Tensor) -> Tensor: + r"""Replaces the original labels by their class-centers.""" + with torch.no_grad(): + y = y[mask] + mean = scatter(x[mask], y, dim=0, reduce='mean') + return mean[y] + + def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover + edge_index_type = typing.split(',')[1].strip() + + class EdgeIndexJittable(torch.nn.Module): + def __init__(self, child: RECT_L): + super().__init__() + self.child = copy.deepcopy(child) + self.child.conv = self.child.conv.jittable() + + def reset_parameters(self): + self.child.reset_parameters() + + def forward(self, x: Tensor, edge_index: Tensor, + edge_weight: OptTensor = None) -> Tensor: + return self.child(x, edge_index, edge_weight) + + @torch.jit.export + def embed(self, x: Tensor, 
edge_index: Tensor, + edge_weight: OptTensor = None) -> Tensor: + return self.child.embed(x, edge_index, edge_weight) + + @torch.jit.export + def get_semantic_labels(self, x: Tensor, y: Tensor, + mask: Tensor) -> Tensor: + return self.child.get_semantic_labels(x, y, mask) + + class SparseTensorJittable(torch.nn.Module): + def __init__(self, child: RECT_L): + super().__init__() + self.child = copy.deepcopy(child) + self.child.conv = self.child.conv.jittable() + + def reset_parameters(self): + self.child.reset_parameters() + + def forward(self, x: Tensor, edge_index: SparseTensor, + edge_weight: OptTensor = None): + return self.child(x, edge_index, edge_weight) + + @torch.jit.export + def embed(self, x: Tensor, edge_index: SparseTensor, + edge_weight: OptTensor = None) -> Tensor: + return self.child.embed(x, edge_index, edge_weight) + + @torch.jit.export + def get_semantic_labels(self, x: Tensor, y: Tensor, + mask: Tensor) -> Tensor: + return self.child.get_semantic_labels(x, y, mask) + + if 'Tensor' == edge_index_type: + jittable_module = EdgeIndexJittable(self) + elif 'SparseTensor' == edge_index_type: + jittable_module = SparseTensorJittable(self) + else: + raise ValueError(f"Could not parse types '{typing}'") + + return jittable_module + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.hidden_channels})') diff --git a/torch_geometric/transforms/remove_training_classes.py b/torch_geometric/transforms/remove_training_classes.py index 1d29ff91c56f..7350bee75ed4 100644 --- a/torch_geometric/transforms/remove_training_classes.py +++ b/torch_geometric/transforms/remove_training_classes.py @@ -1,27 +1,27 @@ -from typing import List - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform - - -@functional_transform('remove_training_classes') -class RemoveTrainingClasses(BaseTransform): - r"""Removes classes from the node-level training set as given by - :obj:`data.train_mask`, *e.g.*, in order to get a zero-shot label scenario - (functional name: :obj:`remove_training_classes`). - - Args: - classes (List[int]): The classes to remove from the training set. - """ - def __init__(self, classes: List[int]): - self.classes = classes - - def forward(self, data: Data) -> Data: - data.train_mask = data.train_mask.clone() - for i in self.classes: - data.train_mask[data.y == i] = False - return data - - def __repr__(self) -> str: - return f'{self.__class__.__name__}({self.classes})' +from typing import List + +from torch_geometric.data import Data +from torch_geometric.data.datapipes import functional_transform +from torch_geometric.transforms import BaseTransform + + +@functional_transform('remove_training_classes') +class RemoveTrainingClasses(BaseTransform): + r"""Removes classes from the node-level training set as given by + :obj:`data.train_mask`, *e.g.*, in order to get a zero-shot label scenario + (functional name: :obj:`remove_training_classes`). + + Args: + classes (List[int]): The classes to remove from the training set. 
+ """ + def __init__(self, classes: List[int]): + self.classes = classes + + def forward(self, data: Data) -> Data: + data.train_mask = data.train_mask.clone() + for i in self.classes: + data.train_mask[data.y == i] = False + return data + + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.classes})' diff --git a/torch_geometric/transforms/svd_feature_reduction.py b/torch_geometric/transforms/svd_feature_reduction.py index 760d4e50d2a8..c99798034369 100644 --- a/torch_geometric/transforms/svd_feature_reduction.py +++ b/torch_geometric/transforms/svd_feature_reduction.py @@ -1,28 +1,28 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform - - -@functional_transform('svd_feature_reduction') -class SVDFeatureReduction(BaseTransform): - r"""Dimensionality reduction of node features via Singular Value - Decomposition (SVD) (functional name: :obj:`svd_feature_reduction`). - - Args: - out_channels (int): The dimensionlity of node features after - reduction. - """ - def __init__(self, out_channels: int): - self.out_channels = out_channels - - def forward(self, data: Data) -> Data: - if data.x.size(-1) > self.out_channels: - U, S, _ = torch.linalg.svd(data.x) - data.x = torch.mm(U[:, :self.out_channels], - torch.diag(S[:self.out_channels])) - return data - - def __repr__(self) -> str: - return f'{self.__class__.__name__}({self.out_channels})' +import torch + +from torch_geometric.data import Data +from torch_geometric.data.datapipes import functional_transform +from torch_geometric.transforms import BaseTransform + + +@functional_transform('svd_feature_reduction') +class SVDFeatureReduction(BaseTransform): + r"""Dimensionality reduction of node features via Singular Value + Decomposition (SVD) (functional name: :obj:`svd_feature_reduction`). + + Args: + out_channels (int): The dimensionlity of node features after + reduction. + """ + def __init__(self, out_channels: int): + self.out_channels = out_channels + + def forward(self, data: Data) -> Data: + if data.x.size(-1) > self.out_channels: + U, S, _ = torch.linalg.svd(data.x) + data.x = torch.mm(U[:, :self.out_channels], + torch.diag(S[:self.out_channels])) + return data + + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.out_channels})' diff --git a/torch_geometric/utils/get_mesh_laplacian.py b/torch_geometric/utils/get_mesh_laplacian.py index 98b8cba135f7..57a50884a097 100644 --- a/torch_geometric/utils/get_mesh_laplacian.py +++ b/torch_geometric/utils/get_mesh_laplacian.py @@ -1,109 +1,109 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor - -from torch_geometric.utils import add_self_loops, scatter, to_undirected - - -def get_mesh_laplacian( - pos: Tensor, - face: Tensor, - normalization: Optional[str] = None, -) -> Tuple[Tensor, Tensor]: - r"""Computes the mesh Laplacian of a mesh given by :obj:`pos` and - :obj:`face`. Computation is based on the cotangent matrix defined as - - .. math:: - \mathbf{C}_{ij} = \begin{cases} - \frac{\cot \angle_{ikj}~+\cot \angle_{ilj}}{2} & - \text{if } i, j \text{ is an edge} \\ - -\sum_{j \in N(i)}{C_{ij}} & - \text{if } i \text{ is in the diagonal} \\ - 0 & \text{otherwise} - \end{cases} - - Normalization depends on the mass matrix defined as - - .. 
math:: - \mathbf{M}_{ij} = \begin{cases} - a(i) & \text{if } i \text{ is in the diagonal} \\ - 0 & \text{otherwise} - \end{cases} - - where :math:`a(i)` is obtained by joining the barycenters of the - triangles around vertex :math:`i`. - - Args: - pos (Tensor): The node positions. - face (LongTensor): The face indices. - normalization (str, optional): The normalization scheme for the mesh - Laplacian (default: :obj:`None`): - - 1. :obj:`None`: No normalization - :math:`\mathbf{L} = \mathbf{C}` - - 2. :obj:`"sym"`: Symmetric normalization - :math:`\mathbf{L} = \mathbf{M}^{-1/2} \mathbf{C}\mathbf{M}^{-1/2}` - - 3. :obj:`"rw"`: Row-wise normalization - :math:`\mathbf{L} = \mathbf{M}^{-1} \mathbf{C}` - """ - assert pos.size(1) == 3 and face.size(0) == 3 - - num_nodes = pos.shape[0] - - def get_cots(left, centre, right): - left_pos, central_pos, right_pos = pos[left], pos[centre], pos[right] - left_vec = left_pos - central_pos - right_vec = right_pos - central_pos - dot = torch.einsum('ij, ij -> i', left_vec, right_vec) - cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1) - cot = dot / cross # cot = cos / sin - return cot / 2.0 # by definition - - # For each triangle face, get all three cotangents: - cot_021 = get_cots(face[0], face[2], face[1]) - cot_102 = get_cots(face[1], face[0], face[2]) - cot_012 = get_cots(face[0], face[1], face[2]) - cot_weight = torch.cat([cot_021, cot_102, cot_012]) - - # Face to edge: - cot_index = torch.cat([face[:2], face[1:], face[::2]], dim=1) - cot_index, cot_weight = to_undirected(cot_index, cot_weight) - - # Compute the diagonal part: - cot_deg = scatter(cot_weight, cot_index[0], 0, num_nodes, reduce='sum') - edge_index, _ = add_self_loops(cot_index, num_nodes=num_nodes) - edge_weight = torch.cat([cot_weight, -cot_deg], dim=0) - - if normalization is not None: - - def get_areas(left, centre, right): - central_pos = pos[centre] - left_vec = pos[left] - central_pos - right_vec = pos[right] - central_pos - cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1) - area = cross / 6.0 # one-third of a triangle's area is cross / 6.0 - return area / 2.0 # since each corresponding area is counted twice - - # Like before, but here we only need the diagonal (the mass matrix): - area_021 = get_areas(face[0], face[2], face[1]) - area_102 = get_areas(face[1], face[0], face[2]) - area_012 = get_areas(face[0], face[1], face[2]) - area_weight = torch.cat([area_021, area_102, area_012]) - area_index = torch.cat([face[:2], face[1:], face[::2]], dim=1) - area_index, area_weight = to_undirected(area_index, area_weight) - area_deg = scatter(area_weight, area_index[0], 0, num_nodes, 'sum') - - if normalization == 'sym': - area_deg_inv_sqrt = area_deg.pow_(-0.5) - area_deg_inv_sqrt[area_deg_inv_sqrt == float('inf')] = 0.0 - edge_weight = (area_deg_inv_sqrt[edge_index[0]] * edge_weight * - area_deg_inv_sqrt[edge_index[1]]) - elif normalization == 'rw': - area_deg_inv = 1.0 / area_deg - area_deg_inv[area_deg_inv == float('inf')] = 0.0 - edge_weight = area_deg_inv[edge_index[0]] * edge_weight - - return edge_index, edge_weight +from typing import Optional, Tuple + +import torch +from torch import Tensor + +from torch_geometric.utils import add_self_loops, scatter, to_undirected + + +def get_mesh_laplacian( + pos: Tensor, + face: Tensor, + normalization: Optional[str] = None, +) -> Tuple[Tensor, Tensor]: + r"""Computes the mesh Laplacian of a mesh given by :obj:`pos` and + :obj:`face`. Computation is based on the cotangent matrix defined as + + .. 
math:: + \mathbf{C}_{ij} = \begin{cases} + \frac{\cot \angle_{ikj}~+\cot \angle_{ilj}}{2} & + \text{if } i, j \text{ is an edge} \\ + -\sum_{j \in N(i)}{C_{ij}} & + \text{if } i \text{ is in the diagonal} \\ + 0 & \text{otherwise} + \end{cases} + + Normalization depends on the mass matrix defined as + + .. math:: + \mathbf{M}_{ij} = \begin{cases} + a(i) & \text{if } i \text{ is in the diagonal} \\ + 0 & \text{otherwise} + \end{cases} + + where :math:`a(i)` is obtained by joining the barycenters of the + triangles around vertex :math:`i`. + + Args: + pos (Tensor): The node positions. + face (LongTensor): The face indices. + normalization (str, optional): The normalization scheme for the mesh + Laplacian (default: :obj:`None`): + + 1. :obj:`None`: No normalization + :math:`\mathbf{L} = \mathbf{C}` + + 2. :obj:`"sym"`: Symmetric normalization + :math:`\mathbf{L} = \mathbf{M}^{-1/2} \mathbf{C}\mathbf{M}^{-1/2}` + + 3. :obj:`"rw"`: Row-wise normalization + :math:`\mathbf{L} = \mathbf{M}^{-1} \mathbf{C}` + """ + assert pos.size(1) == 3 and face.size(0) == 3 + + num_nodes = pos.shape[0] + + def get_cots(left, centre, right): + left_pos, central_pos, right_pos = pos[left], pos[centre], pos[right] + left_vec = left_pos - central_pos + right_vec = right_pos - central_pos + dot = torch.einsum('ij, ij -> i', left_vec, right_vec) + cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1) + cot = dot / cross # cot = cos / sin + return cot / 2.0 # by definition + + # For each triangle face, get all three cotangents: + cot_021 = get_cots(face[0], face[2], face[1]) + cot_102 = get_cots(face[1], face[0], face[2]) + cot_012 = get_cots(face[0], face[1], face[2]) + cot_weight = torch.cat([cot_021, cot_102, cot_012]) + + # Face to edge: + cot_index = torch.cat([face[:2], face[1:], face[::2]], dim=1) + cot_index, cot_weight = to_undirected(cot_index, cot_weight) + + # Compute the diagonal part: + cot_deg = scatter(cot_weight, cot_index[0], 0, num_nodes, reduce='sum') + edge_index, _ = add_self_loops(cot_index, num_nodes=num_nodes) + edge_weight = torch.cat([cot_weight, -cot_deg], dim=0) + + if normalization is not None: + + def get_areas(left, centre, right): + central_pos = pos[centre] + left_vec = pos[left] - central_pos + right_vec = pos[right] - central_pos + cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1) + area = cross / 6.0 # one-third of a triangle's area is cross / 6.0 + return area / 2.0 # since each corresponding area is counted twice + + # Like before, but here we only need the diagonal (the mass matrix): + area_021 = get_areas(face[0], face[2], face[1]) + area_102 = get_areas(face[1], face[0], face[2]) + area_012 = get_areas(face[0], face[1], face[2]) + area_weight = torch.cat([area_021, area_102, area_012]) + area_index = torch.cat([face[:2], face[1:], face[::2]], dim=1) + area_index, area_weight = to_undirected(area_index, area_weight) + area_deg = scatter(area_weight, area_index[0], 0, num_nodes, 'sum') + + if normalization == 'sym': + area_deg_inv_sqrt = area_deg.pow_(-0.5) + area_deg_inv_sqrt[area_deg_inv_sqrt == float('inf')] = 0.0 + edge_weight = (area_deg_inv_sqrt[edge_index[0]] * edge_weight * + area_deg_inv_sqrt[edge_index[1]]) + elif normalization == 'rw': + area_deg_inv = 1.0 / area_deg + area_deg_inv[area_deg_inv == float('inf')] = 0.0 + edge_weight = area_deg_inv[edge_index[0]] * edge_weight + + return edge_index, edge_weight From 137a95ccc382d72e21e12957e74b167bdf225377 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Wed, 19 Jul 2023 10:35:38 +0100 
Subject: [PATCH 1356/2432] Replace broadcast+reduce with matmul for faster operation both on CPU and CUDA devices (#7766)

This PR improves fwd and fwd+bwd performance by replacing broadcast+reduce op with matmul.

### Benchmark results

On V100:
- fwd: up to `2.76x` faster
- fwd+bwd: up to `1.4x` faster

On CPU:
- fwd: up to `6.52x` faster
- fwd+bwd: up to `1.50x` faster

Result on 6-core CPU with V100: (benchmark screenshot omitted)

Result on 6-core CPU without GPU: (benchmark screenshot omitted)

### Benchmark script

```python
import torch
from torch.utils.benchmark import Timer, Compare


def main():
    torch.set_default_device("cpu")  # or "cuda"

    formulations = {
        "broadcast_reduce": lambda x, y: (x * y).sum(dim=-1),
        "einsum": lambda x, y: torch.einsum("ij,...j->i", x, y),
        "matmul": lambda x, y: (x @ y.t()).squeeze(),
    }

    # verify all formulations are equivalent
    x_j = torch.randn((2**10, 200))
    att_l = torch.randn((1, 200))
    y0 = formulations["broadcast_reduce"](x_j, att_l)
    for name, fn in formulations.items():
        torch.testing.assert_close(
            y0, fn(x_j, att_l), rtol=1e-3, atol=1e-3, msg=name
        )

    sizes = [2**10, 2**12, 2**14, 2**16]
    results = []

    # == Forward ==
    for name, fn in formulations.items():
        for size in sizes:
            x_j = torch.randn((size, 200))
            att_l = torch.randn((1, 200))
            m = Timer(
                f"{name}(x, y)",
                globals={"x": x_j, "y": att_l, name: fn},
                num_threads=6,
                label="fwd",
                sub_label=name,
                description=str(size),
            ).blocked_autorange(min_run_time=1)
            results.append(m)

    # == Forward + Backward ==
    for globals_dict in (
        {"x": torch.ones_like(x_j, requires_grad=True), "y": att_l},
        {"x": x_j, "y": torch.ones_like(att_l, requires_grad=True)},
        {"x": torch.ones_like(x_j, requires_grad=True), "y": torch.ones_like(att_l, requires_grad=True)},
    ):
        for name, fn in formulations.items():
            for size in sizes:
                x_j = torch.randn((size, 200))
                att_l = torch.randn((1, 200))
                timer_globals = globals_dict.copy()
                timer_globals[name] = fn
                m = Timer(
                    f"{name}(x, y).sum().backward()",
                    globals=timer_globals,
                    num_threads=6,
                    label="fwd+bwd",
                    sub_label=name,
                    description=str(size),
                ).blocked_autorange(min_run_time=1)
                results.append(m)

    compare = Compare(results)
    compare.colorize()
    compare.print()


if __name__ == "__main__":
    main()
```

---------

Co-authored-by: Matthias Fey
---
 torch_geometric/nn/models/attentive_fp.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torch_geometric/nn/models/attentive_fp.py b/torch_geometric/nn/models/attentive_fp.py
index 6fae299d0d27..c99b34c7b77e 100644
--- a/torch_geometric/nn/models/attentive_fp.py
+++ b/torch_geometric/nn/models/attentive_fp.py
@@ -51,8 +51,8 @@ def message(self, x_j: Tensor, x_i: Tensor, edge_attr: Tensor,
                 size_i: Optional[int]) -> Tensor:
         x_j = F.leaky_relu_(self.lin1(torch.cat([x_j, edge_attr], dim=-1)))
-        alpha_j = (x_j * self.att_l).sum(dim=-1)
-        alpha_i = (x_i * self.att_r).sum(dim=-1)
+        alpha_j = (x_j @ self.att_l.t()).squeeze(-1)
+        alpha_i = (x_i @ self.att_r.t()).squeeze(-1)
         alpha = alpha_j + alpha_i
         alpha = F.leaky_relu_(alpha)
         alpha = softmax(alpha, index, ptr, size_i)

From 67c0acc95dacfc5071efd8651c7478e69924565f Mon Sep 17 00:00:00 2001
From: YanbingJiang
Date: Wed, 19 Jul 2023 17:44:38 +0800
Subject: [PATCH 1357/2432] Fix `bf16` data type failure in `spmm` (#7754)

This PR fixes the data type failure for bf16 with sparse graphs by converting `src` to `other`'s dtype in `spmm.py`.
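A minimal sketch of the dtype alignment described above (illustrative only: it uses a plain `torch` CSR tensor instead of a `torch_sparse.SparseTensor`, and bf16 sparse matmul support depends on the PyTorch build):

```python
import torch

# Dense node features in bfloat16, e.g. produced under mixed precision:
other = torch.randn(4, 8, dtype=torch.bfloat16)

# Sparse adjacency created in the default float32 dtype:
adj = torch.eye(4).to_sparse_csr()

# `torch.sparse.mm` expects both operands to share a dtype, so the sparse
# operand is cast to the dense operand's dtype first, mirroring the cast
# applied to `csr` inside `spmm`:
out = torch.sparse.mm(adj.to(other.dtype), other)
assert out.dtype == torch.bfloat16
```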
--- torch_geometric/utils/spmm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/utils/spmm.py b/torch_geometric/utils/spmm.py index 0e63e5312536..b9dd77b99c33 100644 --- a/torch_geometric/utils/spmm.py +++ b/torch_geometric/utils/spmm.py @@ -46,7 +46,7 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: if (torch_geometric.typing.WITH_PT2 and other.dim() == 2 and not src.is_cuda() and not src.requires_grad()): # Use optimized PyTorch `torch.sparse.mm` path: - csr = src.to_torch_sparse_csr_tensor() + csr = src.to_torch_sparse_csr_tensor().to(other.dtype) return torch.sparse.mm(csr, other, reduce) return torch_sparse.matmul(src, other, reduce) From 67db3bc9a34bc2f8f2a785cbbc13c1e70e4c092f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Jul 2023 13:11:48 +0200 Subject: [PATCH 1358/2432] Do not account for `face` in `HeteroData.__cat_dim__` (#7773) Align implementation of `__inc__` and `__cat_dim__` --- torch_geometric/data/hetero_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index cfe58a3b2683..0e2955d7be96 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -331,7 +331,7 @@ def __cat_dim__(self, key: str, value: Any, **kwargs) -> Any: if isinstance(value, SparseTensor) and 'adj' in key: return (0, 1) - elif 'index' in key or 'face' in key: + elif isinstance(store, EdgeStorage) and 'index' in key: return -1 return 0 From a90949df1d339f5fe41f979f9d5de37bcbae6dd1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 19 Jul 2023 13:19:56 +0200 Subject: [PATCH 1359/2432] Warn user when using `training` flag in `to_hetero` (#7772) Fixes #7745 --- CHANGELOG.md | 1 + test/nn/test_fx.py | 22 ++++++++++++++++++++++ test/nn/test_model_summary.py | 4 ++++ test/nn/test_to_hetero_transformer.py | 3 ++- torch_geometric/nn/fx.py | 8 ++++++++ torch_geometric/nn/models/basic_gnn.py | 7 +++---- 6 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 test/nn/test_fx.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bcc1c405979..d0436ca8e003 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772)) - Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734)) - Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714)) - Renamed `dest` argument to `dst` in `utils.geodesic_distance` ([#7708](https://github.com/pyg-team/pytorch_geometric/pull/7708)) diff --git a/test/nn/test_fx.py b/test/nn/test_fx.py new file mode 100644 index 000000000000..a0f1ff44c1ef --- /dev/null +++ b/test/nn/test_fx.py @@ -0,0 +1,22 @@ +import torch +import torch.nn.functional as F +from torch import Tensor + + +def test_dropout(): + class MyModule(torch.nn.Module): + def forward(self, x: Tensor) -> Tensor: + return F.dropout(x, p=1.0, training=self.training) + + module = MyModule() + graph_module = torch.fx.symbolic_trace(module) + graph_module.recompile() + + x = torch.randn(4) + + graph_module.train() + assert torch.allclose(graph_module(x), torch.zeros_like(x)) + + # This is certainly undesired behavior due to tracing :( + graph_module.eval() + assert torch.allclose(graph_module(x), torch.zeros_like(x)) diff --git a/test/nn/test_model_summary.py b/test/nn/test_model_summary.py index 6408e0ca051f..f07aff03c162 100644 --- a/test/nn/test_model_summary.py +++ b/test/nn/test_model_summary.py @@ -59,6 +59,7 @@ def test_summary_basic(gcn): | Layer | Input Shape | Output Shape | #Param | |---------------------+--------------------+----------------+----------| | GCN | [100, 32], [2, 20] | [100, 32] | 1,072 | +| ├─(dropout)Dropout | [100, 16] | [100, 16] | -- | | ├─(act)ReLU | [100, 16] | [100, 16] | -- | | ├─(convs)ModuleList | -- | -- | 1,072 | | │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | 528 | @@ -75,6 +76,7 @@ def test_summary_with_sparse_tensor(gcn): | Layer | Input Shape | Output Shape | #Param | |---------------------+-----------------------+----------------+----------| | GCN | [100, 32], [100, 100] | [100, 32] | 1,072 | +| ├─(dropout)Dropout | [100, 16] | [100, 16] | -- | | ├─(act)ReLU | [100, 16] | [100, 16] | -- | | ├─(convs)ModuleList | -- | -- | 1,072 | | │ └─(0)GCNConv | [100, 32], [100, 100] | [100, 16] | 528 | @@ -91,6 +93,7 @@ def test_summary_with_max_depth(gcn): | Layer | Input Shape | Output Shape | #Param | |---------------------+--------------------+----------------+----------| | GCN | [100, 32], [2, 20] | [100, 32] | 1,072 | +| ├─(dropout)Dropout | [100, 16] | [100, 16] | -- | | ├─(act)ReLU | [100, 16] | [100, 16] | -- | | ├─(convs)ModuleList | -- | -- | 1,072 | +---------------------+--------------------+----------------+----------+ @@ -106,6 +109,7 @@ def test_summary_with_leaf_module(gcn): | Layer | Input Shape | Output Shape | #Param | |-----------------------------------------+--------------------+----------------+----------| | GCN | [100, 32], [2, 20] | [100, 32] | 1,072 | +| ├─(dropout)Dropout | [100, 16] | [100, 16] | -- | | ├─(act)ReLU | [100, 16] | [100, 16] | -- | | ├─(convs)ModuleList | -- | -- | 1,072 | | │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | 528 | diff --git a/test/nn/test_to_hetero_transformer.py b/test/nn/test_to_hetero_transformer.py index 768806bbd08a..6ee4ed31653b 100644 --- a/test/nn/test_to_hetero_transformer.py +++ b/test/nn/test_to_hetero_transformer.py @@ -277,7 +277,8 @@ def test_to_hetero_basic(): assert out['author'].size() == (8, 
16) model = Net10() - model = to_hetero(model, metadata, debug=False) + with pytest.warns(UserWarning, match="with keyword argument 'training'"): + model = to_hetero(model, metadata, debug=False) out = model(x_dict, edge_index_dict) assert isinstance(out, dict) and len(out) == 2 assert out['paper'].size() == (100, 32) diff --git a/torch_geometric/nn/fx.py b/torch_geometric/nn/fx.py index 860def82ae34..021f43ca6921 100644 --- a/torch_geometric/nn/fx.py +++ b/torch_geometric/nn/fx.py @@ -1,4 +1,5 @@ import copy +import warnings from typing import Any, Dict, Optional import torch @@ -127,6 +128,13 @@ def transform(self) -> GraphModule: # We iterate over each node and determine its output level # (node-level, edge-level) by filling `self._state`: for node in list(self.graph.nodes): + if node.op == 'call_function' and 'training' in node.kwargs: + warnings.warn(f"Found function '{node.name}' with keyword " + f"argument 'training'. During FX tracing, this " + f"will likely be baked in as a constant value. " + f"Consider replacing this function by a module " + f"to properly encapsulate its training flag.") + if node.op == 'placeholder': if node.name not in self._state: if 'edge' in node.name or 'adj' in node.name: diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index 7244893fc222..15397a6b5b20 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -2,7 +2,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch -import torch.nn.functional as F from torch import Tensor from torch.nn import Linear, ModuleList from tqdm import tqdm @@ -83,7 +82,7 @@ def __init__( self.hidden_channels = hidden_channels self.num_layers = num_layers - self.dropout = dropout + self.dropout = torch.nn.Dropout(p=dropout) self.act = activation_resolver(act, **(act_kwargs or {})) self.jk_mode = jk self.act_first = act_first @@ -232,7 +231,7 @@ def forward( x = self.norms[i](x) if self.act is not None and not self.act_first: x = self.act(x) - x = F.dropout(x, p=self.dropout, training=self.training) + x = self.dropout(x) if hasattr(self, 'jk'): xs.append(x) @@ -537,7 +536,7 @@ def init_conv(self, in_channels: Union[int, Tuple[int, int]], Conv = GATConv if not v2 else GATv2Conv return Conv(in_channels, out_channels, heads=heads, concat=concat, - dropout=self.dropout, **kwargs) + dropout=self.dropout.p, **kwargs) class PNA(BasicGNN): From 4455906e86cc21e4ef28ef2575ae51e5e9559c31 Mon Sep 17 00:00:00 2001 From: Aniket Saxena <92912434+fork123aniket@users.noreply.github.com> Date: Wed, 19 Jul 2023 17:00:06 +0530 Subject: [PATCH 1360/2432] `GraphMaskExplainer` code updation (#7717) This PR is related to the changes discussed in https://github.com/pyg-team/pytorch_geometric/discussions/7271#discussioncomment-6330688. This PR removes all redundant loss functions and unessential module calling from the `GraphMask` implementation. 
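As a quick illustration of the removed module construction: with a freshly built `LayerNorm` (default initialization, weight of ones and bias of zeros), the functional call that now lives in `explain_message` matches the old `Sequential(LayerNorm(...), ReLU())` wrapper. A minimal check, assuming default initialization as in the removed code:

```python
import torch
import torch.nn.functional as F
from torch.nn import LayerNorm, ReLU, Sequential

out = torch.randn(32, 16)

# Old path: build a fresh (untrained) module stack and apply it once.
module_out = Sequential(LayerNorm(out.size(-1)), ReLU())(out)

# New path: the equivalent functional call, with no per-call module allocation.
functional_out = F.layer_norm(out, (out.size(-1), )).relu()

assert torch.allclose(module_out, functional_out, atol=1e-6)
```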
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Akihiro Nitta Co-authored-by: Matthias Fey --- .../contrib/explain/graphmask_explainer.py | 43 ++----------------- 1 file changed, 3 insertions(+), 40 deletions(-) diff --git a/torch_geometric/contrib/explain/graphmask_explainer.py b/torch_geometric/contrib/explain/graphmask_explainer.py index b7d137c2a7bb..8a034b87ce0a 100644 --- a/torch_geometric/contrib/explain/graphmask_explainer.py +++ b/torch_geometric/contrib/explain/graphmask_explainer.py @@ -5,23 +5,17 @@ import torch import torch.nn.functional as F from torch import Tensor -from torch.nn import LayerNorm, Linear, Parameter, ReLU, Sequential +from torch.nn import LayerNorm, Linear, Parameter, ReLU from tqdm import tqdm from torch_geometric.explain import Explanation from torch_geometric.explain.algorithm import ExplainerAlgorithm -from torch_geometric.explain.config import ( - MaskType, - ModelMode, - ModelReturnType, - ModelTaskLevel, -) +from torch_geometric.explain.config import MaskType, ModelMode, ModelTaskLevel from torch_geometric.nn import MessagePassing def explain_message(self, out: Tensor, x_i: Tensor, x_j: Tensor) -> Tensor: - norm = Sequential(LayerNorm(out.size(-1)).to(out.device), ReLU()) - basis_messages = norm(out) + basis_messages = F.layer_norm(out, (out.size(-1), )).relu() if getattr(self, 'message_scale', None) is not None: basis_messages = basis_messages * self.message_scale.unsqueeze(-1) @@ -268,37 +262,6 @@ def reset_parameters(self, input_dims: List[int], h_dim: List[int]): for layer_norm in self.layer_norms: layer_norm.reset_parameters() - def _loss_regression(self, y_hat: Tensor, y: Tensor) -> Tensor: - assert self.model_config.return_type == ModelReturnType.raw - return F.mse_loss(y_hat, y) - - def _loss_binary_classification(self, y_hat: Tensor, y: Tensor) -> Tensor: - if self.model_config.return_type == ModelReturnType.raw: - loss_fn = F.binary_cross_entropy_with_logits - elif self.model_config.return_type == ModelReturnType.probs: - loss_fn = F.binary_cross_entropy - else: - assert False - - return loss_fn(y_hat.view_as(y), y.float()) - - def _loss_multiclass_classification( - self, - y_hat: Tensor, - y: Tensor, - ) -> Tensor: - if self.model_config.return_type == ModelReturnType.raw: - loss_fn = F.cross_entropy - elif self.model_config.return_type == ModelReturnType.probs: - loss_fn = F.nll_loss - y_hat = y_hat.log() - elif self.model_config.return_type == ModelReturnType.log_probs: - loss_fn = F.nll_loss - else: - assert False - - return loss_fn(y_hat, y) - def _loss(self, y_hat: Tensor, y: Tensor, penalty: float) -> Tensor: if self.model_config.mode == ModelMode.binary_classification: loss = self._loss_binary_classification(y_hat, y) From cc835de57ca718a892a56c817e1c539cc75651d5 Mon Sep 17 00:00:00 2001 From: Nripesh Niketan <86844847+NripeshN@users.noreply.github.com> Date: Wed, 19 Jul 2023 17:08:37 +0530 Subject: [PATCH 1361/2432] Apple Silicon "MPS" support main examples (#7770) Hi @rusty1s, The following files passed successfully on "MPS" device --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 5 +++-- examples/argva_node_clustering.py | 8 +++++++- examples/arma.py | 8 +++++++- examples/autoencoder.py | 8 +++++++- examples/dna.py | 8 +++++++- examples/film.py | 8 +++++++- examples/gcn.py | 8 +++++++- examples/glnn.py | 7 ++++++- examples/infomax_transductive.py | 8 +++++++- 
examples/link_pred.py | 8 +++++++- examples/linkx.py | 7 ++++++- examples/mutag_gin.py | 9 ++++++++- examples/proteins_diff_pool.py | 8 +++++++- examples/rect.py | 8 +++++++- examples/rgcn.py | 8 +++++++- examples/sgc.py | 8 +++++++- examples/signed_gcn.py | 9 ++++++++- examples/tagcn.py | 8 +++++++- examples/tensorboard_logging.py | 8 +++++++- 19 files changed, 129 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0436ca8e003..ae2b5d9491e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,7 +63,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197)) - Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155)) - Added edge weight support to `LightGCN` ([#7157](https://github.com/pyg-team/pytorch_geometric/pull/7157)) -- Added `SparseTensor` support to`trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089)) +- Added `SparseTensor` support to `trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089)) - Added instructions for ROCm build wheels ([#7143](https://github.com/pyg-team/pytorch_geometric/pull/7143)) - Added a `ComposeFilters` class to compose `pre_filter` functions in `Dataset` ([#7097](https://github.com/pyg-team/pytorch_geometric/pull/7097)) - Added a time-step aware variant of the `EllipticBitcoinDataset` called `EllipticBitcoinTemporalDataset` ([#7011](https://github.com/pyg-team/pytorch_geometric/pull/7011)) @@ -71,6 +71,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) +- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770)) ### Changed @@ -349,7 +350,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `Aggregation.set_validate_args` option to skip validation of `dim_size` ([#5290](https://github.com/pyg-team/pytorch_geometric/pull/5290)) - Added `SparseTensor` support to inference and training benchmark suite ([#5242](https://github.com/pyg-team/pytorch_geometric/pull/5242), [#5258](https://github.com/pyg-team/pytorch_geometric/pull/5258), [#5881](https://github.com/pyg-team/pytorch_geometric/pull/5881)) - Added experimental mode in inference benchmarks ([#5254](https://github.com/pyg-team/pytorch_geometric/pull/5254)) -- Added node classification example instrumented with [Weights and Biases (W&B) logging](https://wandb.com) and [W&B Sweeps](https://wandb.com/sweeps) ([#5192](https://github.com/pyg-team/pytorch_geometric/pull/5192)) +- Added node classification example instrumented with [Weights and Biases (W&B) logging](https://wandb.com) and [W&B Sweeps](https://wandb.com/sweeps) ([#5192](https://github.com/pyg-team/pytorch_geometric/pull/5192)) - Added experimental mode for `utils.scatter` ([#5232](https://github.com/pyg-team/pytorch_geometric/pull/5232), [#5241](https://github.com/pyg-team/pytorch_geometric/pull/5241), [#5386](https://github.com/pyg-team/pytorch_geometric/pull/5386)) - Added missing test labels in `HGBDataset` ([#5233](https://github.com/pyg-team/pytorch_geometric/pull/5233)) - Added `BaseStorage.get()` functionality ([#5240](https://github.com/pyg-team/pytorch_geometric/pull/5240)) diff --git a/examples/argva_node_clustering.py b/examples/argva_node_clustering.py index 8a31db4c2cc2..d26c223b1d26 100644 --- a/examples/argva_node_clustering.py +++ b/examples/argva_node_clustering.py @@ -15,7 +15,13 @@ from torch_geometric.datasets import Planetoid from torch_geometric.nn import ARGVA, GCNConv -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + transform = T.Compose([ T.ToDevice(device), T.RandomLinkSplit(num_val=0.05, num_test=0.1, is_undirected=True, diff --git a/examples/arma.py b/examples/arma.py index e6673b8f4439..3e90cfc8feec 100644 --- a/examples/arma.py +++ b/examples/arma.py @@ -32,7 +32,13 @@ def forward(self, x, edge_index): return F.log_softmax(x, dim=1) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model, data = Net(dataset.num_features, 16, dataset.num_classes).to(device), data.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) diff --git a/examples/autoencoder.py b/examples/autoencoder.py index e4dffcfd724a..f564d2f7b9e4 100644 --- a/examples/autoencoder.py +++ b/examples/autoencoder.py @@ -15,7 +15,13 @@ parser.add_argument('--epochs', type=int, default=400) args = parser.parse_args() -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + transform = T.Compose([ T.NormalizeFeatures(), T.ToDevice(device), diff --git a/examples/dna.py b/examples/dna.py index d45baad0a892..79d485109a1c 100644 --- a/examples/dna.py +++ 
b/examples/dna.py @@ -59,7 +59,13 @@ def forward(self, x, edge_index): return torch.log_softmax(x, dim=1) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model = Net(in_channels=dataset.num_features, hidden_channels=128, out_channels=dataset.num_classes, num_layers=5, heads=8, groups=16) model, data = model.to(device), data.to(device) diff --git a/examples/film.py b/examples/film.py index 08b8dad554f9..23c9821685f3 100644 --- a/examples/film.py +++ b/examples/film.py @@ -42,7 +42,13 @@ def forward(self, x, edge_index): return x -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model = Net(in_channels=train_dataset.num_features, hidden_channels=320, out_channels=train_dataset.num_classes, num_layers=4, dropout=0.1).to(device) diff --git a/examples/gcn.py b/examples/gcn.py index ac68e8242a4c..a8be4952c752 100644 --- a/examples/gcn.py +++ b/examples/gcn.py @@ -18,7 +18,13 @@ parser.add_argument('--wandb', action='http://github.com/store_true', help='Track experiment') args = parser.parse_args() -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + init_wandb(name=f'GCN-{args.dataset}', lr=args.lr, epochs=args.epochs, hidden_channels=args.hidden_channels, device=device) diff --git a/examples/glnn.py b/examples/glnn.py index 38412d9ff272..f7b3f05f026c 100644 --- a/examples/glnn.py +++ b/examples/glnn.py @@ -16,7 +16,12 @@ help='Balances loss from hard labels and teacher outputs') args = parser.parse_args() -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') dataset = Planetoid(path, name='Cora', transform=T.NormalizeFeatures()) diff --git a/examples/infomax_transductive.py b/examples/infomax_transductive.py index 55670e3485a8..4d2acab0ac2f 100644 --- a/examples/infomax_transductive.py +++ b/examples/infomax_transductive.py @@ -26,7 +26,13 @@ def corruption(x, edge_index): return x[torch.randperm(x.size(0), device=x.device)], edge_index -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model = DeepGraphInfomax( hidden_channels=512, encoder=Encoder(dataset.num_features, 512), diff --git a/examples/link_pred.py b/examples/link_pred.py index c741faa3bdc3..fd179e4c25d6 100644 --- a/examples/link_pred.py +++ b/examples/link_pred.py @@ -8,7 +8,13 @@ from torch_geometric.nn import GCNConv from torch_geometric.utils import negative_sampling -device = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + transform = T.Compose([ T.NormalizeFeatures(), T.ToDevice(device), diff --git a/examples/linkx.py b/examples/linkx.py index 169eb288ee43..646279c661b3 100644 --- a/examples/linkx.py +++ b/examples/linkx.py @@ -6,7 +6,12 @@ from torch_geometric.datasets import LINKXDataset from torch_geometric.nn import LINKX -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'LINKX') dataset = LINKXDataset(path, name='Penn94') diff --git a/examples/mutag_gin.py b/examples/mutag_gin.py index 4f0d1f1805bc..cbe2c08421c9 100644 --- a/examples/mutag_gin.py +++ b/examples/mutag_gin.py @@ -19,7 +19,14 @@ parser.add_argument('--wandb', action='http://github.com/store_true', help='Track experiment') args = parser.parse_args() -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + # MPS is currently slower than CPU due to missing int64 min/max ops + device = torch.device('cpu') +else: + device = torch.device('cpu') + init_wandb(name=f'GIN-{args.dataset}', batch_size=args.batch_size, lr=args.lr, epochs=args.epochs, hidden_channels=args.hidden_channels, num_layers=args.num_layers, device=device) diff --git a/examples/proteins_diff_pool.py b/examples/proteins_diff_pool.py index dce64ecfc4b9..ef376060685c 100644 --- a/examples/proteins_diff_pool.py +++ b/examples/proteins_diff_pool.py @@ -107,7 +107,13 @@ def forward(self, x, adj, mask=None): return F.log_softmax(x, dim=-1), l1 + l2, e1 + e2 -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model = Net().to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.001) diff --git a/examples/rect.py b/examples/rect.py index a89e6ff1f3e7..7c55152e651a 100644 --- a/examples/rect.py +++ b/examples/rect.py @@ -41,7 +41,13 @@ model = RECT_L(200, 200, normalize=False, dropout=0.0) zs_data.y = model.get_semantic_labels(zs_data.x, zs_data.y, zs_data.train_mask) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model, zs_data = model.to(device), zs_data.to(device) criterion = torch.nn.MSELoss(reduction='sum') diff --git a/examples/rgcn.py b/examples/rgcn.py index 00c738cc7fc7..49c1926447bb 100644 --- a/examples/rgcn.py +++ b/examples/rgcn.py @@ -50,7 +50,13 @@ def forward(self, edge_index, edge_type): return F.log_softmax(x, dim=1) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and 
torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + device = torch.device('cpu') if args.dataset == 'AM' else device model, data = Net().to(device), data.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0005) diff --git a/examples/sgc.py b/examples/sgc.py index 087d2317c88f..bc418f061b77 100644 --- a/examples/sgc.py +++ b/examples/sgc.py @@ -24,7 +24,13 @@ def forward(self): return F.log_softmax(x, dim=1) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model, data = Net().to(device), data.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.2, weight_decay=0.005) diff --git a/examples/signed_gcn.py b/examples/signed_gcn.py index 2f696c5173f4..ba5651543348 100644 --- a/examples/signed_gcn.py +++ b/examples/signed_gcn.py @@ -14,7 +14,14 @@ for data in dataset: pos_edge_indices.append(data.edge_index[:, data.edge_attr > 0]) neg_edge_indices.append(data.edge_index[:, data.edge_attr < 0]) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + pos_edge_index = torch.cat(pos_edge_indices, dim=1).to(device) neg_edge_index = torch.cat(neg_edge_indices, dim=1).to(device) diff --git a/examples/tagcn.py b/examples/tagcn.py index a7daf4408d53..59d5976a5a0b 100644 --- a/examples/tagcn.py +++ b/examples/tagcn.py @@ -27,7 +27,13 @@ def forward(self): return F.log_softmax(x, dim=1) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model, data = Net().to(device), data.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) diff --git a/examples/tensorboard_logging.py b/examples/tensorboard_logging.py index 15f5d6688ebd..0691190b5a62 100644 --- a/examples/tensorboard_logging.py +++ b/examples/tensorboard_logging.py @@ -27,7 +27,13 @@ def forward(self, x, edge_index): return F.log_softmax(x, dim=1) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + model, data = Net().to(device), data.to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) From 7a395bf935bc2032f8a8fc87e8121faa60535798 Mon Sep 17 00:00:00 2001 From: Arash Arbabi Date: Wed, 19 Jul 2023 16:57:33 +0330 Subject: [PATCH 1362/2432] fix: wrong initialization in `DimeNet` output block (#7774) Fixes: #7762 --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/nn/models/dimenet.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae2b5d9491e7..dd3ed7a999ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,6 +75,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fixed a bug where the `DimeNet` implementation returns zero after initialization ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774)) - Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772)) - Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734)) - Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714)) diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index 92fdd9da9cc2..833f658c8835 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -355,7 +355,7 @@ def reset_parameters(self): for lin in self.lins: glorot_orthogonal(lin.weight, scale=2.0) lin.bias.data.fill_(0) - self.lin.weight.data.fill_(0) + glorot_orthogonal(self.lin.weight, scale=2.0) def forward(self, x: Tensor, rbf: Tensor, i: Tensor, num_nodes: Optional[int] = None) -> Tensor: @@ -396,7 +396,7 @@ def reset_parameters(self): for lin in self.lins: glorot_orthogonal(lin.weight, scale=2.0) lin.bias.data.fill_(0) - self.lin.weight.data.fill_(0) + glorot_orthogonal(self.lin.weight, scale=2.0) def forward(self, x: Tensor, rbf: Tensor, i: Tensor, num_nodes: Optional[int] = None) -> Tensor: From 87ca71913efa164efe07622b5e1fcd189125d39c Mon Sep 17 00:00:00 2001 From: Ebrahim Pichka Date: Wed, 19 Jul 2023 11:24:33 -0400 Subject: [PATCH 1363/2432] Add `torch_geometric.utils.lexsort` (#7775) resolves #7743 Added Pytorch implementation of numpy lexsort. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/utils/test_lexsort.py | 11 +++++++++ torch_geometric/sampler/utils.py | 12 ++------- torch_geometric/utils/__init__.py | 2 ++ torch_geometric/utils/lexsort.py | 41 +++++++++++++++++++++++++++++++ 5 files changed, 57 insertions(+), 10 deletions(-) create mode 100644 test/utils/test_lexsort.py create mode 100644 torch_geometric/utils/lexsort.py diff --git a/CHANGELOG.md b/CHANGELOG.md index dd3ed7a999ce..d1a6c331783e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `lexsort` implementation ([#7775](https://github.com/pyg-team/pytorch_geometric/pull/7775)) - Added possibility to run inference benchmarks on XPU device ([#7705](https://github.com/pyg-team/pytorch_geometric/pull/7705)) - Added `HeteroData` support in `to_networkx` ([#7713](https://github.com/pyg-team/pytorch_geometric/pull/7713)) - Added `FlopsCount` support via `fvcore` ([#7693](https://github.com/pyg-team/pytorch_geometric/pull/7693)) diff --git a/test/utils/test_lexsort.py b/test/utils/test_lexsort.py new file mode 100644 index 000000000000..72a9f216809b --- /dev/null +++ b/test/utils/test_lexsort.py @@ -0,0 +1,11 @@ +import numpy as np +import torch + +from torch_geometric.utils import lexsort + + +def test_lexsort(): + keys = [torch.randn(100) for _ in range(3)] + + expected = np.lexsort([key.numpy() for key in keys]) + assert torch.equal(lexsort(keys), torch.from_numpy(expected)) diff --git a/torch_geometric/sampler/utils.py b/torch_geometric/sampler/utils.py index 830f5143e26b..ced0c72ed21b 100644 --- a/torch_geometric/sampler/utils.py +++ b/torch_geometric/sampler/utils.py @@ -1,13 +1,12 @@ from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union -import numpy as np import torch from torch import Tensor from torch_geometric.data import Data, HeteroData from torch_geometric.data.storage import EdgeStorage from torch_geometric.typing import NodeType, OptTensor -from torch_geometric.utils import coalesce, index_sort +from torch_geometric.utils import coalesce, index_sort, lexsort from torch_geometric.utils.sparse import index2ptr # Edge Layout Conversion ###################################################### @@ -22,14 +21,7 @@ def sort_csc( col, perm = index_sort(col) return row[perm], col, perm else: - # We use `np.lexsort` to sort based on multiple keys. - # TODO There does not seem to exist a PyTorch equivalent yet :( - perm = np.lexsort([ - src_node_time[row].detach().cpu().numpy(), - col.detach().cpu().numpy() - ]) - perm = torch.from_numpy(perm).to(col.device) - + perm = lexsort([src_node_time[row], col]) return row[perm], col[perm], perm diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 951559adf2bd..9609d13c5dca 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -7,6 +7,7 @@ from .softmax import softmax from .dropout import dropout_adj, dropout_node, dropout_edge, dropout_path from .sort_edge_index import sort_edge_index +from .lexsort import lexsort from .coalesce import coalesce from .undirected import is_undirected, to_undirected from .loop import (contains_self_loops, remove_self_loops, @@ -63,6 +64,7 @@ 'dropout_path', 'dropout_adj', 'sort_edge_index', + 'lexsort', 'coalesce', 'is_undirected', 'to_undirected', diff --git a/torch_geometric/utils/lexsort.py b/torch_geometric/utils/lexsort.py new file mode 100644 index 000000000000..b7c6a00ba2c1 --- /dev/null +++ b/torch_geometric/utils/lexsort.py @@ -0,0 +1,41 @@ +from typing import List + +import numpy as np +import torch +from torch import Tensor + +import torch_geometric.typing + + +def lexsort( + keys: List[Tensor], + dim: int = -1, + descending: bool = False, +) -> Tensor: + r"""Performs an indirect stable sort using a sequence of keys. + + Given multiple sorting keys, returns an array of integer indices that + describe their sort order. + The last key in the sequence is used for the primary sort order, the + second-to-last key for the secondary sort order, and so on. 
+ + Args: + keys ([torch.Tensor]): The :math:`k` different columns to be sorted. + The last key is the primary sort key. + dim (int, optional): The dimension to sort along. (default: :obj:`-1`) + descending (bool, optional): Controls the sorting order (ascending or + descending). (default: :obj:`False`) + """ + assert len(keys) >= 1 + + if not torch_geometric.typing.WITH_PT113: + keys = [k.neg() for k in keys] if descending else keys + out = np.lexsort([k.detach().cpu().numpy() for k in keys], axis=dim) + return torch.from_numpy(out).to(keys[0].device) + + kwargs = dict(dim=dim, descending=descending, stable=True) + out = keys[0].argsort(**kwargs) + for k in keys[1:]: + out = out.gather(dim, k.gather(dim, out).argsort(**kwargs)) + + return out From 34890a592baa02ad250fe34fa66bbd53ced17801 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 20 Jul 2023 09:42:41 +0200 Subject: [PATCH 1364/2432] Added `output_initializer` argument to `DimeNet` models (#7780) --- CHANGELOG.md | 2 +- torch_geometric/nn/models/dimenet.py | 40 +++++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1a6c331783e..bbf84ebe6133 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,7 +76,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed -- Fixed a bug where the `DimeNet` implementation returns zero after initialization ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774)) +- Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780)) - Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772)) - Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734)) - Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714)) diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index 833f658c8835..cc41761587a0 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -338,9 +338,14 @@ def __init__( out_channels: int, num_layers: int, act: Callable, + output_initializer: str = 'zeros', ): + assert output_initializer in {'zeros', 'glorot_orthogonal'} + super().__init__() + self.act = act + self.output_initializer = output_initializer self.lin_rbf = Linear(num_radial, hidden_channels, bias=False) self.lins = torch.nn.ModuleList() @@ -355,7 +360,10 @@ def reset_parameters(self): for lin in self.lins: glorot_orthogonal(lin.weight, scale=2.0) lin.bias.data.fill_(0) - glorot_orthogonal(self.lin.weight, scale=2.0) + if self.output_initializer == 'zeros': + self.lin.weight.data.fill_(0) + elif self.output_initializer == 'glorot_orthogonal': + glorot_orthogonal(self.lin.weight, scale=2.0) def forward(self, x: Tensor, rbf: Tensor, i: Tensor, num_nodes: Optional[int] = None) -> Tensor: @@ -375,9 +383,14 @@ def __init__( out_channels: int, num_layers: int, act: Callable, + output_initializer: str = 'zeros', ): + assert output_initializer in {'zeros', 'glorot_orthogonal'} + super().__init__() + self.act = act + self.output_initializer = output_initializer self.lin_rbf = Linear(num_radial, hidden_channels, bias=False) @@ -396,7 +409,10 @@ def reset_parameters(self): for 
lin in self.lins: glorot_orthogonal(lin.weight, scale=2.0) lin.bias.data.fill_(0) - glorot_orthogonal(self.lin.weight, scale=2.0) + if self.output_initializer == 'zeros': + self.lin.weight.data.fill_(0) + elif self.output_initializer == 'glorot_orthogonal': + glorot_orthogonal(self.lin.weight, scale=2.0) def forward(self, x: Tensor, rbf: Tensor, i: Tensor, num_nodes: Optional[int] = None) -> Tensor: @@ -470,6 +486,9 @@ class DimeNet(torch.nn.Module): output blocks. (default: :obj:`3`) act (str or Callable, optional): The activation function. (default: :obj:`"swish"`) + output_initializer (str, optional): The initialization method for the + output layer (:obj:`"zeros"`, :obj:`"glorot_orthogonal"`). + (default: :obj:`"zeros"`) """ url = ('https://github.com/klicperajo/dimenet/raw/master/pretrained/' @@ -490,6 +509,7 @@ def __init__( num_after_skip: int = 2, num_output_layers: int = 3, act: Union[str, Callable] = 'swish', + output_initializer: str = 'zeros', ): super().__init__() @@ -509,8 +529,14 @@ def __init__( self.emb = EmbeddingBlock(num_radial, hidden_channels, act) self.output_blocks = torch.nn.ModuleList([ - OutputBlock(num_radial, hidden_channels, out_channels, - num_output_layers, act) for _ in range(num_blocks + 1) + OutputBlock( + num_radial, + hidden_channels, + out_channels, + num_output_layers, + act, + output_initializer, + ) for _ in range(num_blocks + 1) ]) self.interaction_blocks = torch.nn.ModuleList([ @@ -721,6 +747,9 @@ class DimeNetPlusPlus(DimeNet): output blocks. (default: :obj:`3`) act: (str or Callable, optional): The activation funtion. (default: :obj:`"swish"`) + output_initializer (str, optional): The initialization method for the + output layer (:obj:`"zeros"`, :obj:`"glorot_orthogonal"`). + (default: :obj:`"zeros"`) """ url = ('https://raw.githubusercontent.com/gasteigerjo/dimenet/' @@ -743,6 +772,7 @@ def __init__( num_after_skip: int = 2, num_output_layers: int = 3, act: Union[str, Callable] = 'swish', + output_initializer: str = 'zeros', ): act = activation_resolver(act) @@ -760,6 +790,7 @@ def __init__( num_after_skip=num_after_skip, num_output_layers=num_output_layers, act=act, + output_initializer=output_initializer, ) # We are re-using the RBF, SBF and embedding layers of `DimeNet` and @@ -775,6 +806,7 @@ def __init__( out_channels, num_output_layers, act, + output_initializer, ) for _ in range(num_blocks + 1) ]) From 2014f064226f58263ec552e1f49a54dd5a1a9943 Mon Sep 17 00:00:00 2001 From: Nripesh Niketan <86844847+NripeshN@users.noreply.github.com> Date: Fri, 21 Jul 2023 13:03:52 +0530 Subject: [PATCH 1365/2432] Update `hetero_link_pred.py` example to support MPS (#7784) Hi @rusty1s, The following example supports MPS Acceleration --------- Co-authored-by: Jintang Li --- CHANGELOG.md | 2 +- examples/hetero/hetero_link_pred.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbf84ebe6133..2bb29b520b8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,7 +72,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) -- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770)) +- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770), [#7784](https://github.com/pyg-team/pytorch_geometric/pull/7784)) ### Changed diff --git a/examples/hetero/hetero_link_pred.py b/examples/hetero/hetero_link_pred.py index b77393c8bab1..4acc451e36e6 100644 --- a/examples/hetero/hetero_link_pred.py +++ b/examples/hetero/hetero_link_pred.py @@ -14,7 +14,12 @@ help='Whether to use weighted MSE loss.') args = parser.parse_args() -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data/MovieLens') dataset = MovieLens(path, model_name='all-MiniLM-L6-v2') From 53d91282160a680a740f595fb5a46dafb295d6ed Mon Sep 17 00:00:00 2001 From: Nripesh Niketan <86844847+NripeshN@users.noreply.github.com> Date: Fri, 21 Jul 2023 13:09:06 +0530 Subject: [PATCH 1366/2432] Update `gnn_explainer_link_pred.py` example to support MPS (#7781) Hi @rusty1s, This program supports MPS acceleration. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- examples/explain/gnn_explainer_link_pred.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bb29b520b8b..9370976819b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,7 +72,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) -- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770), [#7784](https://github.com/pyg-team/pytorch_geometric/pull/7784)) +- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770), [#7784](https://github.com/pyg-team/pytorch_geometric/pull/7784), [#7785](https://github.com/pyg-team/pytorch_geometric/pull/7785)) ### Changed diff --git a/examples/explain/gnn_explainer_link_pred.py b/examples/explain/gnn_explainer_link_pred.py index 5b3812f9dd37..080a588801f7 100644 --- a/examples/explain/gnn_explainer_link_pred.py +++ b/examples/explain/gnn_explainer_link_pred.py @@ -9,9 +9,15 @@ from torch_geometric.explain import Explainer, GNNExplainer, ModelConfig from torch_geometric.nn import GCNConv +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + dataset = 'Cora' path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') transform = T.Compose([ T.NormalizeFeatures(), T.ToDevice(device), From 8cd05f8f1f3caa9bd7cc5456c9581982189e13ea Mon Sep 17 00:00:00 2001 From: Jintang Li Date: Fri, 21 Jul 2023 15:54:22 +0800 Subject: [PATCH 1367/2432] Added a detailed warning if `torch_cluster` is not installed (#7785) Gives users a more detailed warning message if `torch_cluster` is not installed when using a related API, see also #7783 --------- Co-authored-by: Matthias Fey --- torch_geometric/nn/pool/__init__.py | 7 +------ torch_geometric/typing.py | 6 ++++++ torch_geometric/utils/dropout.py | 12 ++++-------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py index ad3d9e2fedde..cc7a0145be2d 100644 --- a/torch_geometric/nn/pool/__init__.py +++ b/torch_geometric/nn/pool/__init__.py @@ -3,7 +3,7 @@ from torch import Tensor import torch_geometric.typing -from torch_geometric.typing import OptTensor +from torch_geometric.typing import OptTensor, torch_cluster from .asap import ASAPooling from .avg_pool import avg_pool, avg_pool_neighbor_x, avg_pool_x @@ -18,11 +18,6 @@ from .voxel_grid import voxel_grid from .approx_knn import approx_knn, approx_knn_graph -try: - import torch_cluster -except ImportError: - torch_cluster = None - def fps( x: Tensor, diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index de1868746c4b..762d2829c3e5 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -51,6 +51,12 @@ f"Disabling its usage. 
Stacktrace: {e}") WITH_TORCH_CLUSTER = False + class TorchCluster: + def __getattr__(self, key: str): + raise ImportError(f"'{key}' requires 'torch-cluster'") + + torch_cluster = TorchCluster() + try: import torch_spline_conv # noqa WITH_TORCH_SPLINE_CONV = True diff --git a/torch_geometric/utils/dropout.py b/torch_geometric/utils/dropout.py index 422557f2c47f..9a90953c1a7d 100644 --- a/torch_geometric/utils/dropout.py +++ b/torch_geometric/utils/dropout.py @@ -1,14 +1,9 @@ from typing import Optional, Tuple import torch - -try: - import torch_cluster # noqa - random_walk = torch.ops.torch_cluster.random_walk -except ImportError: - random_walk = None from torch import Tensor +import torch_geometric.typing from torch_geometric.deprecation import deprecated from torch_geometric.typing import OptTensor from torch_geometric.utils.degree import degree @@ -267,7 +262,7 @@ def dropout_path(edge_index: Tensor, p: float = 0.2, walks_per_node: int = 1, if not training or p == 0.0: return edge_index, edge_mask - if random_walk is None: + if not torch_geometric.typing.WITH_TORCH_CLUSTER: raise ImportError('`dropout_path` requires `torch-cluster`.') num_nodes = maybe_num_nodes(edge_index, num_nodes) @@ -285,7 +280,8 @@ def dropout_path(edge_index: Tensor, p: float = 0.2, walks_per_node: int = 1, deg = degree(row, num_nodes=num_nodes) rowptr = row.new_zeros(num_nodes + 1) torch.cumsum(deg, 0, out=rowptr[1:]) - n_id, e_id = random_walk(rowptr, col, start, walk_length, 1.0, 1.0) + n_id, e_id = torch.ops.torch_cluster.random_walk(rowptr, col, start, + walk_length, 1.0, 1.0) e_id = e_id[e_id != -1].view(-1) # filter illegal edges if edge_orders is not None: From bfbb3141e639fd14ad85c49bedcb462f8c44764d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 22 Jul 2023 16:43:56 +0200 Subject: [PATCH 1368/2432] Fix `CaptumExplainer` for `binary_classification` tasks (#7787) Fixes #7702 --- CHANGELOG.md | 3 +- .../algorithm/test_captum_explainer.py | 61 ++++++++++++++++--- torch_geometric/explain/algorithm/captum.py | 39 +++++++----- .../explain/algorithm/captum_explainer.py | 20 ++++-- 4 files changed, 92 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9370976819b3..d03ce9be9060 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780)) - Added `lexsort` implementation ([#7775](https://github.com/pyg-team/pytorch_geometric/pull/7775)) - Added possibility to run inference benchmarks on XPU device ([#7705](https://github.com/pyg-team/pytorch_geometric/pull/7705)) - Added `HeteroData` support in `to_networkx` ([#7713](https://github.com/pyg-team/pytorch_geometric/pull/7713)) @@ -76,7 +77,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed -- Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780)) +- Fixed `CaptumExplainer` for `binary_classification` tasks ([#7787](https://github.com/pyg-team/pytorch_geometric/pull/7787)) - Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772)) - Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734)) - Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714)) diff --git a/test/explain/algorithm/test_captum_explainer.py b/test/explain/algorithm/test_captum_explainer.py index 6cea8b9ed741..48009fcf1711 100644 --- a/test/explain/algorithm/test_captum_explainer.py +++ b/test/explain/algorithm/test_captum_explainer.py @@ -70,14 +70,10 @@ def forward(self, x, edge_index, batch=None, edge_label_index=None): return x -node_mask_types = [ - MaskType.attributes, - None, -] -edge_mask_types = [ - MaskType.object, - None, -] +node_mask_types = [MaskType.attributes, None] +edge_mask_types = [MaskType.object, None] +task_levels = [ModelTaskLevel.node, ModelTaskLevel.edge, ModelTaskLevel.graph] +indices = [1, torch.arange(2)] def check_explanation( @@ -112,12 +108,57 @@ def test_unsupported_methods(method): ) +@withPackage('captum') +@pytest.mark.parametrize('method', ['IntegratedGradients']) +@pytest.mark.parametrize('node_mask_type', node_mask_types) +@pytest.mark.parametrize('edge_mask_type', edge_mask_types) +@pytest.mark.parametrize('task_level', task_levels) +@pytest.mark.parametrize('index', indices) +def test_captum_explainer_binary_classification( + method, + data, + node_mask_type, + edge_mask_type, + task_level, + index, +): + if node_mask_type is None and edge_mask_type is None: + return + + batch = torch.tensor([0, 0, 1, 1]) + edge_label_index = torch.tensor([[0, 1, 2], [2, 3, 1]]) + + model_config = ModelConfig( + mode='binary_classification', + task_level=task_level, + return_type='probs', + ) + + explainer = Explainer( + GCN(model_config), + algorithm=CaptumExplainer(method), + explanation_type='model', + edge_mask_type=edge_mask_type, + node_mask_type=node_mask_type, + model_config=model_config, + ) + + explanation = explainer( + data.x, + data.edge_index, + index=index, + batch=batch, + edge_label_index=edge_label_index, + ) + check_explanation(explanation, node_mask_type, edge_mask_type) + + @withPackage('captum') @pytest.mark.parametrize('method', methods) @pytest.mark.parametrize('node_mask_type', node_mask_types) @pytest.mark.parametrize('edge_mask_type', edge_mask_types) -@pytest.mark.parametrize('task_level', ['node', 'edge', 'graph']) -@pytest.mark.parametrize('index', [1, torch.arange(2)]) +@pytest.mark.parametrize('task_level', task_levels) +@pytest.mark.parametrize('index', indices) def test_captum_explainer_multiclass_classification( method, data, diff --git a/torch_geometric/explain/algorithm/captum.py b/torch_geometric/explain/algorithm/captum.py index d952a5fe689c..9014bfc8f897 100644 --- a/torch_geometric/explain/algorithm/captum.py +++ b/torch_geometric/explain/algorithm/captum.py @@ -9,6 +9,11 @@ set_hetero_masks, set_masks, ) +from torch_geometric.explain.config import ( + ModelConfig, + ModelMode, + ModelReturnType, +) from torch_geometric.typing import EdgeType, 
Metadata, NodeType @@ -29,12 +34,14 @@ def __init__( model: torch.nn.Module, mask_type: Union[str, MaskLevelType], output_idx: Optional[Union[int, Tensor]] = None, + model_config: Optional[ModelConfig] = None, ): super().__init__() self.mask_type = MaskLevelType(mask_type) self.model = model self.output_idx = output_idx + self.model_config = model_config def forward(self, mask, *args): """""" @@ -65,16 +72,25 @@ def forward(self, mask, *args): else: x = self.model(mask.squeeze(0), *args) - # Clear mask: - if self.mask_type != MaskLevelType.node: + return self.postprocess(x) + + def postprocess(self, x: Tensor) -> Tensor: + if self.mask_type.with_edge: clear_masks(self.model) - if self.output_idx is not None: + if self.output_idx is not None: # Filter by output index: x = x[self.output_idx] - if isinstance(self.output_idx, int) or (self.output_idx.numel() - == 1): + if (isinstance(self.output_idx, int) + or self.output_idx.dim() == 0): x = x.unsqueeze(0) + # Convert binary classification to multi-class classification: + if (self.model_config is not None + and self.model_config.mode == ModelMode.binary_classification): + assert self.model_config.return_type == ModelReturnType.probs + x = x.view(-1, 1) + x = torch.cat([1 - x, x], dim=-1) + return x @@ -86,8 +102,9 @@ def __init__( mask_type: Union[str, MaskLevelType], output_idx: Optional[Union[int, Tensor]], metadata: Metadata, + model_config: Optional[ModelConfig] = None, ): - super().__init__(model, mask_type, output_idx) + super().__init__(model, mask_type, output_idx, model_config) self.node_types = metadata[0] self.edge_types = metadata[1] self.num_node_types = len(self.node_types) @@ -151,15 +168,7 @@ def forward(self, *args): else: x = self.model(x_dict, edge_index_dict) - if self.mask_type.with_edge: - clear_masks(self.model) - - if self.output_idx is not None: - x = x[self.output_idx] - if isinstance(self.output_idx, int) or (self.output_idx.numel() - == 1): - x = x.unsqueeze(0) - return x + return self.postprocess(x) def _to_edge_mask(edge_index: Tensor) -> Tensor: diff --git a/torch_geometric/explain/algorithm/captum_explainer.py b/torch_geometric/explain/algorithm/captum_explainer.py index 1af5c202dd32..a5e10653a83c 100644 --- a/torch_geometric/explain/algorithm/captum_explainer.py +++ b/torch_geometric/explain/algorithm/captum_explainer.py @@ -15,7 +15,7 @@ convert_captum_output, to_captum_input, ) -from torch_geometric.explain.config import MaskType, ModelMode +from torch_geometric.explain.config import MaskType, ModelMode, ModelReturnType from torch_geometric.typing import EdgeType, NodeType @@ -145,10 +145,12 @@ def forward( mask_type, index, metadata, + self.model_config, ) else: metadata = None - captum_model = CaptumModel(model, mask_type, index) + captum_model = CaptumModel(model, mask_type, index, + self.model_config) attribution_method = self.attribution_method(captum_model) @@ -183,10 +185,18 @@ def forward( def supports(self) -> bool: node_mask_type = self.explainer_config.node_mask_type if node_mask_type not in [None, MaskType.attributes]: - logging.error(f"'{self.__class__.__name__}' only supports " - f"'node_mask_type' None or 'attributes' " + logging.error(f"'{self.__class__.__name__}' expects " + f"'node_mask_type' to be 'None' or 'attributes' " f"(got '{node_mask_type.value}')") return False - # TODO (ramona): Confirm that output type is valid. 
+ return_type = self.model_config.return_type + if (self.model_config.mode == ModelMode.binary_classification + and return_type != ModelReturnType.probs): + logging.error(f"'{self.__class__.__name__}' expects " + f"'return_type' to be 'probs' for binary " + f"classification tasks (got '{return_type.value}')") + return False + + # TODO (ramona) Confirm that output type is valid. return True From 02cc18d9d6841ecda4eb61eebb48b86f1aaae477 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 24 Jul 2023 08:59:42 +0200 Subject: [PATCH 1369/2432] Fix `edge_label_index` in homogeneous+`disjoint` mode (#7791) --- CHANGELOG.md | 1 + torch_geometric/sampler/neighbor_sampler.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d03ce9be9060..f0e9bdb146d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed `edge_label_index` computation in `LinkNeighborLoader` for the homogeneous+`disjoint` mode ([#7791](https://github.com/pyg-team/pytorch_geometric/pull/7791)) - Fixed `CaptumExplainer` for `binary_classification` tasks ([#7787](https://github.com/pyg-team/pytorch_geometric/pull/7787)) - Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772)) - Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734)) diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index aeae99fb3ec9..702c7810667c 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -572,7 +572,7 @@ def edge_sample( if neg_sampling is None or neg_sampling.is_binary(): if disjoint: out.batch = out.batch % num_pos - edge_label_index = torch.arange(2 * seed.numel()).view(2, -1) + edge_label_index = torch.arange(seed.numel()).view(2, -1) else: edge_label_index = inverse_seed.view(2, -1) From 6acc0967d9a5b2226d8e004cac52b4b20aea1a82 Mon Sep 17 00:00:00 2001 From: ZhengHongming888 Date: Fri, 28 Jul 2023 09:00:43 -0700 Subject: [PATCH 1370/2432] Add `dist_context`/RPC for distributed training (#7671) This code belongs to the part of the whole distributed training for PyG. This class will do 1. dist_context is to setup the distributed mode like worker mode or other mode and also setup the information for distributed context like role, rank, group_name, world_size,etc. 2. based on pytorch rpc.api to setup the api to check is_rpc_initialized and wrapper func, init_rpc(), RpcCall(), RpcRouter, rpc_async_request() and rpc_sync_request(), etc These basic rpc functionality will be used in later feature lookup function after node sampling. Any comments please let us know. thanks. 
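To make the intended workflow concrete, here is a minimal sketch of how these pieces fit together (two workers on a single Linux host; the `run` helper, the group name, and the port below are illustrative only and are not part of this patch):

```python
from typing import Dict, List

import torch.multiprocessing as mp

import torch_geometric.distributed.rpc as rpc
from torch_geometric.distributed.dist_context import DistContext, DistRole
from torch_geometric.distributed.rpc import RpcRouter


def run(rank: int, world_size: int = 2, master_port: int = 12355):
    # 1) Describe the current process:
    ctx = DistContext(
        rank=rank,
        global_rank=rank,
        world_size=world_size,
        global_world_size=world_size,
        group_name='rpc-example',
    )
    worker_names: Dict[DistRole, List[str]] = {}

    # 2) Initialize the RPC agent for this worker:
    rpc.init_rpc(
        current_ctx=ctx,
        rpc_worker_names=worker_names,
        master_addr='localhost',
        master_port=master_port,
    )

    # 3) Gather the partition-to-worker mapping and route by partition:
    partition_to_workers = rpc.rpc_partition_to_workers(ctx, world_size, rank)
    router = RpcRouter(partition_to_workers)
    target = router.get_to_worker(partition_idx=(rank + 1) % world_size)
    print(f"Rank {rank} routes partition requests to '{target}'")

    # A call registered via `rpc_register` (any `RpcCallBase` subclass) can
    # now be invoked on `target` through `rpc.rpc_async`/`rpc.rpc_sync`.

    rpc.shutdown_rpc()


if __name__ == '__main__':
    mp.spawn(run, nprocs=2)
```

Note that `RpcRouter` hands out the workers assigned to a partition in round-robin order, so a partition served by several workers is load-balanced automatically.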
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/distributed/test_rpc.py | 121 +++++++++++++ torch_geometric/distributed/dist_context.py | 21 +++ torch_geometric/distributed/rpc.py | 190 ++++++++++++++++++++ 4 files changed, 333 insertions(+), 1 deletion(-) create mode 100644 test/distributed/test_rpc.py create mode 100644 torch_geometric/distributed/dist_context.py create mode 100644 torch_geometric/distributed/rpc.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f0e9bdb146d1..a6ba8b3392db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,7 +33,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_rpc.py b/test/distributed/test_rpc.py new file mode 100644 index 000000000000..673c53a694a0 --- /dev/null +++ b/test/distributed/test_rpc.py @@ -0,0 +1,121 @@ +import socket +from typing import Dict, List + +import torch + +import torch_geometric.distributed.rpc as rpc +from torch_geometric.distributed import LocalFeatureStore +from torch_geometric.distributed.dist_context import DistContext, DistRole +from torch_geometric.distributed.rpc import RpcRouter + + +def run_rpc_feature_test( + world_size: int, + rank: int, + feature: LocalFeatureStore, + partition_book: torch.Tensor, + master_port: int, +): + # 1) Initialize the context info: + current_ctx = DistContext( + rank=rank, + global_rank=rank, + world_size=world_size, + global_world_size=world_size, + group_name='dist-feature-test', + ) + rpc_worker_names: Dict[DistRole, List[str]] = {} + + rpc.init_rpc( + current_ctx=current_ctx, + rpc_worker_names=rpc_worker_names, + master_addr='localhost', + master_port=master_port, + ) + + # 2) Collect all workers: + partition_to_workers = rpc.rpc_partition_to_workers( + current_ctx, world_size, rank) + + assert partition_to_workers == [ + ['dist-feature-test-0'], + ['dist-feature-test-1'], + ] + + # 3) Find the mapping 
between worker and partition ID: + rpc_router = RpcRouter(partition_to_workers) + + assert rpc_router.get_to_worker(partition_idx=0) == 'dist-feature-test-0' + assert rpc_router.get_to_worker(partition_idx=1) == 'dist-feature-test-1' + + meta = { + 'edge_types': None, + 'is_hetero': False, + 'node_types': None, + 'num_parts': 2 + } + + feature.num_partitions = world_size + feature.partition_idx = rank + feature.feature_pb = partition_book + feature.meta = meta + feature.set_local_only(local_only=False) + feature.set_rpc_router(rpc_router) + + # Global node IDs: + global_id0 = torch.arange(128 * 2) + global_id1 = torch.arange(128 * 2) + 128 * 2 + + # Lookup the features from stores including locally and remotely: + tensor0 = feature.lookup_features(global_id0) + tensor1 = feature.lookup_features(global_id1) + + # Expected searched results: + cpu_tensor0 = torch.cat([torch.ones(128, 1024), torch.ones(128, 1024) * 2]) + cpu_tensor1 = torch.cat([torch.zeros(128, 1024), torch.zeros(128, 1024)]) + + # Verify.. + assert torch.allclose(cpu_tensor0, tensor0.wait()) + assert torch.allclose(cpu_tensor1, tensor1.wait()) + + rpc.shutdown_rpc() + + +def test_dist_feature_lookup(): + cpu_tensor0 = torch.cat([torch.ones(128, 1024), torch.ones(128, 1024) * 2]) + cpu_tensor1 = torch.cat([torch.zeros(128, 1024), torch.zeros(128, 1024)]) + + # Global node IDs: + global_id0 = torch.arange(128 * 2) + global_id1 = torch.arange(128 * 2) + 128 * 2 + + # Set the partition book for two features (partition 0 and 1): + partition_book = torch.cat([ + torch.zeros(128 * 2, dtype=torch.long), + torch.ones(128 * 2, dtype=torch.long) + ]) + + # Put the test tensor into the different feature stores with IDs: + feature0 = LocalFeatureStore() + feature0.put_global_id(global_id0, group_name=None) + feature0.put_tensor(cpu_tensor0, group_name=None, attr_name='x') + + feature1 = LocalFeatureStore() + feature1.put_global_id(global_id1, group_name=None) + feature1.put_tensor(cpu_tensor1, group_name=None, attr_name='x') + + mp_context = torch.multiprocessing.get_context('spawn') + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.bind(('127.0.0.1', 0)) + port = s.getsockname()[1] + s.close() + + w0 = mp_context.Process(target=run_rpc_feature_test, + args=(2, 0, feature0, partition_book, port)) + w1 = mp_context.Process(target=run_rpc_feature_test, + args=(2, 1, feature1, partition_book, port)) + + w0.start() + w1.start() + w0.join() + w1.join() diff --git a/torch_geometric/distributed/dist_context.py b/torch_geometric/distributed/dist_context.py new file mode 100644 index 000000000000..5b3e72f733e3 --- /dev/null +++ b/torch_geometric/distributed/dist_context.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass +from enum import Enum + + +class DistRole(Enum): + WORKER = 1 + + +@dataclass +class DistContext: + r"""Context information of the current process.""" + rank: int + global_rank: int + world_size: int + global_world_size: int + group_name: str + role: DistRole = DistRole.WORKER + + @property + def worker_name(self) -> str: + return f'{self.group_name}-{self.rank}' diff --git a/torch_geometric/distributed/rpc.py b/torch_geometric/distributed/rpc.py new file mode 100644 index 000000000000..3f710d9b8b56 --- /dev/null +++ b/torch_geometric/distributed/rpc.py @@ -0,0 +1,190 @@ +import atexit +import logging +import threading +from abc import ABC, abstractmethod +from typing import Dict, List + +from torch._C._distributed_rpc import _is_current_rpc_agent_set +from torch.distributed import rpc + +from 
torch_geometric.distributed.dist_context import DistContext, DistRole + +_rpc_init_lock = threading.RLock() + + +def rpc_is_initialized() -> bool: + return _is_current_rpc_agent_set() + + +@rpc.api._require_initialized +def global_all_gather(obj, timeout=None): + r"""Gathers objects from all groups in a list.""" + if timeout is None: + return rpc.api._all_gather(obj) + return rpc.api._all_gather(obj, timeout=timeout) + + +@rpc.api._require_initialized +def global_barrier(timeout=None): + r""" Block until all local and remote RPC processes.""" + try: + global_all_gather(obj=None, timeout=timeout) + except RuntimeError: + logging.error("Failed to respond to global barrier") + + +def init_rpc( + current_ctx: DistContext, + rpc_worker_names: Dict[DistRole, List[str]], + master_addr: str, + master_port: int, + num_rpc_threads: int = 16, + rpc_timeout: float = 240, +): + with _rpc_init_lock: + if rpc_is_initialized(): + return + + if current_ctx is None: + raise RuntimeError("'dist_context' has not been set in 'init_rpc'") + + options = rpc.TensorPipeRpcBackendOptions( + _transports=['ibv', 'uv'], + _channels=['mpt_uv', 'basic'], + num_worker_threads=num_rpc_threads, + rpc_timeout=rpc_timeout, + init_method=f'tcp://{master_addr}:{master_port}', + ) + + rpc.init_rpc( + name=current_ctx.worker_name, + rank=current_ctx.global_rank, + world_size=current_ctx.global_world_size, + rpc_backend_options=options, + ) + + gathered_results = global_all_gather( + obj=(current_ctx.role, current_ctx.world_size, current_ctx.rank), + timeout=rpc_timeout, + ) + + for worker_name, (role, world_size, rank) in gathered_results.items(): + worker_list = rpc_worker_names.get(role, None) + if worker_list is None: + worker_list = [None for _ in range(world_size)] + else: + if len(worker_list) != world_size: + raise RuntimeError(f"Inconsistent world size found in " + f"'init_rpc' (got {len(worker_list)})") + + worker_list[rank] = worker_name + rpc_worker_names[role] = worker_list + + global_barrier(timeout=rpc_timeout) + + +def shutdown_rpc(graceful: bool = True): + if rpc_is_initialized(): + rpc.shutdown(graceful) + + +atexit.register(shutdown_rpc, False) + + +class RpcRouter: + r"""A router to get the worker based on the partition ID.""" + def __init__(self, partition_to_workers: List[List[str]]): + for pid, rpc_worker_list in enumerate(partition_to_workers): + if len(rpc_worker_list) == 0: + raise ValueError("No RPC worker is in worker list") + self.partition_to_workers = partition_to_workers + self.rpc_worker_indices = [0 for _ in range(len(partition_to_workers))] + + def get_to_worker(self, partition_idx: int) -> str: + rpc_worker_list = self.partition_to_workers[partition_idx] + worker_idx = self.rpc_worker_indices[partition_idx] + router_worker = rpc_worker_list[worker_idx] + self.rpc_worker_indices[partition_idx] = ((worker_idx + 1) % + len(rpc_worker_list)) + return router_worker + + +@rpc.api._require_initialized +def rpc_partition_to_workers( + current_ctx: DistContext, + num_partitions: int, + current_partition_idx: int, +): + r"""Performs an :obj:`all_gather` to get the mapping between partition and + workers.""" + ctx = current_ctx + partition_to_workers = [[] for _ in range(num_partitions)] + gathered_results = global_all_gather( + (ctx.role, num_partitions, current_partition_idx)) + for worker_name, (role, nparts, idx) in gathered_results.items(): + partition_to_workers[idx].append(worker_name) + return partition_to_workers + + +class RpcCallBase(ABC): + r"""A wrapper base class for RPC calls in remote 
processes.""" + @abstractmethod + def rpc_sync(self, *args, **kwargs): + pass + + @abstractmethod + def rpc_async(self, *args, **kwargs): + pass + + +_rpc_call_lock = threading.RLock() +_rpc_call_id: int = 0 +_rpc_call_pool: Dict[int, RpcCallBase] = {} + + +@rpc.api._require_initialized +def rpc_register(call: RpcCallBase) -> int: + r"""Registers a call for RPC requests.""" + global _rpc_call_id, _rpc_call_pool + + with _rpc_call_lock: + call_id = _rpc_call_id + _rpc_call_id += 1 + if call_id in _rpc_call_pool: + raise RuntimeError("Registered function twice in 'rpc_register'") + _rpc_call_pool[call_id] = call + + return call_id + + +def _rpc_async_call(call_id: int, *args, **kwargs): + r""" Entry point for RPC requests.""" + return _rpc_call_pool.get(call_id).rpc_async(*args, **kwargs) + + +@rpc.api._require_initialized +def rpc_async(worker_name: str, call_id: int, args=None, kwargs=None): + r"""Performs an asynchronous RPC request and returns a future.""" + return rpc.rpc_async( + to=worker_name, + func=_rpc_async_call, + args=(call_id, *args), + kwargs=kwargs, + ) + + +def _rpc_sync_call(call_id: int, *args, **kwargs): + r"""Entry point for synchronous RPC requests.""" + return _rpc_call_pool.get(call_id).rpc_sync(*args, **kwargs) + + +@rpc.api._require_initialized +def rpc_sync(worker_name: str, call_id: int, args=None, kwargs=None): + r"""Performs a synchronous RPC request and returns a future.""" + future = rpc.rpc_async( + to=worker_name, + func=_rpc_sync_call, + args=(call_id, *args), + kwargs=kwargs, + ) + return future.wait() From 4889a1e0852ef7a3d04d125432b8c08dde023ef9 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Fri, 28 Jul 2023 17:07:32 +0100 Subject: [PATCH 1371/2432] Make `FieldStatus` enum picklable for multi-processing (#7808) This PR makes `FieldStatus` picklable. For pickability of enums, see https://docs.python.org/3/howto/enum.html#functional-api. --- > Is num_workers > 0 compatible with torch_geometric.data.FeatureStore and torch_geometric.data.GraphStore objects? > Working with NeighborLoader I am encountering the following: > ``` > def dump(obj, file, protocol=None): > '''Replacement for pickle.dump() using ForkingPickler.''' > > ForkingPickler(file, protocol).dump(obj) > > E _pickle.PicklingError: Can't pickle : attribute lookup FieldStatus on torch_geometric.data.feature_store failed > ``` _Originally reported in PyG Slack: https://torchgeometricco.slack.com/archives/C01DN0B3B1N/p1690516103185129_ --- CHANGELOG.md | 1 + test/data/test_feature_store.py | 5 ++--- torch_geometric/data/data.py | 4 ++-- torch_geometric/data/feature_store.py | 16 +++++++++------- .../distributed/local_feature_store.py | 6 +++--- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a6ba8b3392db..a4b6482a6b19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Made `FieldStatus` enum picklable to avoid `PicklingError` in a multi-process setting ([#7808](https://github.com/pyg-team/pytorch_geometric/pull/7808)) - Fixed `edge_label_index` computation in `LinkNeighborLoader` for the homogeneous+`disjoint` mode ([#7791](https://github.com/pyg-team/pytorch_geometric/pull/7791)) - Fixed `CaptumExplainer` for `binary_classification` tasks ([#7787](https://github.com/pyg-team/pytorch_geometric/pull/7787)) - Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772)) diff --git a/test/data/test_feature_store.py b/test/data/test_feature_store.py index 95f02a59c640..c9a85096624c 100644 --- a/test/data/test_feature_store.py +++ b/test/data/test_feature_store.py @@ -4,14 +4,13 @@ import torch from torch_geometric.data import TensorAttr -from torch_geometric.data.feature_store import AttrView, _field_status +from torch_geometric.data.feature_store import AttrView, _FieldStatus from torch_geometric.testing import MyFeatureStore @dataclass class MyTensorAttrNoGroupName(TensorAttr): - def __init__(self, attr_name=_field_status.UNSET, - index=_field_status.UNSET): + def __init__(self, attr_name=_FieldStatus.UNSET, index=_FieldStatus.UNSET): # Treat group_name as optional, and move it to the end super().__init__(None, attr_name, index) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index fd19b16dbbd3..d8fdadacc2b1 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -20,7 +20,7 @@ from torch import Tensor from torch_geometric.data import EdgeAttr, FeatureStore, GraphStore, TensorAttr -from torch_geometric.data.feature_store import _field_status +from torch_geometric.data.feature_store import _FieldStatus from torch_geometric.data.graph_store import EdgeLayout from torch_geometric.data.storage import ( BaseStorage, @@ -372,7 +372,7 @@ class DataTensorAttr(TensorAttr): r"""Tensor attribute for `Data` without group name.""" def __init__( self, - attr_name=_field_status.UNSET, + attr_name=_FieldStatus.UNSET, index=None, ): super().__init__(None, attr_name, index) diff --git a/torch_geometric/data/feature_store.py b/torch_geometric/data/feature_store.py index 10c7f1efd90f..946479d0f633 100644 --- a/torch_geometric/data/feature_store.py +++ b/torch_geometric/data/feature_store.py @@ -32,13 +32,15 @@ from torch_geometric.typing import FeatureTensorType, NodeType from torch_geometric.utils.mixin import CastMixin -_field_status = Enum("FieldStatus", "UNSET") - # We allow indexing with a tensor, numpy array, Python slicing, or a single # integer index. IndexType = Union[torch.Tensor, np.ndarray, slice, int] +class _FieldStatus(Enum): + UNSET = None + + @dataclass class TensorAttr(CastMixin): r"""Defines the attributes of a :class:`FeatureStore` tensor. @@ -52,20 +54,20 @@ class TensorAttr(CastMixin): """ # The group name that the tensor corresponds to. Defaults to UNSET. - group_name: Optional[NodeType] = _field_status.UNSET + group_name: Optional[NodeType] = _FieldStatus.UNSET # The name of the tensor within its group. Defaults to UNSET. - attr_name: Optional[str] = _field_status.UNSET + attr_name: Optional[str] = _FieldStatus.UNSET # The node indices the rows of the tensor correspond to. Defaults to UNSET. 
- index: Optional[IndexType] = _field_status.UNSET + index: Optional[IndexType] = _FieldStatus.UNSET # Convenience methods ##################################################### def is_set(self, key: str) -> bool: r"""Whether an attribute is set in :obj:`TensorAttr`.""" assert key in self.__dataclass_fields__ - return getattr(self, key) != _field_status.UNSET + return getattr(self, key) != _FieldStatus.UNSET def is_fully_specified(self) -> bool: r"""Whether the :obj:`TensorAttr` has no unset fields.""" @@ -137,7 +139,7 @@ def __getattr__(self, key: Any) -> Union['AttrView', FeatureTensorType]: # Find the first attribute name that is UNSET: attr_name: Optional[str] = None for field in out._attr.__dataclass_fields__: - if getattr(out._attr, field) == _field_status.UNSET: + if getattr(out._attr, field) == _FieldStatus.UNSET: attr_name = field break diff --git a/torch_geometric/distributed/local_feature_store.py b/torch_geometric/distributed/local_feature_store.py index 9cc5c395e0fb..260087e09a74 100644 --- a/torch_geometric/distributed/local_feature_store.py +++ b/torch_geometric/distributed/local_feature_store.py @@ -8,7 +8,7 @@ from torch import Tensor from torch_geometric.data import FeatureStore, TensorAttr -from torch_geometric.data.feature_store import _field_status +from torch_geometric.data.feature_store import _FieldStatus from torch_geometric.typing import EdgeType, NodeType @@ -17,8 +17,8 @@ class LocalTensorAttr(TensorAttr): r"""Tensor attribute for storing features without :obj:`index`.""" def __init__( self, - group_name: Optional[Union[NodeType, EdgeType]] = _field_status.UNSET, - attr_name: Optional[str] = _field_status.UNSET, + group_name: Optional[Union[NodeType, EdgeType]] = _FieldStatus.UNSET, + attr_name: Optional[str] = _FieldStatus.UNSET, index=None, ): super().__init__(group_name, attr_name, index) From 9fbb906283038b66f1502f0218f8485bbe78d832 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 31 Jul 2023 13:57:40 +0200 Subject: [PATCH 1372/2432] Fix broken Windows build due to `distributed` package (#7818) --- torch_geometric/distributed/rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/distributed/rpc.py b/torch_geometric/distributed/rpc.py index 3f710d9b8b56..b6f46ba3521b 100644 --- a/torch_geometric/distributed/rpc.py +++ b/torch_geometric/distributed/rpc.py @@ -4,7 +4,6 @@ from abc import ABC, abstractmethod from typing import Dict, List -from torch._C._distributed_rpc import _is_current_rpc_agent_set from torch.distributed import rpc from torch_geometric.distributed.dist_context import DistContext, DistRole @@ -13,6 +12,7 @@ def rpc_is_initialized() -> bool: + from torch._C._distributed_rpc import _is_current_rpc_agent_set return _is_current_rpc_agent_set() From 97e42ab6f915daa87cfec2f71dd59fcd74ae2495 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 31 Jul 2023 15:01:01 +0200 Subject: [PATCH 1373/2432] Add disclaimer on default mini-batching behavior (#7819) Fixes #7777 --- docs/source/advanced/batching.rst | 14 +++++++++----- torch_geometric/data/batch.py | 17 +++++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/docs/source/advanced/batching.rst b/docs/source/advanced/batching.rst index 9acbd5eab22c..881755eda112 100644 --- a/docs/source/advanced/batching.rst +++ b/docs/source/advanced/batching.rst @@ -31,27 +31,31 @@ The same is true for :obj:`face` tensors, *i.e.*, face indices in meshes. 
All other tensors will just get concatenated in the first dimension without any further increasement of their values. However, there are a few special use-cases (as outlined below) where the user actively wants to modify this behavior to its own needs. -:pyg:`PyG` allows modification to the underlying batching procedure by overwriting the :func:`torch_geometric.data.Data.__inc__` and :func:`torch_geometric.data.Data.__cat_dim__` functionalities. +:pyg:`PyG` allows modification to the underlying batching procedure by overwriting the :meth:`torch_geometric.data.Data.__inc__` and :meth:`torch_geometric.data.Data.__cat_dim__` functionalities. Without any modifications, these are defined as follows in the :class:`~torch_geometric.data.Data` class: .. code-block:: python def __inc__(self, key, value, *args, **kwargs): - if 'index' in key or 'face' in key: + if 'index' in key: return self.num_nodes else: return 0 def __cat_dim__(self, key, value, *args, **kwargs): - if 'index' in key or 'face' in key: + if 'index' in key: return 1 else: return 0 -We can see that :meth:`__inc__` defines the incremental count between two consecutive graph attributes, where as :meth:`__cat_dim__` defines in which dimension graph tensors of the same attribute should be concatenated together. +We can see that :meth:`~torch_geometric.data.Data.__inc__` defines the incremental count between two consecutive graph attributes. +By default, :pyg:`PyG` increments attributes by the number of nodes whenever their attribute names contain the substring :obj:`index` (for historical reasons), which comes in handy for attributes such as :obj:`edge_index` or :obj:`node_index`. +However, note that this may lead to unexpected behavior for attributes whose names contain the substring :obj:`index` but should not be incremented. +To make sure, it is best practice to always double-check the output of batching. +Furthermore, :meth:`~torch_geometric.data.Data.__cat_dim__` defines in which dimension graph tensors of the same attribute should be concatenated together. Both functions are called for each attribute stored in the :class:`~torch_geometric.data.Data` class, and get passed their specific :obj:`key` and value :obj:`item` as arguments. -In what follows, we present a few use-cases where the modification of :func:`__inc__` and :func:`__cat_dim__` might be absolutely necessary. +In what follows, we present a few use-cases where the modification of :meth:`~torch_geometric.data.Data.__inc__` and :meth:`~torch_geometric.data.Data.__cat_dim__` might be absolutely necessary. Pairs of Graphs --------------- diff --git a/torch_geometric/data/batch.py b/torch_geometric/data/batch.py index cf5e1f6c01f4..c5c18e47b2e1 100644 --- a/torch_geometric/data/batch.py +++ b/torch_geometric/data/batch.py @@ -60,6 +60,23 @@ class Batch(metaclass=DynamicInheritance): :class:`torch_geometric.data.HeteroData`. In addition, single graphs can be identified via the assignment vector :obj:`batch`, which maps each node to its respective graph identifier. + + :pyg:`PyG` allows modification to the underlying batching procedure by + overwriting the :meth:`~Data.__inc__` and :meth:`~Data.__cat_dim__` + functionalities. + The :meth:`~Data.__inc__` method defines the incremental count between two + consecutive graph attributes. 
+ By default, :pyg:`PyG` increments attributes by the number of nodes + whenever their attribute names contain the substring :obj:`index` + (for historical reasons), which comes in handy for attributes such as + :obj:`edge_index` or :obj:`node_index`. + However, note that this may lead to unexpected behavior for attributes + whose names contain the substring :obj:`index` but should not be + incremented. + To make sure, it is best practice to always double-check the output of + batching. + Furthermore, :meth:`~Data.__cat_dim__` defines in which dimension graph + tensors of the same attribute should be concatenated together. """ @classmethod def from_data_list(cls, data_list: List[BaseData], From bc69d1ace586c06983e4acc682b385b330759d2e Mon Sep 17 00:00:00 2001 From: Kaidi Cao <16861426+kaidic@users.noreply.github.com> Date: Mon, 31 Jul 2023 03:27:01 -1000 Subject: [PATCH 1374/2432] Let `coalesce()` and `sort_edge_index()` accept `edge_index` as a tuple of tensors (#7814) This PR allows `coalesce()` and `sort_edge_index()` to accept `edge_index` as a tensor or a tuple of tensors. --------- Co-authored-by: Kaidi Cao Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- test/utils/test_coalesce.py | 5 +++++ test/utils/test_sort_edge_index.py | 5 +++++ torch_geometric/utils/coalesce.py | 26 ++++++++++++++++-------- torch_geometric/utils/num_nodes.py | 19 ++++++++++++++--- torch_geometric/utils/sort_edge_index.py | 13 ++++++++---- 5 files changed, 53 insertions(+), 15 deletions(-) diff --git a/test/utils/test_coalesce.py b/test/utils/test_coalesce.py index 6d0f9f69b804..5c3885df3da8 100644 --- a/test/utils/test_coalesce.py +++ b/test/utils/test_coalesce.py @@ -26,6 +26,11 @@ def test_coalesce(): assert out[1][0].tolist() == [[4], [3], [2], [6]] assert out[1][1].tolist() == [4, 3, 2, 6] + out = coalesce((edge_index[0], edge_index[1])) + assert isinstance(out, tuple) + assert out[0].tolist() == [0, 1, 1, 2] + assert out[1].tolist() == [1, 0, 2, 1] + def test_coalesce_without_duplicates(): edge_index = torch.tensor([[2, 1, 1, 0], [1, 2, 0, 1]]) diff --git a/test/utils/test_sort_edge_index.py b/test/utils/test_sort_edge_index.py index d1f5e47fa6de..b1f11b9cba6d 100644 --- a/test/utils/test_sort_edge_index.py +++ b/test/utils/test_sort_edge_index.py @@ -13,6 +13,11 @@ def test_sort_edge_index(): out = sort_edge_index(edge_index) assert out.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] + out = sort_edge_index((edge_index[0], edge_index[1])) + assert isinstance(out, tuple) + assert out[0].tolist() == [0, 1, 1, 2] + assert out[1].tolist() == [1, 0, 2, 1] + out = sort_edge_index(edge_index, None) assert out[0].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] assert out[1] is None diff --git a/torch_geometric/utils/coalesce.py b/torch_geometric/utils/coalesce.py index 20e1b899db75..d5305d5aa41a 100644 --- a/torch_geometric/utils/coalesce.py +++ b/torch_geometric/utils/coalesce.py @@ -41,9 +41,9 @@ def coalesce( together according to the given :obj:`reduce` option. Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor or List[Tensor], optional): Edge weights or multi- - dimensional edge features. + edge_index (torch.Tensor): The edge indices. + edge_attr (torch.Tensor or List[torch.Tensor], optional): Edge weights + or multi-dimensional edge features. If given as a list, will re-shuffle and remove duplicates for all its entries. 
(default: :obj:`None`) num_nodes (int, optional): The number of nodes, *i.e.* @@ -90,17 +90,22 @@ def coalesce( [3, 1, 2]]), tensor([1., 1., 1.])) """ - nnz = edge_index.size(1) + num_edges = edge_index[0].size(0) num_nodes = maybe_num_nodes(edge_index, num_nodes) - idx = edge_index.new_empty(nnz + 1) + idx = edge_index[0].new_empty(num_edges + 1) idx[0] = -1 idx[1:] = edge_index[1 - int(sort_by_row)] idx[1:].mul_(num_nodes).add_(edge_index[int(sort_by_row)]) if not is_sorted: idx[1:], perm = index_sort(idx[1:], max_value=num_nodes * num_nodes) - edge_index = edge_index[:, perm] + if isinstance(edge_index, Tensor): + edge_index = edge_index[:, perm] + elif isinstance(edge_index, tuple): + edge_index = (edge_index[0][perm], edge_index[1][perm]) + else: + raise NotImplementedError if isinstance(edge_attr, Tensor): edge_attr = edge_attr[perm] elif isinstance(edge_attr, (list, tuple)): @@ -114,12 +119,17 @@ def coalesce( return edge_index, edge_attr return edge_index - edge_index = edge_index[:, mask] + if isinstance(edge_index, Tensor): + edge_index = edge_index[:, mask] + elif isinstance(edge_index, tuple): + edge_index = (edge_index[0][mask], edge_index[1][mask]) + else: + raise NotImplementedError dim_size: Optional[int] = None if isinstance(edge_attr, (Tensor, list, tuple)) and len(edge_attr) > 0: dim_size = edge_index.size(1) - idx = torch.arange(0, nnz, device=edge_index.device) + idx = torch.arange(0, num_edges, device=edge_index.device) idx.sub_(mask.logical_not_().cumsum(dim=0)) if edge_attr is None: diff --git a/torch_geometric/utils/num_nodes.py b/torch_geometric/utils/num_nodes.py index c8c4f63887f3..112f3bb816c6 100644 --- a/torch_geometric/utils/num_nodes.py +++ b/torch_geometric/utils/num_nodes.py @@ -1,5 +1,5 @@ from copy import copy -from typing import Dict, Optional, Union +from typing import Dict, Optional, Tuple, Union import torch from torch import Tensor @@ -14,6 +14,12 @@ def maybe_num_nodes(edge_index, num_nodes): pass +@torch.jit._overload +def maybe_num_nodes(edge_index, num_nodes): + # type: (Tuple[Tensor, Tensor], Optional[int]) -> int + pass + + @torch.jit._overload def maybe_num_nodes(edge_index, num_nodes): # type: (SparseTensor, Optional[int]) -> int @@ -21,7 +27,7 @@ def maybe_num_nodes(edge_index, num_nodes): def maybe_num_nodes( - edge_index: Union[Tensor, SparseTensor], + edge_index: Union[Tensor, Tuple[Tensor, Tensor], SparseTensor], num_nodes: Optional[int] = None, ) -> int: if num_nodes is not None: @@ -39,8 +45,15 @@ def maybe_num_nodes( return tmp.max() + 1 return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0 - else: + elif isinstance(edge_index, tuple): + return max( + int(edge_index[0].max()) + 1 if edge_index[0].numel() > 0 else 0, + int(edge_index[1].max()) + 1 if edge_index[1].numel() > 0 else 0, + ) + elif isinstance(edge_index, SparseTensor): return max(edge_index.size(0), edge_index.size(1)) + else: + raise NotImplementedError def maybe_num_nodes_dict( diff --git a/torch_geometric/utils/sort_edge_index.py b/torch_geometric/utils/sort_edge_index.py index d09d0e2306cf..7be4544a545b 100644 --- a/torch_geometric/utils/sort_edge_index.py +++ b/torch_geometric/utils/sort_edge_index.py @@ -37,9 +37,9 @@ def sort_edge_index( # noqa """Row-wise sorts :obj:`edge_index`. Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor or List[Tensor], optional): Edge weights or multi- - dimensional edge features. + edge_index (torch.Tensor): The edge indices. 
+ edge_attr (torch.Tensor or List[torch.Tensor], optional): Edge weights + or multi-dimensional edge features. If given as a list, will re-shuffle and remove duplicates for all its entries. (default: :obj:`None`) num_nodes (int, optional): The number of nodes, *i.e.* @@ -81,7 +81,12 @@ def sort_edge_index( # noqa _, perm = index_sort(idx, max_value=num_nodes * num_nodes) - edge_index = edge_index[:, perm] + if isinstance(edge_index, Tensor): + edge_index = edge_index[:, perm] + elif isinstance(edge_index, tuple): + edge_index = (edge_index[0][perm], edge_index[1][perm]) + else: + raise NotImplementedError if edge_attr is None: return edge_index, None From e8f752ff6aae828a5f70d7d53324cfbc4302092f Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Mon, 31 Jul 2023 15:47:47 +0100 Subject: [PATCH 1375/2432] Fix `LinkNeighborLoader` producing double-sized `edge_label_time` for homogeneous graphs (#7807) Fixes `edge_label_time.size() == (2*batch_size,)` to have `(batch_size,)`. Adds a test case for #7791. Part of #7796 and #6528. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/loader/test_link_neighbor_loader.py | 32 +++++++++++++++++++++ torch_geometric/sampler/neighbor_sampler.py | 2 +- 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4b6482a6b19..9710bf227e6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed the shape of `edge_label_time` when using temporal sampling on homogeneous graphs ([#7807](https://github.com/pyg-team/pytorch_geometric/pull/7807)) - Made `FieldStatus` enum picklable to avoid `PicklingError` in a multi-process setting ([#7808](https://github.com/pyg-team/pytorch_geometric/pull/7808)) - Fixed `edge_label_index` computation in `LinkNeighborLoader` for the homogeneous+`disjoint` mode ([#7791](https://github.com/pyg-team/pytorch_geometric/pull/7791)) - Fixed `CaptumExplainer` for `binary_classification` tasks ([#7787](https://github.com/pyg-team/pytorch_geometric/pull/7787)) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 80cda9c23f32..406e347b1106 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -204,6 +204,38 @@ def test_link_neighbor_loader_edge_label(): assert torch.all(batch.edge_label[10:] == 0) +@withPackage('pyg_lib') +@pytest.mark.parametrize('batch_size', [1]) +def test_temporal_homo_link_neighbor_loader(batch_size): + data = Data( + x=torch.randn(10, 5), + edge_index=torch.randint(0, 10, (2, 123)), + time=torch.arange(10), + ) + + # Ensure that nodes exist at the time of the `edge_label_time`: + edge_label_time = torch.max( + data.time[data.edge_index[0]], + data.time[data.edge_index[1]], + ) + + loader = LinkNeighborLoader( + data, + num_neighbors=[-1], + time_attr='time', + edge_label=torch.ones(data.num_edges), + edge_label_time=edge_label_time, + batch_size=batch_size, + shuffle=True, + ) + + for batch in loader: + assert batch.edge_label_index.size() == (2, batch_size) + assert batch.edge_label_time.size() == (batch_size, ) + assert batch.edge_label.size() == (batch_size, ) + assert torch.all(batch.time <= batch.edge_label_time) + + @withPackage('pyg_lib') def test_temporal_hetero_link_neighbor_loader(): data = HeteroData() diff --git a/torch_geometric/sampler/neighbor_sampler.py 
b/torch_geometric/sampler/neighbor_sampler.py index 702c7810667c..24c1e2834c19 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -576,7 +576,7 @@ def edge_sample( else: edge_label_index = inverse_seed.view(2, -1) - out.metadata = (input_id, edge_label_index, edge_label, seed_time) + out.metadata = (input_id, edge_label_index, edge_label, src_time) elif neg_sampling.is_triplet(): if disjoint: From 1199597c67ef68e1743c5e61573a6585a7f36f86 Mon Sep 17 00:00:00 2001 From: Wesley Spacebar Date: Mon, 31 Jul 2023 13:20:45 -0600 Subject: [PATCH 1376/2432] Create a small demo dataset by parsing JSON files created from OSE wiki GVCS product ecology (#7811) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/ose_gvcs.py | 111 +++++++++++++++++++++++++++ 3 files changed, 114 insertions(+) create mode 100644 torch_geometric/datasets/ose_gvcs.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 9710bf227e6a..316525789a21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `OSE_GVCS` dataset ([#7811](https://github.com/pyg-team/pytorch_geometric/pull/7811)) - Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780)) - Added `lexsort` implementation ([#7775](https://github.com/pyg-team/pytorch_geometric/pull/7775)) - Added possibility to run inference benchmarks on XPU device ([#7705](https://github.com/pyg-team/pytorch_geometric/pull/7705)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 8734f6285b11..399feed97b82 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -86,6 +86,7 @@ from .igmc_dataset import IGMCDataset from .amazon_book import AmazonBook from .hm import HM +from .ose_gvcs import OSE_GVCS from .fake import FakeDataset, FakeHeteroDataset from .sbm_dataset import StochasticBlockModelDataset @@ -190,6 +191,7 @@ 'IGMCDataset', 'AmazonBook', 'HM', + 'OSE_GVCS', ] synthetic_datasets = [ 'FakeDataset', diff --git a/torch_geometric/datasets/ose_gvcs.py b/torch_geometric/datasets/ose_gvcs.py new file mode 100644 index 000000000000..7156f00c0909 --- /dev/null +++ b/torch_geometric/datasets/ose_gvcs.py @@ -0,0 +1,111 @@ +import json +import os +from collections import defaultdict +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + HeteroData, + InMemoryDataset, + download_url, + extract_tar, +) + + +class OSE_GVCS(InMemoryDataset): + r"""A dataset describing the `Product ecology + `_ of the Open + Source Ecology's iconoclastic `Global Village Construction Set + `_. + GVCS is a modular, DIY, low-cost set of blueprints that enables the + fabrication of the 50 different industrial machines that it takes to + build a small, sustainable civilization with modern comforts. + + The dataset contains a heterogenous graphs with 50 :obj:`machine` nodes, + composing the GVCS, and 290 directed edges, each representing one out of + three relationships of machines. 
+ """ + machines = [ + '3D Printer', '3D Scanner', 'Aluminum Extractor', 'Backhoe', + 'Bakery Oven', 'Baler', 'Bioplastic Extruder', 'Bulldozer', 'Car', + 'CEB Press', 'Cement Mixer', 'Chipper Hammermill', 'CNC Circuit Mill', + 'CNC Torch Table', 'Dairy Milker', 'Drill Press', + 'Electric Motor Generator', 'Gasifier Burner', 'Hay Cutter', + 'Hay Rake', 'Hydraulic Motor', 'Induction Furnace', 'Industrial Robot', + 'Ironworker', 'Laser Cutter', 'Metal Roller', 'Microcombine', + 'Microtractor', 'Multimachine', 'Nickel-Iron Battery', 'Pelletizer', + 'Plasma Cutter', 'Power Cube', 'Press Forge', 'Rod and Wire Mill', + 'Rototiller', 'Sawmill', 'Seeder', 'Solar Concentrator', 'Spader', + 'Steam Engine', 'Steam Generator', 'Tractor', 'Trencher', 'Truck', + 'Universal Power Supply', 'Universal Rotor', 'Welder', + 'Well-Drilling Rig', 'Wind Turbine' + ] + categories = [ + 'habitat', 'agriculture', 'industry', 'energy', 'materials', + 'transportation' + ] + relationships = ['from', 'uses', 'enables'] + + url = 'https://github.com/Wesxdz/ose_gvcs/raw/master/ose_gvcs.tar.gz' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0], data_cls=HeteroData) + + @property + def raw_file_names(self) -> List[str]: + return [ + f"{machine.lower().replace(' ', '_')}.json" + for machine in self.machines + ] + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + path = download_url(http://github.com/self.url,%20self.root) + extract_tar(path, self.raw_dir) + os.unlink(path) + + def process(self): + data = HeteroData() + + categories = [] + edges = defaultdict(list) + + for path in self.raw_paths: + with open(path, 'r') as f: + product = json.load(f) + categories.append(self.categories.index(product['category'])) + for interaction in product['ecology']: + # NOTE Some ecology items are not GVCS machines or have other + # relationship types we don't want included. + rt = interaction['relationship'] + if rt not in self.relationships: + continue + dst = interaction['tool'] + if dst not in self.machines: + continue + src = self.machines.index(product['machine']) + dst = self.machines.index(dst) + edges[rt].append((src, dst)) + + data['machine'].num_nodes = len(categories) + data['machine'].category = torch.tensor(categories) + + for rel, edge_index, in edges.items(): + edge_index = torch.tensor(edge_index).t() + data['machine', rel, 'machine'].edge_index = edge_index + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) From b7e2c61d09c6d36e9a8e1b7980ed144839e4f1dc Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 1 Aug 2023 09:17:04 +0100 Subject: [PATCH 1377/2432] Measure test coverage of exceptions (#7823) Part of #6528. 
IMO, exceptions are also part of the public API so we should measure the test coverage over them, but feel free to close this PR if you think otherwise ;) --------- Co-authored-by: rusty1s --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5155c92022b4..649ea8246cea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -161,8 +161,7 @@ omit = [ exclude_lines = [ "pragma: no cover", "pass", - "raise", - "except", + "raise NotImplementedError", "register_parameter", "warn", "torch.cuda.is_available", From 8c1339c1dcc84c4ee7a6c36d64c93bc085d244e9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 1 Aug 2023 10:21:02 +0200 Subject: [PATCH 1378/2432] Update contributing documentation to signal that extension packages are optional (#7825) --- .github/CONTRIBUTING.md | 13 ++++++++----- torch_geometric/datasets/ose_gvcs.py | 4 +++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 3f2ca08b52cd..d108e0e11783 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -16,22 +16,24 @@ If your PR isn't merged anytime soon (*e.g.,* due to its large size, complexity To develop PyG on your machine, here are some tips: -1. Ensure that you are running on one of the two latest PyTorch releases (*e.g.*, `1.12.0`): +1. Ensure that you are running on one of the two latest PyTorch releases (*e.g.*, `2.0.0`): ```python import torch print(torch.__version__) ``` -2. Follow the [installation instructions](https://github.com/pyg-team/pytorch_geometric#installation) to install `pyg-lib`, `torch-scatter`, `torch-sparse`, `torch-cluster` and `torch-spline-conv` (if you haven't already): +2. *(Optional)* Follow the [installation instructions](https://github.com/pyg-team/pytorch_geometric#installation) to install `pyg-lib`, `torch-scatter`, `torch-sparse`, `torch-cluster` and `torch-spline-conv` (if you haven't already). + Note that this step is optional and only necessary if you develop a feature that uses one of these libraries. ```bash pip install pyg-lib torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${TORCH}+${CUDA}.html ``` - where `${TORCH}` should be replaced by your PyTorch version (*e.g.*, `1.12.0`), and `${CUDA}` should be replaced by your CUDA version (*e.g.*, `cpu` or `cu116`). + where `${TORCH}` should be replaced by your PyTorch version (*e.g.*, `2.0.0`), and `${CUDA}` should be replaced by your CUDA version (*e.g.*, `cpu` or `cu118`). -3. Uninstall all existing PyG installations: +3. Uninstall all existing PyG installations. + It is advised to run this command repeatedly to confirm that installations across all locations are properly removed. ```bash pip uninstall torch-geometric @@ -57,7 +59,8 @@ To develop PyG on your machine, here are some tips: pip install -e ".[dev,full]" ``` - This mode will symlink the Python files from the current local source tree into the Python install. Hence, if you modify a Python file, you do not need to reinstall PyG again and again. + This mode will symlink the Python files from the current local source tree into the Python install. + Hence, if you modify a Python file, you do not need to re-install PyG again. 7. 
Ensure that you have a working PyG installation by running the entire test suite with diff --git a/torch_geometric/datasets/ose_gvcs.py b/torch_geometric/datasets/ose_gvcs.py index 7156f00c0909..6fbd2420394a 100644 --- a/torch_geometric/datasets/ose_gvcs.py +++ b/torch_geometric/datasets/ose_gvcs.py @@ -25,7 +25,7 @@ class OSE_GVCS(InMemoryDataset): The dataset contains a heterogenous graphs with 50 :obj:`machine` nodes, composing the GVCS, and 290 directed edges, each representing one out of - three relationships of machines. + three relationships between machines. """ machines = [ '3D Printer', '3D Scanner', 'Aluminum Extractor', 'Backhoe', @@ -94,6 +94,8 @@ def process(self): dst = interaction['tool'] if dst not in self.machines: continue + # Machines are guaranteed to be sorted according to their order + # in `self.machines`, so we can use its index for the mapping: src = self.machines.index(product['machine']) dst = self.machines.index(dst) edges[rt].append((src, dst)) From 0c0d9c1182f96c27b9663526c47aac17b8ea359b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 1 Aug 2023 12:10:24 +0200 Subject: [PATCH 1379/2432] Fix RPC tests on Windows (#7826) --- test/distributed/test_rpc.py | 2 ++ torch_geometric/distributed/rpc.py | 20 +++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/test/distributed/test_rpc.py b/test/distributed/test_rpc.py index 673c53a694a0..69d47a5ee691 100644 --- a/test/distributed/test_rpc.py +++ b/test/distributed/test_rpc.py @@ -7,6 +7,7 @@ from torch_geometric.distributed import LocalFeatureStore from torch_geometric.distributed.dist_context import DistContext, DistRole from torch_geometric.distributed.rpc import RpcRouter +from torch_geometric.testing import onlyLinux def run_rpc_feature_test( @@ -81,6 +82,7 @@ def run_rpc_feature_test( rpc.shutdown_rpc() +@onlyLinux def test_dist_feature_lookup(): cpu_tensor0 = torch.cat([torch.ones(128, 1024), torch.ones(128, 1024) * 2]) cpu_tensor1 = torch.cat([torch.zeros(128, 1024), torch.zeros(128, 1024)]) diff --git a/torch_geometric/distributed/rpc.py b/torch_geometric/distributed/rpc.py index b6f46ba3521b..7802cea2becc 100644 --- a/torch_geometric/distributed/rpc.py +++ b/torch_geometric/distributed/rpc.py @@ -2,7 +2,7 @@ import logging import threading from abc import ABC, abstractmethod -from typing import Dict, List +from typing import Callable, Dict, List from torch.distributed import rpc @@ -16,7 +16,13 @@ def rpc_is_initialized() -> bool: return _is_current_rpc_agent_set() -@rpc.api._require_initialized +def rpc_require_initialized(func: Callable) -> Callable: + if hasattr(rpc, 'api'): + return rpc.api._require_initialized(func) + return func + + +@rpc_require_initialized def global_all_gather(obj, timeout=None): r"""Gathers objects from all groups in a list.""" if timeout is None: @@ -24,7 +30,7 @@ def global_all_gather(obj, timeout=None): return rpc.api._all_gather(obj, timeout=timeout) -@rpc.api._require_initialized +@rpc_require_initialized def global_barrier(timeout=None): r""" Block until all local and remote RPC processes.""" try: @@ -109,7 +115,7 @@ def get_to_worker(self, partition_idx: int) -> str: return router_worker -@rpc.api._require_initialized +@rpc_require_initialized def rpc_partition_to_workers( current_ctx: DistContext, num_partitions: int, @@ -142,7 +148,7 @@ def rpc_async(self, *args, **kwargs): _rpc_call_pool: Dict[int, RpcCallBase] = {} -@rpc.api._require_initialized +@rpc_require_initialized def rpc_register(call: RpcCallBase) -> int: r"""Registers a call 
for RPC requests.""" global _rpc_call_id, _rpc_call_pool @@ -162,7 +168,7 @@ def _rpc_async_call(call_id: int, *args, **kwargs): return _rpc_call_pool.get(call_id).rpc_async(*args, **kwargs) -@rpc.api._require_initialized +@rpc_require_initialized def rpc_async(worker_name: str, call_id: int, args=None, kwargs=None): r"""Performs an asynchronous RPC request and returns a future.""" return rpc.rpc_async( @@ -178,7 +184,7 @@ def _rpc_sync_call(call_id: int, *args, **kwargs): return _rpc_call_pool.get(call_id).rpc_sync(*args, **kwargs) -@rpc.api._require_initialized +@rpc_require_initialized def rpc_sync(worker_name: str, call_id: int, args=None, kwargs=None): r"""Performs a synchronous RPC request and returns a future.""" future = rpc.rpc_async( From b7a523ab5b51dfe4292c5a6f6e15eb8205fcb57f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 2 Aug 2023 12:57:06 +0200 Subject: [PATCH 1380/2432] Move "Compiled Graph Neural Networks" tutorial to advanced concepts (#7832) --- docs/source/advanced/compile.rst | 135 ++++++++++++++++++++++++++++++ docs/source/index.rst | 4 +- docs/source/tutorial/compile.rst | 136 +------------------------------ 3 files changed, 139 insertions(+), 136 deletions(-) create mode 100644 docs/source/advanced/compile.rst diff --git a/docs/source/advanced/compile.rst b/docs/source/advanced/compile.rst new file mode 100644 index 000000000000..8ba9a821350c --- /dev/null +++ b/docs/source/advanced/compile.rst @@ -0,0 +1,135 @@ +Compiled Graph Neural Networks +============================== + +:meth:`torch.compile` is the latest method to speed up your :pytorch:`PyTorch` code in :obj:`torch >= 2.0.0`! +:meth:`torch.compile` makes PyTorch code run faster by JIT-compiling it into optimized kernels, all while required minimal code changes. + +Under the hood, :meth:`torch.compile` captures :pytorch:`PyTorch` programs via :obj:`TorchDynamo`, canonicalizes over 2,000 :pytorch:`PyTorch` operators via :obj:`PrimTorch`, and finally generates fast code out of it across multiple accelerators and backends via the deep learning compiler :obj:`TorchInductor`. + +.. note:: + See `here `__ for a general tutorial on how to leverage :meth:`torch.compile`, and `here `__ for a description of its interface. + +In this tutorial, we show how to optimize your custom :pyg:`PyG` model via :meth:`torch.compile`. + +Introducing :meth:`torch_geometric.compile` +------------------------------------------- + +By default, :meth:`torch.compile` struggles to optimize a custom :pyg:`PyG` model since its underlying :class:`~torch_geometric.nn.conv.MessagePassing` interface is JIT-unfriendly due to its generality. +As such, in :pyg:`PyG 2.3`, we introduce :meth:`torch_geometric.compile`, a wrapper around :meth:`torch.compile` with the same signature. + +:meth:`torch_geometric.compile` applies further optimizations to make :pyg:`PyG` models more compiler-friendly. +Specifically, it: + +#. Temporarily disables the usage of the extension packages :obj:`torch_scatter`, :obj:`torch_sparse` and :obj:`pyg_lib` during GNN execution workflows (since these are not *yet* directly optimizable by :pytorch:`PyTorch`). + From :pyg:`PyG 2.3` onwards, these packages are purely optional and not required anymore for running :pyg:`PyG` models (but :obj:`pyg_lib` may be required for graph sampling routines). + +#. 
Converts all instances of :class:`~torch_geometric.nn.conv.MessagePassing` modules into their jittable instances (see :meth:`torch_geometric.nn.conv.MessagePassing.jittable`) + +Without these adjustments, :meth:`torch.compile` may currently fail to correctly optimize your :pyg:`PyG` model. +We are working on fully relying on :meth:`torch.compile` for future releases. + +Basic Usage +----------- + +Leveraging :meth:`torch_geometric.compile` is as simple as the usage of :meth:`torch.compile`. +Once you have a :pyg:`PyG` model defined, simply wrap it with :meth:`torch_geometric.compile` to obtain its optimized version: + +.. code-block:: python + + import torch_geometric + from torch_geometric.nn import GraphSAGE + + model = GraphSAGE(in_channels, hidden_channels, num_layers, out_channels) + model = model.to(device) + + model = torch_geometric.compile(model) + +and execute it as usual: + +.. code-block:: python + + from torch_geometric.datasets import Planetoid + + dataset = Planetoid(root, name="Cora") + data = dataset[0].to(device) + + out = model(data.x, data.edge_index) + +We have incorporated multiple examples in :obj:`examples/compile` that further show the practical usage of :meth:`torch_geometric.compile`: + +#. `Node Classification `__ via :class:`~torch_geometric.nn.models.GCN` +#. `Graph Classification `__ via :class:`~torch_geometric.nn.models.GIN` + +Note that :meth:`torch.compile(model, dynamic=True)` does sadly not yet work for :pyg:`PyG` models on :pytorch:`PyTorch 2.0`. +While static compilation via :meth:`torch.compile(model, dynamic=False)` works fine, it will re-compile the model everytime it sees an input with a different shape. +That currently does not play that nicely with the way :pyg:`PyG` performs mini-batching, and will hence lead to major slow-downs. +We are working with the :pytorch:`PyTorch` team to fix this limitation (see `this `_ :github:`GitHub` issue). +A temporary workaround is to utilize the :class:`torch_geometric.transforms.Pad` transformation to ensure that all inputs are of equal shape. + +If you notice that :meth:`~torch_geometric.compile` fails for a certain :pyg:`PyG` model, do not hesitate to reach out either on :github:`null` `GitHub `_ or :slack:`null` `Slack `_. +We are very eager to improve :meth:`~torch_geometric.compile` support across the whole :pyg:`PyG` code base. + +Benchmark +--------- + +:meth:`torch.compile` works **fantastically well** for many :pyg:`PyG` models. +**Overall, we observe runtime improvements of nearly up to 300%.** + +Specifically, we benchmark :class:`~torch_geometric.nn.models.GCN`, :class:`~torch_geometric.nn.models.GraphSAGE` and :class:`~torch_geometric.nn.models.GIN` and compare runtimes obtained from traditional eager mode and :meth:`torch_geometric.compile`. +We use a synthetic graph with 10,000 nodes and 200,000 edges, and a hidden feature dimensionality of 64. +We report runtimes over 500 optimization steps: + +.. 
list-table:: + :widths: 15 15 15 15 15 15 + :header-rows: 1 + + * - Model + - Mode + - Forward + - Backward + - Total + - Speedup + * - :class:`~torch_geometric.nn.models.GCN` + - Eager + - 2.6396s + - 2.1697s + - 4.8093s + - + * - :class:`~torch_geometric.nn.models.GCN` + - **Compiled** + - **1.1082s** + - **0.5896s** + - **1.6978s** + - **2.83x** + * - :class:`~torch_geometric.nn.models.GraphSAGE` + - Eager + - 1.6023s + - 1.6428s + - 3.2451s + - + * - :class:`~torch_geometric.nn.models.GraphSAGE` + - **Compiled** + - **0.7033s** + - **0.7465s** + - **1.4498s** + - **2.24x** + * - :class:`~torch_geometric.nn.models.GIN` + - Eager + - 1.6701s + - 1.6990s + - 3.3690s + - + * - :class:`~torch_geometric.nn.models.GIN` + - **Compiled** + - **0.7320s** + - **0.7407s** + - **1.4727s** + - **2.29x** + +To reproduce these results, run + +.. code-block:: console + + python test/nn/models/test_basic_gnn.py + +from the root folder of your checked out :pyg:`PyG` repository from :github:`GitHub`. diff --git a/docs/source/index.rst b/docs/source/index.rst index cd75bddb295f..946a225b0adf 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -6,7 +6,7 @@ PyG Documentation :pyg:`null` **PyG** *(PyTorch Geometric)* is a library built upon :pytorch:`null` `PyTorch `_ to easily write and train Graph Neural Networks (GNNs) for a wide range of applications related to structured data. It consists of various methods for deep learning on graphs and other irregular structures, also known as `geometric deep learning `_, from a variety of published papers. -In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, `multi GPU-support `_, `torch.compile `_ support, `DataPipe `_ support, a large number of common benchmark datasets (based on simple interfaces to create your own), the `GraphGym `__ experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. +In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, `multi GPU-support `_, `torch.compile `_ support, `DataPipe `_ support, a large number of common benchmark datasets (based on simple interfaces to create your own), the `GraphGym `__ experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. .. slack_button:: @@ -32,7 +32,6 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many tutorial/heterogeneous tutorial/load_csv tutorial/explain - tutorial/compile .. toctree:: :maxdepth: 1 @@ -41,6 +40,7 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many advanced/batching advanced/sparse_tensor advanced/hgam + advanced/compile advanced/jit advanced/remote advanced/graphgym diff --git a/docs/source/tutorial/compile.rst b/docs/source/tutorial/compile.rst index 8ba9a821350c..bb138f5c49e0 100644 --- a/docs/source/tutorial/compile.rst +++ b/docs/source/tutorial/compile.rst @@ -1,135 +1,3 @@ -Compiled Graph Neural Networks -============================== +:orphan: -:meth:`torch.compile` is the latest method to speed up your :pytorch:`PyTorch` code in :obj:`torch >= 2.0.0`! -:meth:`torch.compile` makes PyTorch code run faster by JIT-compiling it into optimized kernels, all while required minimal code changes. 
- -Under the hood, :meth:`torch.compile` captures :pytorch:`PyTorch` programs via :obj:`TorchDynamo`, canonicalizes over 2,000 :pytorch:`PyTorch` operators via :obj:`PrimTorch`, and finally generates fast code out of it across multiple accelerators and backends via the deep learning compiler :obj:`TorchInductor`. - -.. note:: - See `here `__ for a general tutorial on how to leverage :meth:`torch.compile`, and `here `__ for a description of its interface. - -In this tutorial, we show how to optimize your custom :pyg:`PyG` model via :meth:`torch.compile`. - -Introducing :meth:`torch_geometric.compile` -------------------------------------------- - -By default, :meth:`torch.compile` struggles to optimize a custom :pyg:`PyG` model since its underlying :class:`~torch_geometric.nn.conv.MessagePassing` interface is JIT-unfriendly due to its generality. -As such, in :pyg:`PyG 2.3`, we introduce :meth:`torch_geometric.compile`, a wrapper around :meth:`torch.compile` with the same signature. - -:meth:`torch_geometric.compile` applies further optimizations to make :pyg:`PyG` models more compiler-friendly. -Specifically, it: - -#. Temporarily disables the usage of the extension packages :obj:`torch_scatter`, :obj:`torch_sparse` and :obj:`pyg_lib` during GNN execution workflows (since these are not *yet* directly optimizable by :pytorch:`PyTorch`). - From :pyg:`PyG 2.3` onwards, these packages are purely optional and not required anymore for running :pyg:`PyG` models (but :obj:`pyg_lib` may be required for graph sampling routines). - -#. Converts all instances of :class:`~torch_geometric.nn.conv.MessagePassing` modules into their jittable instances (see :meth:`torch_geometric.nn.conv.MessagePassing.jittable`) - -Without these adjustments, :meth:`torch.compile` may currently fail to correctly optimize your :pyg:`PyG` model. -We are working on fully relying on :meth:`torch.compile` for future releases. - -Basic Usage ------------ - -Leveraging :meth:`torch_geometric.compile` is as simple as the usage of :meth:`torch.compile`. -Once you have a :pyg:`PyG` model defined, simply wrap it with :meth:`torch_geometric.compile` to obtain its optimized version: - -.. code-block:: python - - import torch_geometric - from torch_geometric.nn import GraphSAGE - - model = GraphSAGE(in_channels, hidden_channels, num_layers, out_channels) - model = model.to(device) - - model = torch_geometric.compile(model) - -and execute it as usual: - -.. code-block:: python - - from torch_geometric.datasets import Planetoid - - dataset = Planetoid(root, name="Cora") - data = dataset[0].to(device) - - out = model(data.x, data.edge_index) - -We have incorporated multiple examples in :obj:`examples/compile` that further show the practical usage of :meth:`torch_geometric.compile`: - -#. `Node Classification `__ via :class:`~torch_geometric.nn.models.GCN` -#. `Graph Classification `__ via :class:`~torch_geometric.nn.models.GIN` - -Note that :meth:`torch.compile(model, dynamic=True)` does sadly not yet work for :pyg:`PyG` models on :pytorch:`PyTorch 2.0`. -While static compilation via :meth:`torch.compile(model, dynamic=False)` works fine, it will re-compile the model everytime it sees an input with a different shape. -That currently does not play that nicely with the way :pyg:`PyG` performs mini-batching, and will hence lead to major slow-downs. -We are working with the :pytorch:`PyTorch` team to fix this limitation (see `this `_ :github:`GitHub` issue). 
-A temporary workaround is to utilize the :class:`torch_geometric.transforms.Pad` transformation to ensure that all inputs are of equal shape. - -If you notice that :meth:`~torch_geometric.compile` fails for a certain :pyg:`PyG` model, do not hesitate to reach out either on :github:`null` `GitHub `_ or :slack:`null` `Slack `_. -We are very eager to improve :meth:`~torch_geometric.compile` support across the whole :pyg:`PyG` code base. - -Benchmark ---------- - -:meth:`torch.compile` works **fantastically well** for many :pyg:`PyG` models. -**Overall, we observe runtime improvements of nearly up to 300%.** - -Specifically, we benchmark :class:`~torch_geometric.nn.models.GCN`, :class:`~torch_geometric.nn.models.GraphSAGE` and :class:`~torch_geometric.nn.models.GIN` and compare runtimes obtained from traditional eager mode and :meth:`torch_geometric.compile`. -We use a synthetic graph with 10,000 nodes and 200,000 edges, and a hidden feature dimensionality of 64. -We report runtimes over 500 optimization steps: - -.. list-table:: - :widths: 15 15 15 15 15 15 - :header-rows: 1 - - * - Model - - Mode - - Forward - - Backward - - Total - - Speedup - * - :class:`~torch_geometric.nn.models.GCN` - - Eager - - 2.6396s - - 2.1697s - - 4.8093s - - - * - :class:`~torch_geometric.nn.models.GCN` - - **Compiled** - - **1.1082s** - - **0.5896s** - - **1.6978s** - - **2.83x** - * - :class:`~torch_geometric.nn.models.GraphSAGE` - - Eager - - 1.6023s - - 1.6428s - - 3.2451s - - - * - :class:`~torch_geometric.nn.models.GraphSAGE` - - **Compiled** - - **0.7033s** - - **0.7465s** - - **1.4498s** - - **2.24x** - * - :class:`~torch_geometric.nn.models.GIN` - - Eager - - 1.6701s - - 1.6990s - - 3.3690s - - - * - :class:`~torch_geometric.nn.models.GIN` - - **Compiled** - - **0.7320s** - - **0.7407s** - - **1.4727s** - - **2.29x** - -To reproduce these results, run - -.. code-block:: console - - python test/nn/models/test_basic_gnn.py - -from the root folder of your checked out :pyg:`PyG` repository from :github:`GitHub`. +.. 
include:: ../advanced/compile.rst From 94125fa8b895a5c7976abf020ef012d1e53d1f4e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 2 Aug 2023 15:31:55 +0200 Subject: [PATCH 1381/2432] Add tutorial gallery view (#7834) --- .github/workflows/documentation.yml | 1 + README.md | 4 +- docs/requirements.txt | 1 + .../_static/thumbnails/create_dataset.png | Bin 0 -> 511735 bytes docs/source/_static/thumbnails/create_gnn.svg | 53 ++++++++++++++++++ docs/source/_static/thumbnails/explain.png | Bin 0 -> 13931 bytes .../_static/thumbnails/heterogeneous.png | Bin 0 -> 141208 bytes docs/source/_static/thumbnails/load_csv.png | Bin 0 -> 2674 bytes docs/source/conf.py | 9 +++ docs/source/index.rst | 8 +-- docs/source/tutorial/application.rst | 7 +++ docs/source/tutorial/create_dataset.rst | 4 +- docs/source/tutorial/dataset.rst | 8 +++ docs/source/tutorial/explain.rst | 4 +- docs/source/tutorial/gnn_design.rst | 8 +++ 15 files changed, 96 insertions(+), 11 deletions(-) create mode 100644 docs/source/_static/thumbnails/create_dataset.png create mode 100644 docs/source/_static/thumbnails/create_gnn.svg create mode 100644 docs/source/_static/thumbnails/explain.png create mode 100644 docs/source/_static/thumbnails/heterogeneous.png create mode 100644 docs/source/_static/thumbnails/load_csv.png create mode 100644 docs/source/tutorial/application.rst create mode 100644 docs/source/tutorial/dataset.rst create mode 100644 docs/source/tutorial/gnn_design.rst diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 7282d9e8c730..e9eb5129bcb8 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -40,6 +40,7 @@ jobs: - name: Install main package if: steps.changed-files-specific.outputs.only_changed != 'true' run: | + pip install nbsphinx pip install git+https://github.com/pyg-team/pyg_sphinx_theme.git pip install -e . diff --git a/README.md b/README.md index be467cc17d5d..e4bbb2aa05b2 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ **PyG** *(PyTorch Geometric)* is a library built upon [PyTorch](https://pytorch.org/) to easily write and train Graph Neural Networks (GNNs) for a wide range of applications related to structured data. It consists of various methods for deep learning on graphs and other irregular structures, also known as *[geometric deep learning](http://geometricdeeplearning.com/)*, from a variety of published papers. -In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, [multi GPU-support](https://github.com/pyg-team/pytorch_geometric/tree/master/examples/multi_gpu), [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/compile.html) support, [`DataPipe`](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/datapipe.py) support, a large number of common benchmark datasets (based on simple interfaces to create your own), the [GraphGym](https://pytorch-geometric.readthedocs.io/en/latest/advanced/graphgym.html) experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. 
+In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, [multi GPU-support](https://github.com/pyg-team/pytorch_geometric/tree/master/examples/multi_gpu), [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/advanced/compile.html) support, [`DataPipe`](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/datapipe.py) support, a large number of common benchmark datasets (based on simple interfaces to create your own), the [GraphGym](https://pytorch-geometric.readthedocs.io/en/latest/advanced/graphgym.html) experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. **[Click here to join our Slack community!][slack-url]** @@ -172,7 +172,7 @@ For a quick start, check out our [examples](https://github.com/pyg-team/pytorch_ PyG provides a multi-layer framework that enables users to build Graph Neural Network solutions on both low and high levels. It comprises of the following components: -* The PyG **engine** utilizes the powerful PyTorch deep learning framework with full [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/compile.html) and [TorchScript](https://pytorch-geometric.readthedocs.io/en/latest/advanced/jit.html) support, as well as additions of efficient CPU/CUDA libraries for operating on sparse data, *e.g.*, [`pyg-lib`](https://github.com/pyg-team/pyg-lib). +* The PyG **engine** utilizes the powerful PyTorch deep learning framework with full [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/advanced/compile.html) and [TorchScript](https://pytorch-geometric.readthedocs.io/en/latest/advanced/jit.html) support, as well as additions of efficient CPU/CUDA libraries for operating on sparse data, *e.g.*, [`pyg-lib`](https://github.com/pyg-team/pyg-lib). * The PyG **storage** handles data processing, transformation and loading pipelines. It is capable of handling and processing large-scale graph datasets, and provides effective solutions for heterogeneous graphs. It further provides a variety of sampling solutions, which enable training of GNNs on large-scale graphs. * The PyG **operators** bundle essential functionalities for implementing Graph Neural Networks. PyG supports important GNN building blocks that can be combined and applied to various parts of a GNN model, ensuring rich flexibility of GNN design. * Finally, PyG provides an abundant set of GNN **models**, and examples that showcase GNN models on standard graph benchmarks. Thanks to its flexibility, users can easily build and modify custom GNN models to fit their specific needs. 
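The benchmark in the compiled-GNN tutorial above can be reproduced exactly via ``python test/nn/models/test_basic_gnn.py``, but the setup it describes also fits in a few lines. Below is a minimal, hand-rolled sketch of the same eager-vs-compiled comparison, assuming the configuration stated in that tutorial (a random synthetic graph with 10,000 nodes and 200,000 edges, hidden feature dimensionality 64, 500 optimization steps). The ``run`` helper, the Adam optimizer, the three-layer depth and the dummy mean loss are illustrative assumptions rather than what the actual benchmark script does, and measured times will differ by hardware and PyTorch version.

.. code-block:: python

    import time

    import torch
    import torch_geometric
    from torch_geometric.nn import GraphSAGE

    # Synthetic graph matching the setup described in the tutorial above:
    # 10,000 nodes, 200,000 random edges, hidden feature dimensionality 64.
    num_nodes, num_edges, hidden = 10_000, 200_000, 64
    x = torch.randn(num_nodes, hidden)
    edge_index = torch.randint(0, num_nodes, (2, num_edges))

    def run(model: torch.nn.Module, steps: int = 500) -> float:
        # Time `steps` full optimization steps (forward + backward + update).
        # For the compiled model, the first step also includes compilation time.
        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
        start = time.perf_counter()
        for _ in range(steps):
            optimizer.zero_grad()
            out = model(x, edge_index)
            out.mean().backward()  # Dummy loss, only to exercise the backward pass.
            optimizer.step()
        return time.perf_counter() - start

    eager = GraphSAGE(hidden, hidden, num_layers=3, out_channels=hidden)
    compiled = torch_geometric.compile(
        GraphSAGE(hidden, hidden, num_layers=3, out_channels=hidden))

    print(f'Eager:    {run(eager):.2f}s')
    print(f'Compiled: {run(compiled):.2f}s')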
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 3d437463d55c..d11fab31e130 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,4 @@
 https://download.pytorch.org/whl/cpu/torch-1.13.0%2Bcpu-cp38-cp38-linux_x86_64.whl
 numpy>=1.19.5
+nbsphinx
 git+https://github.com/pyg-team/pyg_sphinx_theme.git
diff --git a/docs/source/_static/thumbnails/create_dataset.png b/docs/source/_static/thumbnails/create_dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..77960ba0ed765fbf00bd33bf64766bec5871e0cd
GIT binary patch
literal 511735
[base85-encoded binary data for the new thumbnail image omitted]
zoZ=kjI8mTh0i~3dU{GlH_Y@1Xk|+%D$uu;>79cadq_m`o=298~uF!}SIFD2M;HEw1 zeLP&4#mhT#@7R@?D;}B8a$57bG=EwRghFvlR&+DgEQPXgYEE>G#Xhp9l0!?A0F^1& zej&IVC`16317D81@uD`+A@r<`->Injkb1P5A=k+02vSA$BnE0Y4<+{i&T13>bL^uD zwkTNZfF!wLffPwIh+HJg-*{*Jc+2sK1%jMKCOMl;wI2S%^KXNMzIT#uKTq&}{4qIwpngoe015Xr{;{bor+kQ zjJ`H*hTDV*)`MO-GVBA}H>`1J0T~P`le3zN+6J;14VzN50lEXw z3T@IHh6g`-RG3=KoQf2ciZH2HU|4lSTVhg-sJl}uZwJ)ZZlBGmNYOxl8HQ7kN97tI zRDkd(F#p|b1W92#l1Y%^HE*ph`SEu6n%lpNWtLG~n*t;0yP5WXu1tO1Aj zwa=eZHhfgw&>!Q2ej^{C7>9&6uM;ZihDuPs) zm|}hUVf{@d{Y@K|d_o=AD^85-EXdBFWC|`tSc3u;fnq@OKjq(z_AktceMU2y(f-O9 zbTcfo(32cjnNeF?8x$18HCtmIQLELzQ{6M{&-RCpX2sRyFq=&UBmvcaADsSZr1ez9 ze8@n%2}4dy*yz4BLz=3BWJ8c__7EE_`V;JgbC7&O2p*Urkr6Jpc8SGjsacjz&%mK+Ffc@3vsujm_yziM;N%t4@~_<5KIJ+d|`F zt6Ce`OgN%jBjL&*^e#LOqS%~%9AoH^R&&0M%8I(=%uMFN8dZHnW8|a4hwer9!&^hD z)a42wCjcP@ir|J&yHvZSmlrR({L$to8!n`tFI7~coGu3qa*Fbrg{>k~0?AAWxd0ZK z0+Q&dw$w-J2@Py0A}f#;yr_F|%-{T@(~Cd7vU1b&4YN+nSnTkzMaZedt~lQ`A2}_D zysa-f)&|-*F*f3#P1`ikHh$q^g1?J9!(=k7T2*Q8f9!4hw+CCB4Rah}a(v+YJMYfF z{r(3xKK%UNH+y{dFE#({Nt%}usAYs)!Q5Oxk=m7U%HR6ufCEchKKsFA_d5sPv-P)3 zmc%T#T=d4VH-?#yGd(*7IX4KgL}uTZI?Z~dP0E3v?r!|fdEJAg2Q*M(A_1ph2YX-O zFkcSp9n#F%wg5VrQoO*#zO*6%;7x#(kka;R&JO%G0ZZ+&7h#V|t4hQYMM_dCDkGo| z3??DAUvjgqZYd-f?pI?fCC(O$0EryLMu9Juln?Jy+#l&4*hgk!R{hbs>I1hj4tU-E z#CG-6W3NrHpVIf#pvhO?T9$Njo2>d{0XjnnrU<|ro$4PX5+7XIs&)AiG_8dFZ2^|n zs=nU>kXN1|Ly`xMEk>q?uW@RPQGqHH1mDfQbG+cEtu^m0E_{1&@lji4X*u9`fzY7z zhYvj5ZaUSplrop#p=a~5rMZ2P`0>NKW=`hq-JyPTpAd8dHM`tk{hMF}YNDI<_!}NYTy{I1ZQ(u|U(9jSS6;)DF!o7xhvA%$V zK|tpBztZ7j6@yVsDg#27WY_mEEkkT3Aa)~=bL0EiO?mhHBCopVGB}Z7@@_HfN&0i_ zE9Y>AjIh zMvk=|J;Zi!-{XCz*iU}{;{4rSJ5PEY?|>h!gq=oc%E0-m4cET3#^+`CgH}G8;sj+d zU7VL@!{8{X>ZIEIs(pX*`ssDYIYW+)m~&;`!Uv0%1b#8pVGOblndvr?(Q(aqt zKnPSyKoklpY{DGpJAN|n$tS}c#!o)`>WF=#Ha=eS{nPbt+s>M3F@A*eIK-w;9~V>P zFf#4@xH-1d64BttId^`!xbNblORzETKoD!Ah11Z0!a+aV-4UG~&LINSpHsqBFrR?oQ5*Uz5!SQ2_Qzw)nNOeCLDHb3D z)QBmC3<5}B{@BUgE54|XN>p)fU5!-5Rv$xAK$2Acv-`(rf6t8AXEdW3?WM+Wo1xx{ z-Z%w5=2^~@*V57w5fK4(k`0XGzrTv}(rj`cL21+?1rXfKz5U6}uMxYch|_q)X_D!M z*S~nOIYJO60v#|*3e~VgLiEBu+CUf<`ZzeTGPMdQ(rS}`Jilp~%iJ+G1I#WC?R#+$ za;^{JGGu_0*$~IkGp@fe#dY=&yD39XPc}U^X58@!pI!NQ^{tOzwH!3s3K`+lkHJ3V z{6NIA&tUtBldWg3bXt|dPmRtE4G8fhVaF|{K;7}c=cemj$L9f$yLfG(!4XeA{c2j; zD1DS{gT*s8dRZuBVgJQ#BHBc~-_ z-`_C*>eBgMOAyDw$i;sBt_*wi*4)j$yK>P=DInwk1Qo__zO&2p&`9Q4#0eR6(F{3) zOmP@H-g?+Ow`cV|3F~%|OMMYXWRS~{v6g1*?|c!}>ffNQK4yR7h?Qk}ei6Ie!QhZ^ zPGxv47!WK3O)i>*MN*%!e6NY(kfrKeJEA?hq$u{OuNQO6=Vw&q#23&KK#M?Ld`*27 zY|uwh1d8D?ana=!HF41e+H_1BXmFKWA*R{XxM+WR&&HB(K5qDSdB?8p_1Et5M1c7N zK%b_D?sYll8hiUrmV2BarVa>_fck7x%PBE9BBWhVJOto82&4+2kpYbm&^!$g2?$w* z99&Na$i!#_DMtF%Av4mpxzo2x`43~lThoghLkc@BCY(cED|7fvwK2;@k&uhSPk1C$n*vUVZP;ve&MDJj!y`E7osqe7?;o z+v;fO56dsVKIKIJ;m48D_GXi>%^2=5d4|)wORjvgVBeC*anIU#E!QqxsxB;IfrhB7 zFL!c2lad;nl$M&Flb@QE7aSeiBSxG9f3awXi0`FDA@4 z@2FS>P1t^qpu`8edE4r$myYo zeSgHs1hE`A!|qMje782xT7y-d2st_X+#9Cn`ZH(R=N7{JTpjkckIb)>u4TxtO4(@T zZhtBI>M1|#2Sty4%NSS`au|(c8qV9tVIOky^a6+ZG15pr=u|>a52p;wN+v@Q-v0ya(~biHE1lV-eoE)^gk8LVvW{#h zBucMm--#3_S*JS8z5W66D>CT*aO60G9P9hV{nh7!Tmw^M60>uz``zE@_Wg)cqmlCn z;*KCL{f4>>GqvqwcDDcYJLCI1L$@w+#{{|D7qLRd95%BEJH!W#7*O1}bARtI<~Ke3 z6x#45EE3lcY$s(P1Ii>&bc}v64S4hiPsd}v%f&J0y7J?0-F+OLQ7u(~XHS9%5ec2y zVi50_iYwazXeWTQxS}wpAfLyRCPWv}tXQ$WSb?CRt}>wnwT0m78D3f>$V!HeYc}Gq zj4z5$tIlaq3Tvh9Dj*R8K?6`;%t(j@AOwI+02<-QsR1bgDlv>D0~rPv4@3h1t_fh{ zWq`b2fy}Q!mPDPa1DA5^vOAg+LL(*Bt+a|;H7RI-$d#U|(O$O+VbSjl9CMG5?FR!DcL!Zybe1c(NZ|_g%^2rDomV&bs z?^*VJn;7ezFMin{c6js6O%C3c1@#GVmcVZY(J4C`vDn z4Jh{B@qFjh3$slwjYiHPh%GYIVZZdrJioY8af4Cb(QrmBK)>EZJd5` 
z5-b5>j{Fp2;xw4~X=;lwoY$XqULuZ2pUW`F`|jyCExeDQY>{$hapR!_XYH?eFtNz_a@3|h&jKRov@G!R&hnjGTep9wnl>(B z#p0^HKeq*h!)hQH=+^L9UA<0bBgs|;6mlZOG{~(6k9~lXEAV^{D(Yc8oRv|e5;Iyi zd>1--{F7IvM6CQmG=HIF@(jsVX85pC&HegijT=_|;hT-N=91cSt{=}o zOcF5K-!mij8O>-$`ya-&*3c6k_h0?*zeB@YCW5hGuKmW#OsP~ulw&4w zQV9qF4R{UeqV$evW|M>Vtb&k>j>p6;b}Uv23@dLg zC1FV_q=ZL>&%S)LZq|djCKrbwHpnom!HaJ%cCEjW1d0OGN&CaCwmkmvO6kQ=>GOC+ z=$Y_S>s`M!4>aHBaqyt~(fjcpN`U9*6>t80%cdopye>VvY<)Q|C70P@u# zx9h2QcY7RMe0t>}?<2oDZ}$qn)6rcUofyuXQY$UJ6nHnVIO1OTt>o&+tok_no2Od% zRT@Gn?d*#44+BCJPLEtGRpwwxoZZ;wTqN6=fYh=EL1;t-|MX*P+en)=57&%4K4Ik1 zQSUgv_x zS&U=|X@Fy&h3@aU#+~m}wGu#$6Kb9Fm1QmnEQfCR`1s)A&SuQeA=b!n zhanT}M$NE&<^9v|{B&=7kvdldx-l-j4;bhO8un@ZyZ4z6`{1=@&YxjEr3SWK!}TrX zIFwXls6wN1c02DI;vJakdpX8=!>u*D!nbb=-0=04&m&udU@0jDMT_NfzFUy`w!BXLMhR3Vl~Vst15^b7vkAU@c*&*-EmQz?b;*K1qA`Ica26()YvuA zXw;}N&BPXa?_!~MkS-{Qfb=Fv?{(?D_ulIQ%l6)8pKqR7BzaHXobP?l$vNNm$KJn< zf!*2J*_mf}uIIk*>$?2EBE38vb^Ycq+bowY^w@o%=F4M+??MJ}Fhv&7TIhpa#aRhr zv{?drT6$`;i%O*3ilxP6{e9&2J`fm5@(m!hb>fv)II_88;~fP#1ZicT&xY8kyT2s2 z%-TMzleYUgU*bkkjPL1x|(!6);s2WnOjfC%qdA!J-(BQ&EBoW0t%=F_^>Ydde;0uLS!we`c1Ti;Xt z(#yx~RcTH(@blxOr2{iduxod}ymHM95jk-yeaaL>kl5zObMJja6|59}PF+qyX_G_G@QZ z(R&Zij@m}p?<0mOfQiX*37pc!Qcejp?!*xW1~H+Bu!s|r7~Ry_2(f}>ssZ0G_q}WD z+qJJgUHE#www1Dmp4?9R4aNn}-P?UGM&4O#{KZ=H&-T6DeZuERSZ`P#7_d)qbkFu^ z;kMp3zvbj^5BJXnBNcs@4xc~1``E+#k4zjadIZCw^~lg?qNUDQPwD!h-i(v8g_H(++u@77HaqqjY&+-p zi$|Jk8>N{In38Fg>5rM$|B8&7WXfm+7i@-7!#Ed}Ed*RK6odmpmPp$F*j&?+5S{4d z@rF7<1W+<$Jik9XxH=%ZH8Q0wKCvVU>it4e2ll1-RLNjUXT<%eOZ#1S9trqviNWf9 zulMY8-?rXv`665G8AkF-R&!JhG~2N0dJxsV?Rz=5?u|9I;YC|Otgp^* zs3`z|L}3r5<&;rbQ`PSRLZ_Qc$@M!8dk$3W+uM5g7U#7qo5h5;Lv2HDUrz&vL6vGK zVyl3h+>DOyDxBqj10gmQh^QGc0YcUUyl4VEH^%!&3laeq7sUo1HYR$hpc;Vll+hKm zjzxek$6?LlNIn%xws+M$9cyGZfl%=MzsJ2lF)s=PC~) zNRKMQn1M{FD-Rq#%!rByZGBVvgg4~!K(y@L`qoZrz z@FZ}2DUhpCN_gL=9ayA`KGbE7v?>99BHZ#{pbY2Vz8l?1zuCeo+spHH#Rpg zc?`CI1t&-<7;dIywUyXcxo-B{qHU`>>lvc+6j}UGX`lV3!=8sW+HTc&wm{ubOUHWd zQj5iU2}VtzC$c{7ZMruTFd8~4jr1Ne`bIDi1V-7ld8I%6c;fr(KYnp>_ua_nSHo`E zWtlpZo9++UgBT$y?kb2mhOsn$aXM^Ze?K8U8k>^aiNld~Nk2IMyhd-yH$J=0#{Im{ zW!JAhr`#*tL#x9o$19_XquZ!$bU>p43KNRVLGJ?r26dQHB|H`o({XcE2#WY9D6Ok) z=xrWg^cA;de<1i}RK-%E&ne{ma&dkD8D+WGcy4+5`5bFyB~xUckPpKCtVnXA80>%qbsQFmUYJM#c-f;41d zVw#wkIXFyz`pnMjO<;Fj1(e5;z*b9XY7N#6^(PQvd=41!^4eUQ~RIOsfDOze$``1`0}nq0z{uO~qPT z6a*p4&0xwQ6B^1a1!PQDO`<-NH%E!TY%$oj6@32#`0j@$Gs~W$5{O43qk{w0Y+75v z!w3Au3wyD*tIm{YC~%f&H|y(n8kmN>k4x+s?t#OK5BOZTJP7oUYHMdDr;qxEF#W>8 z=R3w`&E?7|0YpwTM;qwq0G$P0Pi;Es_!pAJ-ySm9Ckt7~;%|#Tw)71Q3=|d?*4EZa zy?3Rr{*e@nYX!r6+(#Ry-^G5R-4eFc7l@^1p2-R6c5l3jnwq4m|7q=*X#^?#TnuGq zW@ZWm0-k^i(IL*3$WARPpJ=!hcmET&HS>l0LG~5?Nt3bFs0~JY#iD3r!^(b;Nom z@&cJ-qrTticw}d48|X9eHtJ_}@g(E`PggffGr;3vFjNBA9}2^EKiV_@=92lQD^@zJ zU#7qKaQK1UNjqk|L=f!p3|aV0>5R)EeVZGB-tOZ=#J-8ayWV=An{AtKyFk%OY5uE4 zt2{r|GFbGP-Ok$ydQMfIQDdo&CGJ6eQT`p_@xy7&pofZMxG5z(30oo}Nks7bVkd(m z08OnuwT(@X>A@%W_1Bk`XGvi41<$_VGQuQ4jU~n&4?MEMVWIpz3)tG{-Q#p?AhKKar2WI|gM2hccxA%%Tn-c&jbdId`WMW_C*>{(pgzArhQHFqoeUtX~)+_gK zr&s0M1Uf!(x#Je}w5mOdBy1&v$q76c4KvIVHV}|1qEpQu#-2Lcfy{12<_P6xbCDS> z%ht9pLvs7Yi((lm%l%qEd>x(nn#fnXoeft?L<^3`$*<0=YF6}g@@%gXcf zAX_~-Gq!)Q1ukOnli1n9EXc2g-gFWS&Y^4p0iIB?%bZF8(~1#)ViA1@1w?Mve|`Jw4L*ayXpnlCiXiJU%`i9UV=h z(WcEp7-S~4m(-NZCSk{|gnR2=tyVBW=Dt8Qt&jz_h=wU5Z;HrS$w79i1%lWiT27iL zea@th7ZU-+I@r3TIfpNvY_2JCFf+(WiV^b36foM1)*p2}I^S@Kyos8kwYrA+TwR9+ zi@X=9yQ?DRh?ceD7gozJxg1Z-@-N7UPfLs~s?R%Qctrh{wub9`$TPK;Q*xZEWTuVW zRN3uy0957dfm*B)GM}&%@@f-j>BgCwt zToEkCvKZ`PLT&<=Lx<*gA5S0)gE*kUwZ-vt)DPS3HlO!7W1D1KIaW;ttR_xtaBb+d zch}B)o;Qd!3Tq4-5ccvQWtb&3850R`TMen%2Hsv+S=X3fRmK5mPD4pjSz1Ka`zXTu 
z;)Z;-n1KNiUoY?K!pf!={OZ{Pwo)xY*>%vPSHvxb!Il z)~u8i2A3v)9bhow#G^qzgR~E@y|?#$QY06CH8u;H8jI>1S{j-=u%oK%Xl&@I8|Mvp zCA$7%{QXx?4<67z`nEV|5R5S;bm=$2794Gq-bZf@F21|C<;j{Yx3q719tp2;Yv$LE z0pTPZoI-eM_>-kcITiW*vzHpQKkq~44QMXz+PJOv>tFhfeJ2cKg6@Vsd=&gJxht)W zIvAOp4v8)(fIe(u;PHw-e{QydLO+7gEt+c|9IqoRk=kVwg`vgH4!t1za*!afKshoCAz)dv@+`P?$B0%pIKP3g!}+ z%Z6F1gCnz;VjUXzv>#k<20z?qe!Y7{XK@dr*rL6Vxoj1fF>_=FGK3&4T56+P*E7%j z49=b5?%vmW^aT5p^^_UNgr+K(EiclYJ^0g)Jtajr{$)Z_4Hh4m+q8Xqym_8x!wTK8 zU7wG9d!Xvp)s(Idcwj;SU~^#WKvUIVT6{WAL1_V?1H-6g<>1t>4YOxAAlT>1Qy{$; z;Vxb-I)4)l`XIHhk;UH}GT0{zS;*q=45>>S_{aigGMNMdAwNH#OeRY=z!00X@+-zM zA5Z!$Dz#z2d#3;x6bWG-K?t~PffPH?M1uFJNzV)oIy*bDU#ByOEEY>jF~KmO)GUNa zXFbNN&hKK?N_$+Zb65APy))zYkCJp*?bX(|8mL;{gMNNvqRErGRFAh z*azk05hB7P|Kuq*UF1doUHum9gQOEZokoGpZ4wcQIGUA~US3j7r|~4Xtg*ChkWHs$ z6Vg}`IvbY7@#k^+ummSYuz@JGC@sGs4}*PNJR4eF(b(NSiY-Q(TDuzRYwH?HZC@Je zy}0|pRnIKYHmAL8j7O1r{IOY7KHyFOhJKRyYP+>+rV6tkBkN4GFNOU4p*OM@wy|M2 zhYNN1D7|^H(QG2{@#r~yp>6Hw*@wO>)wAx^ds}t>?W?UXwyrW&U-x|Bsn;jyd$ zIlulGK0XS+0)qkzyCN|Uw*1BwK|vzGU^B71n9XZ+E_7vNN14CmF>)>c$hV1M_gU*hq2LJ7*l zC3&Z_eYolboW$r-M|ptPtyp}K0P2e{Bs?E~KN}kxDOp5n`zzgBadB~OZZ43D1xf|p zgs|Z(J3g5>F)#th(e$L1cAp}8h?)aT%gwY!W;!5_L1y%LUCKcFsqXTh(2i!Hv=vl@%t8esy^d|He#WoYO8@fR-SF3 zqocogsm1EmFV`<{TBK~LGW&&^ww31Ams_p#Y&y^$o)i{CV_Zn+z%5Eeqrx#lU#`Ii z!yCcZY>F-8CZpd~`*d(RF@S~AU?U?`mxHEj3_2i}4(G?0M-j>i(lbtFGB|+G1nj)V zypXuCmga70c2Fo5@dSLRNGCzj-(eNt@{Afno$Wg$ z8O3&0-`im8Y|CWQ|3Nn!cc1v3ZT&BYw7ySbvazFqC1x5eKPxLYHDkDE7z1;#%#NQE z3WJH~B>8?p?i@g4V=DrwstMj-2y;sqpb&E>1*E9-@cOPQ>?@)oNpnS8Z_^OA!Eoh{p`qxarsb%mV?1xG z1BN}IHh%L0(is6o5 zdslIo!X^5^Mc#Zix#Hqtk!k`}AR@l6Tua|nliM_kTH{Lt+st&{(&m|}uNyf7BNHXNs^AL~2H&<01A4?Eip(^io zjq`Ek=pw^>y-F0>^T)VW4y`_JXa{Y6o$giL9W`=j0f-Lco)`f_NJwY_d)L!PFw>Db z@N+np2B7S8;}zqmK!|qJzn{zq+exHT%{?kM*6D3_4rKsspg`#C?Ckb$ZCVCAw5~Ro z^wXT^=u(=8H(D^1`CZ}TQnJ?3BMYhwYohL?XeA0FHR&sZQkF#_%-PgdDAW-|9Labp;3FS<)hIhB9-<{@wNX7Le zpNZ4HNt6`6S7q!}EhKE~mFz_xCLIa@;4Wc7k-Y?hgbRTe z`c}Ocibq*nIXRor+vo=yeCq^psZOL#1-0`@GM~>sqwpWjRQUZ^+2Y#T>WN6`D_e^Z zgnHIH-f-8Z0kRG%7C@FS^2m7{*|D;@{&axeb?RY+#CU3-4xj;q6wkZnT9)VFgrQ`G z%$iczAzGoeMwjN7(y=iu%#5DTyP{}>b7xgj_S(Xps(9M^(h>II`iQ7;e*}Kqd{0m& z(Fu>S?uVsLPHogZ4|?gB$?zRBL`MQrfm4zH6{m;fZPQsVy12z89m7Wi-4Zpk!(-} z2aQ4oL=7GZod7pdWvn43shU$%-FI;Bz2da=x|+-X86b`i#1*GJr$`}`;+a^yD#mte z+Xs~sRR14PDGC9ae<^(0TH9IBJO{DXgK&Rq8S33F;{CKu=9*@4zA8(K;Q(uo`{p!nTR{?SS2QO{H%QAhEYx!@^l)9B#9=+zn~t;Vjn_`<_~rAz z%lylz^4{KVv-hkQym+^pip^Hai?N10XP`*>Us%Te4csr?C78ze&TZHkeuo7lk%;f| zt-6{HCMPK+MXJHsFdX8AYe>ehWTVfXt|T?IBto4#^{}rQ>28pJ5TTAkkdCdZt;^%+ z{+(27@HZS24_`uB76S_-^WGX+&VUzCOI=HKLv4rbkRi=KL#+CB>}0;=xg#liI?I9= zyG~Wq+KkARDy%A`ey+3V-Aazj+YC>?_96qDR)zHEKI2JmV znud#$fi68vQ4mV-@ozTwbI;q<06p7J=Ophe%s`~;jrM~Vwo$v(%Fes#)K}JySC&Wm zYc15_GK5n$_!gWUgyWP8qcqcN-_hYgDLCJsoL013EpFD&690i6uxlODvZ60-kGTd; z@*O-VoBK47fm%P9U}0FmZ7Z@s9%YsV`vRNejbrYGbc6gteO4>2*J}KJzuU6=DDA+?Yw8PD-o<-%`Xt-SiR41;ZK3{7Q2|Ip%1Fes5fLkHQi2~PwCM!Wh!@eyBhK( z0Mf^H$Nv@=QP(e>Bvz?!l~>;l57wtm2Xz!J%$A>njijK}N%$&X>#I9sM1#JA@#pe! 
zIF);TB%iL-to=QRbNH*1z8vD}R1wN4g2>H~rl;Y4QLUi$r_)~WjJ2;R94Q|;V?$EJ zUsU7`h5*ONt<%>^Cr%*=DJ>`K_fab=mKMwd$ov2;3*a7VZ^n4Ir|OwD(vR`KHrRvk z&+qCEoe>srV}0Gx(Gd$9+kqxbvT%&18bB6)9{i&q9sf5lJ4hDPetqObxmPrJ)jPD0 zW9HUP_C}WtzFCVo`)C3JA$XA@KaKu!sNdaqAf0T0Qy?Euw>ZbbZ*!OKBG*m7_4+{+m zlc-ZGawv&x=M@H!EhQExU#RZFa-_;pbv>+#$S|#)BZ)6jeZ@&2u2y_1k2aUN}ueBeY^c&cFtMEMJ25K`)|`;il)m;eqI z9Sv=t*Fu{~a^B19#s7c}*M}5U$T<0|l9WxW2j1hgorW;4ywxced4%9~tmJ@hAkL#V zQT#iPQ;H}})f(h=2~{@bOoV7|a8TG#QBuR>==K!i5bVo9Tf%+{@667fQInfdf~JNd zu)Bjo%hD1kZDbWa(AkU*o8e8qJy^_8E?GI{FbO3|RaHj*baf5|1&z<6((UXs?J-gP zQm2L+2O<~~^yrN^ff%FO@LnC`v#9=z)Wzi3clZU_@9X}lnzSXROy?b~tqaVf(LG@4 zn)uo+yR}_S1vcqqiJ4p^%jfd7(Q$Q}qCW<3mL#F?$QFI`3DtFjpaMA_v?!{U7Qd#nEJwJgJT+>3uUYI}0DYQdt8Mk?cDOd!1veh#G@r9-$w*s;E$5o!RYbmTB)QFttitXgFN+9c1D}P5QI-3XY?4NTU!kqVqezktidve& zB8J8W=g@X9!h*vX9ys8-`Dfmk@U>Q}PqFi`&U%$%G$&MT=3|)Sc6C}t@!s^sqh|Jv zHj*IVGfDMnIB@6@lSyi7BEsJ0)mG_LG5gGnySc${B8YR01RON zQoa=U<@fso2J7}hk7jzud`ul9BO_9hlKBM%Vxpp_&hIRAgT4rR0bdMVU5gkY!bB>G zDS~i+)v2tbqmj2+E)N8LOfO;X>-J{(c+=CfuF!FY5&PL1XW{X?d7Wp+=CY>YfuFH<(>i zaoX7=r)uV;0W3I@ssNkF0Pj=ABK9INd4a;V=wvpwfDuy#hs_h@>#|wJZxd6W;7PK= z-$qLIZFT5xf8wE%5{0}aPnl87jC=2aopLDs`9mrzeg+7{w7W1ruQ-gU49Cy7It{q` z9xQhRXL>{yXCcp#P95k5k-@iwLm0T2>`-hE5rK0rU?Nf#s#aQ|7a(R|ozd~tWHmxN zmF@EMSJYr*B z@xIng5mWdwZeu;87CI{~4gnD{;7dzGgLCTf-_Vcdf4B4BEEpeTu(LzjFAr<$%Hm=& z_LN>@g;nly2v6rQ`&&T5QIfwvF5JQx*L`LzkSNA|xa=S{{mm^$*>b$xZJ6FC*bjCs z+8_NKvr!<>T7s*KF@N!tX$JlE6-7K@%M6gD;le-K`~#SE-f$p`LKo86kbb&O!eNsy zTH38O?yQGGHn6+BJohiRoXqI5?qGbpPCtlK=ydrW7VLI%J~`R`O!xGzF!iz6FqQpN z69q*9(>Fai;%aL*C=HBKEcL%s+H;zHdPQn*spw)SCWBh9IVO@r5OEToQgj)cXgYDa zPEX`=3#cRP6E<|czwl>Q=7*<&X#ct1PDmBw>v`)V5aO#P&c$_>6>Kx5%Ol#E8{niR zuPQI|_FtR<)5g+wWp1Y;pHL)Eh?bJ+f#G;en`vH~VMXK4awFe2v{sV;lGiX21I{8& zHEobh83_pqseNlIF4ycwm+eD>iG!S7Utcdxg}2eknzt#TV3q&*3lADs)NN0Zo{B*a z0CZ_w50F7bm=T;_!~P~c;u+t0B+c27mQEJw3&VlDfijd5m1qWX`(B>g$|3|{(1`(M zHvqGh)A>>8xwT+zix;~xI~$jr9D+OVM*#ebe#|>vl>LVJ%}<>eA_@RWszwr~xP%X` z$~Rm^?hPF@9!vbI=cE%k5t%|m6*_d>(euRRdMPZaSRXWl{;+x@1SA#lfQ?lVolmYR?|Ks#e|^E5Isow?**k3)x=O&kfcoa6qh$!bZsI{jw=A=BQ3 zlhYg!eK3Gl^3#QYPGS07Y*mN&sX z3N0Tj_hYh-DAy326EF%lHyIdfpmQq&p0l|Hqs5t$08jRlUe3>tV^s6S38sLI;$j(x zi{oAcpYssYQ-x6B!vQo15bM`^FQ!5ZM*~+(?V$4?^f3Nxbrj^psZRDg1tkK7* zsI6IB;K9Jcx7T8aBSGgZ{%J z91||<{`2i)Z*TAI?d|0Bpx5p)vG2$9-#F=qbgIYweZJ&}x6ssS5dxktMng$CJ~m^0 zW48yu23#A`ryiPXhUEIh;ND7#PIWvmPZ$E|(A>u%=}7I|`M&pA5|UHt-RwEE&~$-s z*M07@z&Pv?w@$pvr)NsfXX~`8awXP*wGGXBVsSNAp4p>-%+~j9RV6j^}o0uH#n~>T!I&PbeYo zY>Bs~j!hB(?PH`uOhZCX!s7u69p&zu-gk8gj3Dr^4$4C^rstjU+Qm%ZkXo4xiVLiH zexoCfEO&UDsGp;3Cm?Jgd959JvyrlzkJ?>CR#{o|uHn1i`p@)XJFw$8UVaF`gEIvA zt0`!qC7?-_PD<2FU+TM+xD>CEX#T316V=bAgKx2y_M2D6%T?Qoshuv4#C6<_iK|nr z8=6u&Gf7IuO3F5Fm<m;1v|ym~mLp;hHp)AppwjLh%JhEq^n{UPfc*U0thBf+^%Ok` z9IVkR2M=>WeRIgPsewD4?)>@dD z(owR-X1AvjR77>8ZkC>4M}uJGj+nY6(oj41OOB-64np6r>B9@pvfw3R9L7+3v^$a9 zLL81Bu#T4`zYmp{ZhFa19uXttTPSB(mLxS&{Rtxf`^fu>^$ZEaPh-d32vLf-Tf=3~(?LkL z7ys#L1qNA=wYevH+jemE5YQe^bevP$i@QS0QvU$256KQ8Awz1iQ+XsU7z`pva&q<@ zXsd4yE8##~u+vjqhPwLO`-f_~N((!?7tf27cvXfW8bm<4MQmf|$3(JXu-evvI0;%n zlzI{tlB1-Il9UuRLqo$EF+f>=D~2z(Z^5zs9oYZ2DLEiezV!ItZ-k#CBYI5iC8UXf zyum_P^dSHMn%WA=@+o>u>ght04^}YtlwO$ZNGrhY7_+a5(NL}Pt}fPS=zxuSn{MRC z{o%FrOZb#yJ4%^Wt@>3@$qU~pbR0VOF^>3nR{%)e{!)9us*XyD6W*9T;7@a0V8%>U6mZM6bgPLM}j(3(XxH};~&WwBeD>ZJ{eh)+J@p|JIGs1X8 z=P~Q!a!YQlm#yfY4atiApd_cmGRbKVZ`)=A~isv0$$*A4g1 zW(yb(x(!sWk9A}Jh+?<-@376{XUOP_2iKHv+b}|qBJ^$IvXgMKP%x$f2PF`J;nVj* zkqDAy9ABUP1`+{fb<2Ndea047V_+G@lCqFu`_;DYdA+#gjfA$35-Q|C)Q+b|(yeX3 z&fQI_4%nCh`!!8BMcG1Cn@fZKbKtkFl7Kcs`~a91Fc8;wObWFEl&tyoqqv_xa|Q}P 
zz{ls~;Goz7&;pYt?mvUsd3^psc}`MEe2`ZrNE^4y=NK}OJQ8`FtBfLyZ^teHB`@?5 ze|~~|HQb1SkFQgdUsd0hPfgZK)Q!W-I6k;uSeqX*bjZbLOo&gL-^$vAkq>N0AO(gNRDfI?ai`Lkz%?8{R4*9yaswyuwgV>ivL zFu*H&tK+<%iz}YWB-37`)y44zgQ1Sr$?2?~jh2p5Ut*~4Psbn2bE>*2va!`S${|;h z{r&@#e(7p^V0L_%qNJvW2!4Xc z58%}uAMZDPE&Qj^-;_lo;2Md1R2>z1L)`0-oem7AH}~G~93Vdy33ki9C8JZ=h-8Q& z+eZv;#HYaOKX+y?Z??*wEJ8#2%QOcb4^gT*;cX{cxaOyiBwSqfwFQV7&Yb?HNVd;K zQ{k{N9gCID$Lbb#PMZ(h%Z(oO*ct9c&rqv0N5_b>Y)n`w8AAK@(BtDcmzg=-zP`ts z7Dm4~7{8p^!P>&*LL1MF6K=)jT7~2mK?GpqrmuQnUA^pR?vBg3f4{+PzqbQUtjDXq z-U6uDrbAfg_!3RY85gxb)|+QdHy5))QaGwpEE$m)l;y3o@(zpXH|_Ri3D>4GM-8xu zCXs0knWh3(2ug39XzdSAXxp%D@pEY8Kqz?#<%Z5rZl;L{WB{xt3~vhiTPnv^rr6Wj zCdFJ2boCRiRdQ`lBHN|bakM74lf6qpt#&2I>w+l8J9 z06sWH_x@q<8`T8gw~ww9V^V-fWO4QeVwsKIbi=(t1ofmx&b?1dWLj+NHaoUGAL&0w ziO(2#spi&nPVPo#=N1lq$1S;3KdK57{-q`DY#Ng#%l1*Yk@_)e$ zQR*7Ap>GRsdVBd*R8*9em8GPhb7oSAI)A)BB{T?ri_jtW@nEE$?R{TguWZg1URX<>&Ja8u*KtqFY8v#i~%x*xKIOLR3Oi8EaQu6g*5pfpMB%XZzLt z80AQYp-u$^qkz$fNitfGe8++0(>k@~q-EDiH2#_}O=YXmUJjTO1pE7lKk-+aFs_IKS2w4uc|0qDE4(?DUeLi2PXm>?8}{FVCJ^yQ}CH1V?ZrbEA>XFtGhyvN zu09p7?|h*)zO)xkl5oe4ydwJG2}Gzp*i~(K9NcuF{2&RD+t#Bs5^Uf)-5Mvgx<_^6 zm*AE=9=KjR2;;yotM?;)4^Dz88GggPaXw{zW3%+=Y3G(1w5euomI)In&rNpk0E`7B zYcmffi8=5l`8A>2?-ZwJXcb=*IBdkKmiNtA(37gV5M4=&NkU*igxSdXi~_r%g{PO7 zy@ena?^W1$=UZ@A1;wm z=w*DWT3@cgBLf(Dol+AFL^ZtH8u)mBvFtsTlKmjOAUn=h> zH4Q%bKM)_qq2zv&YapmXq{rv$P=dC{cLyWU(J~bk=Xo>WLa)3OLx&A}whH1&CJk%cfGA59Hh?ud0TVB;UT`CRZgjmDJ`yG$5Ygnp zPKNG*9t;y8G}UD^)sv(Pp5s!A>E{u3 zDO6PHTwr8pNO2mHr)16jpip4Dcij1L!<1ykP#(kpv$?%_09EJsZ5CJ+b&p;7q6wMj zLFv=I<2Gf@x*dGXmXbJ_>sY5D?2B+q8Dk#`Z&Rj<)&BW_d zPRp*RzT4doUd8?OSi%jc8jQS752t-%ng1A5T|?ZJV6Yf{U4n8`pca}j?2yj-_)^i)?{ zrpoRbLibn{WLZSoXnfuT{r9(d@k)-e#kb=9(@@=A_VQ^!my>)O*IV`*g=gJ)=pplQ z%uUOTYhpy&WL7Tb4Yx+7;lQv;E@*)KhH`nJ1W4&a(_j3$7T*HClJ3_$bTpeKy-)5hxEQV&%{Mt^TU-0h`%REJi*vXEZH*@N zL;qUwugL=}dg&?kga+{_v+zNAr$@K%KZY}jk_??_(Gz<76HpyTDQm;_{IDcGvZGwuhx`D>^ofU+~FA&UR+AL_IAgjpSq`bv4E0 z*yJvAn>7+O^B>wWwt0%HGqM+2$>;R2hJTq={Q^HaeN{EB{;An0DLC=mixU0QJ|Z$U zK31fw+d2sfl$I!8K+Kd8!B|^1 zyTzVPKE;fZf&l>)jO8R%Mg-;8;uC(j=pF{fzsM0b?EgF$Q>qH_fkEL43}hxWpK|f% zxa^6#V%GEm3XyexQ)IA9Z@$Hja_##qV;!p9Sy+M0K#lmBm40Kn$pg% z<^kQ;>E#v+j|lQrQHE>}f5o>xXy?cGe~uwI?|$USU87%eRAEylpJ11tK<`n`6}CM| zV0&G02|f%j>sF~zUgR%&Y#0liyW3msnsxFlG^aP|9}L6mh#?RbqH|2Zb;dS(_Ha6g z^H56Ih8JMk-x5*=3bh=a%ZGK9nPUNSqr;)nkBrRj`6aFPb^9r^8TRi}?|M~vHpq~T z)+eD{HyoIkz-e+__$lD1TJkr48bIZMxXeUEr}IlWJ-;r$o4zhS9loQ4m+Q+7Co%4r z>Ulkv!H10k+mV!Q-R^$<;QZ`Q7%kx;Ku)~&6Q==G_I-LUSI;H?b^(UR$>Z*}rfrNW z>Iz1Kp6zQ=c66Vs4!JKzrWelodDoccfMf3g(hVKAZ;ZzS*M+MK+v=Xr=W?lLtv{!@ zfC((UX}gM=n(Gns(2@txR1MHw1KM2^+;pZ}@{Mz%o2JC3qgXAha!fZ(wH46ikY#0O z9zjTX^4z>^i7A+{yT&}zKws|)FR^d&(GL*D5N{qZQc*+Dml<;ocs)AQg*ooZ+64U(Ddy)0$uJvtZA4q3!uNxn;;d-j3Fez1mh}aON%vtyf{Avt9bC1 zF%HL4(0gY`u^VSC&13`5q3isEU6!2hiTym))@95N>Wa42iZ}I%_kt51InKnSkp$rh zx8LXa8CwSYd-zh{z@#W60rHs73;x!2t-$~}_Da_4)kn+OO}6Oi4n&XB;@+S^h7la6 ztyq<;E!%ectnoLDlkgne+HRfu%trKjEbFv4$BFJsGx{Uz^CEY<*VfI{#c}hwnu4ty zP|jHi_s`JD;kp+W^^FC+cdPYS!IRl1y{55xnka^j=Uqm?%>!Jc7l?capbx|>S?)1| zuxY2PsF|*hdsxi@_=Xjv8z1y$%juU{SVMd9-)pcAnVI~j{s_SaDyRF2dk?{6;ZX|7 zSbk1m9ACSh+V8@Ly`p1k1S6F}ZIhURf1vLZHJ#lolo)}P0kO$w11R-#QdUHEQKWed z=FS!di8DtA@|2x#Uu+p!4fr!Ud$p2Z0mEnh%#c3e%KWH#`)<5|GdbsfI5!T_MFAzC z$49FOIc_C1_~!%qrdF3*z4B&kLADuQ?RQf2l@?Oc_0km-6y{b}IT#u5KvC+T*fEH&1z27(xnpBic2f*m5;`-s0J?7 zpjW)1lCJ@Cw1G!`_p`D2bt`O}=tS_&5``$3Rpf;+H^R-)fY3?-n=}B@=BjJkmRcYW z>qE3=O<_jZR|2-IHd4xz5)Va>l2U(N|7nEjc@T-Fj_6Z)mm8Wt?=BijT0P=0>edVr zVApsucH3+WwV;5&ozEUW{68MjhAe4Zki|v8#z)hd*{Y}-DC1X>w#v$jWhco)4&=EeeO+hPOmr>HX|B@!YR|% 
z?u1)3PrQwU+X9~l9UbK$pfV`R&7j-vU+3R*;)77;{SK!3kcWj(<_VDC5?y>gbVtXe zylW)lsjM3|UvxM7_`GMVuc4DKzwUjHb)ruRLlJie9a}FkCqeDcZQu`O^m0CDFQj>t zW0ji>?rZ}T1I&#$ECuurg2y@6j4G;`qeWcPNuM%*41!Ui!$|)wFI~LJ{G&QRY8sZ? zHQcVv$IhThn4Q7f&%W`*7#L)|uZrmuUw0qh*rU2%r+apr9qp&ga5N4D%YAUFroBIzu9 zNjKWFO86al$tGQ^MsQuMb`2kFM@`P{qwQ-KG`(HpT}ZAUJya(T@U?FDzO9u!(hZ6E z!>%%K)!mi^RV&M;`8w0-b>`14^~)CZ)ylbMrS^y6gT>ASB&h@s5077>Sw`~xTZ`EK zh$TG<@UO^>p`oNjkWxn~!_E@sumPb7pqirj1p&=Q*oEx;Od5U=9jLUcQg_>LdyPGR z=IOoYF}7JFpP47TGPUxO)sitaF(-fwcMPXLL@4qWrzikY9p}l*{-ZK9lBeksX*~(G z27Ph{!IV$!6F~`0uD0X-b~^tn766B4XI>zz-iJDX81dCZP(sB)VCbAm*G2j2O$a4N zr$rNFVF8IR9R=Z355<7P!pr#YUxp+o5mCr96$iZ_`Il0F+rPqOKu-X3kX*bU_IM_| z`OGc&(=Y2jEx-9r&S(h1%nGn$b0`#6FvLP>8$yrbVGgDSe1Td$O3C{u&TN=^K7Q2_I5;P#4(U?WySky%hQ^ zeDm9=*IkG0uaN)+2~MC@*>O0gsQoc_eD~y{@M3b zV_iWolMRiwhztPM`b8D*>Xom9xQZS+qiVK6NlYpwt%G{omdDAE8#?ZRZeh}(L0o=O zy|W{N5GW2(E_jo0Ce)vvIiCp}rL3c6>exuWXORRV$dByRa2L$9}F)q8kpRfJ8{{mKG%bH`k`i97SENJ7n_4M z;((E>Wf>zAa!HHY-MH;G-D&4ARw{?V$1oj9THL;)7t|@bRwyEC*3Zu5JANhI~y|$&m zGB3QRcI>>hM`zhy2C>IK`?Nau!0NeXF)LWN-5=XXT**wA+|YOs=+)@B^p&wIiMo^BNNnV zl&Bp?%rpn`Vh$(7x#ddivJFUSR(!W!(tYFPHb&z&jflsj5w?+b=-Va#Fz?|pg9By& z?KgLF28WS_%!pGJoaz<%^JCyyJiyQh`_+Ry*Qfv>Cf2PVE$R6eH{^iL+ ze*-^!xv4r^uZfNFKgGDdfmJ8xwg7)Z%^5K zL`TfLz{Ow@EzT%8CE9*3(H7WG-K(GW7gf7jI~zVP6Bju*gFEYnPj6Tga?5Kr-1}~u z`#-fWX#5VwH$5+XruNVX`|Wj8!!8Hc9r0VI!!`$l_Kwy*L2OF)+zzPMOf0MOSYh(^SzFd+?H>Mk%xq>ZO!H6Xwj05bA{xRDBG49#Oqv+m7@RuRTjg~B`6c2~OC}0~d z@UcCn+!J7T&^>kGCc{;;y6~(*W0hyPR%>8=O+ryVBL?041C#xmXJm{h+W5G%w)SWG zZHA>$3Eo$=);?{%5GtwN3h#aHU^C&%Q@@8NmuN1#aYCwYTh4S6_0R-w&z z3gOZ(@v3gpbWMHXE`7N@!qu2oDXRm&nlJJ(>>|cz1I_(&VdyLwnr!RCAYmnWp(YkA z@zs0Wb4av(?Gq4$50|)m4yu_UNVVxGsqC%^c|BAbro{&=0myeX&GasDc{w&`ve3DV2_Og{EGZGeq;~;qvd+mJtT|Aq>vpAmnFPYF9mGcC z#Cgy9Wj_g+zXF|t0RR#dL%`C(AL=O7-<7UV@Rd8v=~g%+S(Ot27NF7GB3wne zl|Rp9GjZJH;=z{hlp>EGEU1QeWxZo=xyCMyH@h4x1ZI?*Rldv3OivsmyOL`7!YCEt zSx^Ih8w%qa-lwbJ`KKV-aHlkSz4VJ*2tst%*JgZ>gv=J}E&)<(eoz$a_&b(}gMtWE zTs|8D6`~eA5#j7wu{t#tsd8;y5g!+Idww&l<)PnPI^>Ttk|A^8=U)Oj5~I4?A!RB7 zkk;`YbtvTd2QM)7ds`#HWQKDm=hKp)(Z)%EQ5n3^^(Xfk;|7U@&0%1v+)F_)+45f5 zEMu!&D@KR>qsl7!7anoPw|_V~@oK6&is-4|doN<(Y0J1W2jB~28Z%la%@?|1F1A8&F0DChe{gPTdLOg1f}`$u|BEuZ?6@xQUvioPx^t@GK-hiUctzzc^*< zfpmzW^1kO1Sd#Vgl~^1kz%e?I(bRXCD9sK|Pbal0gS**oBqEs#Ug<~rS}$`y4biQ( z*9~UVx8Rh(wgaunu_6+AmZ103SdZ{F9Y#8saZNNx@Pm{vX()cA z=;tBwcuRJw0N2-#rN0VKCIoQr{EUu6E$H{xp3H34KFoHLxe;5EZcg~7Bi9Z@yDd78 zvsg&T%U`%SmeqgwU0x5{vP^B-WZ5MToH>+87Gyy#jM=tr5z@sLH|}j z;xa%f1&=Sv;|Z*KZ4GwuJOBzg&KRH;FxN8r$h<7^a!qkXXyoQLit6=Z%ocQs$VR8R zxL6oa9#!DtThCw={qzHo*PAO&8;Y4K_iW%B)yitQDq!8Xx|oOr7!b43T7ARBY$o>+ zK|IDW*BT;+y)a7Q@`JBWw9al()MoKJK?0(AfFqVH*oVj>1>!1h|0wNCIG;1?^pNCL ztMSEeYPM$V6(kbkr}%yH#K5XLNEAQp^S-RL6$4!pd7rAO28ge)m>hAaM7vXh#}~$3 zsQlk?`{KN!wvEtr>7Ec?a2m4KZ67_$sgyYUx&@Z=gZFLS+H%5)$8_p3qr`x=)J(s5 z{??>}Tk&**rg^f#8rTLWH(gP>qJrKx$@Sg0T1PbgY1|fIE%3W7meZwQEmT9qShUUW z{*TD8*_|Wregg7k$M=Qzw&5uxm|blLzUfZtuBEmEc1SRcAn6VCh8N_L&*4-_Qdm%z z+x3<6>Fy7XD!qNCfq6dkG|*6oF{~L6soB4FcFYe5eOU3AUU>NEo8m2s08-#MiD2(; z7}CwE%=&GbiCgTR(5tk2M;v>Q*2M>*To*NqmxZfh{L6W)b1(PVA0;U*^vjEciNU_& z_Ix-=U0a3IgqCMpJMM9>4a}eqsQ4UrVS>vG$0pxi17?x94YD$?e^;)LLjln7W@cMy zU414QO4%iT8VI=cjEMn?dkLZN3yR1Qp%?7Bn^^IeTiW~m)8DvcbOCu_4A%7wE_!Bn zs5xPJZd@~|b`^Xpw)=VPJ>6B0CkF!u^oAdy_6I-f>Tj~WpaOBf$HK%$Wx#Ao@S

}BFoews(TeyaT?k&*Y2lq-Rk0Nrp8)s}A2q@8FCz?vlYM%*m(&_}J*kI2jH!?G z1?gybo~`{1(;e}Nlzn2Ty$2F)N}QltAi;}g!7i~-|1QAV`FS>t_W8H*h4Vn$&+XG8 zI}ki!&s|!lkI-&joyxq*`pT4>oY}Q4i2e{Jlp6T>{iSYi#>4$Bu(|EMy{Jnf>x%x% zfIzm?1R!{x(w)mWDvhn=3Q&XP3Zt9JGe<5K4B~& zz+6;+IuU*)B|&H9++?OjdDHl@DP%JE&qkD(EJ;wyq4IT+hT&zKoeht7qns6j3XuD2 z-sU!!d(lR@u`e%HuNZh4T?*C3xKco2_|ZjiZPcRlENZPAJ==XLeHnCbOsJ)(`P+WP zUKIc$R?_!pS_V?o=lQ%1r@NVq@s^t+TJ=q1ef6>$b_o8Q@<0@SLR{A7V$Y+;w7YHk zAa9VD=et&6BeTxPT5Fb;n;#JDp>752CWXH{v>7|>8~hNL7eWT%7{kzU2)sxn$s+j- z(EAoUENwHf;+ld#5Rds)Zy%G5<)4m{&@%jwiT%qN0*E1rjmdiqKfAB3 z^Z|OhPXov38r5$7ba*~pKep#pdb-zIU-nb^9}}*eH%{aB6UQfeyl^NtJ5(Y0QQM0P zdw!={RtCKN@ zpnM6xdpj#ONWllks~YE;p#A)d-35A_g^I4>K;e6~6pKf`<0{*6mhH+{-ya*I_$r8{ zpb1@PN4RqTr4>KRN1%Twq0Flp<`692bMBj~cf`kVj26*qlQ?`DL?4vN2={g$$;&ie z=+1_V3)j;7-f_q7>zv8D;s&+`X|7SO4Z&%PZiGf1#zx<)d)g#;P#w^{rhd>NT7gf; z1j(5*`#XeS(1*YfDIxgDS(`0D=JB;SJ()4p&xQ8 zEamf!ncF$JhS3)S01eW9G*?fr%4aV#WaEUZ99rLor_)rkNna*DUzSrM10Dd%UpR77 zyXl|rSMuU=n&0(#Z=eWp1k_ae`9ONPwn*>7l0U%g@F~>DY17T-(C2}*kP-Nx?W#e$ zbahEseHbp0uH{$DdW@aY$jZsA-3jct=>GG(A53zOV)W0y`1cQU!5_V)gE>Er&>tJN zh+nve9UyXC$m1%9okaUQ`>3Sa< zgy3z*P>}!^eBnuDM35BeY_3g+3k?Z-6Q2<_AZ+X8HFb(xX>hojRej3(%t?zgFQVS$ zv=*>&8D}Ai_YJ`gOHfEJxSMcy=eylwtc>-n#!j%BcEJDeh1XYJCBE@`7{ zR9_{<7j#}@TSIhg6o)}~vA6#m`0k0PV`Rd6?CVr;i_UtV-eA6EwduM`@2_3_e17wr zg*vvluTJHO4saraha_z$ElqQUsSh>ATdOR8J5$3{ag2@5oz7MSQKk{M!^e2-P?g7J_6EB9CRf}2Io;{s+iJYI6b*iO=i%gP^CcyLCqHXanG_lEt%d}L=a%G_)MiB&zITefzx~OY@%F~x zo+8OkTlJ3CKEFK?eW63Re}`Gm@BH2K%y`?0lf5P!_`KtC(EexPf3k*R@`J2lTMDGt zpyIZ`^p`bAbt7z`!vXAq;sV=<2qK6~CdFV}Aj91~-?`;b90KIIctkPA$U-a?5krNJ z;EIe)dA&AvU8NG@H~6T;aFAV7pHtbC%YlL(F@2s|p&GYastDSfJZpPJp?w7Tv7DjV;L->J7 zMp78q5?F~aBBeDaODwcJW>4^2KJC@Yol(bL48Ey_g>2X*!`*#Q$c4=f1HTi8U<}UXzqS3)ooJKdlvTh&Hg-my`v6#zkmzZ_mIc30N^Mp zamVQP&7D;m_#W2Q!O+i=eTTAVFG^Q8$|LD@;97l(kfef9U7zvz%9H10m#@PuyMzEV zD*%p~iUa_Ls)CHHAkS?#GTG0{k4z_ke8jHYj`Gi1Zg@Cty@9{cBDWeVt*@|8B06 z=dSR+`M&>cHi~_tHfsMS`~Sv1rVRgwWMsa^<7YJpyvc|21Tqa7)Pe9vC_MuOi3IlH zFGireq3c&!u76f|$gz(rkH&tTsnL-cSuXqxc}!VHTK`v z^b~5EWs2<Z)4620!fV4;|E-7OwoHGduVkm9?>hxpkae9V z>YM0nf3fP9*Q-{zPM=__r(>f@GAAkBRhsQK{cg(5c0rpIkJ$S#OVkuFxL1Y9VLVVlM)LK;4oX5k1xd@k#R>lZZ%o-VpEFQOo+4d^$)sfdnrddtX zw$lPuK;KJm@`FiB9hduc`&Geer#M%aaCff=Z);zh^4^kuaSv58kenSOmJsU0Vq(B@ zpp=6$ptu$eT`P255_~}0Yo5;I84LYaolm{+y8c-wY!VVdVYzh6r=ZWvmafM54+9h- zA|tAD*aG1Iwi%H-y{LmQI*Mj%)w^+RtFC6flJX!>VBtM86nn@MnTAWLvwvx}i-p*> z27BBq>U0@^C#$PW*urC-5S*_8hMfTL0HER7dpP!C4;Jx^fDD8ab)?JZadvI4IU+nT3m_S=2-OAYnUttf?C_mnH^765HFAWMKMd-XbKYY!`G zVL(X#ZHtPjGm}*{iU24DpdHT3HjpJ2mNC+|@@UU1EGD#M2%`#2^r|iHnYp4v`p@*GgmOsI3!L)xz3F zl+GUMvxnXElops>#4D^7H+JGMh;1%q*hs+M7yX8P68ZEo(wybnv@_D1Px8;X=&$MS zTgvh%_Kn)8joSa+_MhTwe{F(ABVm`3=@Oej^Q7=34;?R(sz9hMLuPVlzd%tu6!btb z9U~q@z7dx~4qqbsp8Wnc&!Qurj>5kpH{W%y?-JBohTquc`?HR<4ln^)_C~9ouYFbf zk_CswQa+Iw1i8a7IxNoO?)~!o92w-6_ZPS(xn796wcKmleCPG&BX9WRK4-uoiHwDN zYoxdv0mlqPAQti3S{vWL^UKZ2N=ix)i3G&+*fJtzvzaGPp0Kbmqf+s%TnHYr#t2Ko z$NSVW%9#}}QeH*XM7z9qy<&B(hu)W$U)bH*XKP{i#>X!sBO|Xqze~`AfmD4@T}(z4 zRXXJM(dpcyQ_F5GTx$K(AMr=FhHV<>JPzDZ(6rT2yRW*`eeRkkOQ+h+FfyOE$aC%P zH^1)n*tX-r>QjEd+Lzy-<}lURb)v#uWuu3NQ*8`yMxPskgMEU2I;7Qds_1Y?1bGR$ z(K*fOUP+JczO}e)ZMJgVp%+h6|GfKb0;Huj zd|q>5)j|6sPt#r|bR?E?a@$~2Iog=UFFPB2YmMFJYk`--Is*dRUL1J2dzS5-DR#3A z?o9mIW4&{RT_qm-?qNWhOQiRS*B7<#YAN0anx^WrtY@q-U6I!GB{J$wV|gBSu0MYY zZlm^6p@;)f4kEzWQj9_gDtFrmMN*asVI$~BDyWQDxiOBDa8W#o4o5cz!XZ*Ni2$n< z_YjG`!Xn|iMdf1#ud$^wi zijIz{mVxP*RgSBFar-r*E2THc%41`zE7#2*^tGDya8TMbXufPLWs;^_kq0d zSs>#;Ne5(}jyK&HegxbbqhzkDcYn-c`}x;CoNhy_MEK@MBHzFim!uv0W6$l|x41k3 zlZSWwLu6b+v`>Uc@vn}-Y;uZZ+m;M9P@n>+7_ljlAy5<%!YeI+oW0|>y7)po7GFMC 
zm{6TTL70{N!Pf<~ffaDib^6q~ECT~bA`2Cj`AQ_NB8d)2-9V*}Oh%NIcv_l0>w6V(11y-;ax_ zHqP{@6j5Fow6GeSXjreVNGB_EwKQcIVrr_fjK>U}IR*Fpnu_fJZ0u zonB<$&2`^LgTFmLvKRfA3d)RP->8k+zskP4*}h@le?dSw^NqCSkPJc?1K}YtG(@2) zlxcGrmDjRc>DAC8?9UpH$OW$Lr zhU284-)}z|c(Rz0gWx%T2+j)}3=u|hceq?WrM3M_L{xTKR$gW4^C$a^S92r-OQ2v2^z3w|-k4;O zXzp6#x!7d&`B#5#zqdWLH72Ds^1`z}_ImB!@Mb%>rlDhHsCip$wbwkmoI4fbidWhG zwkfuDX%=bJ_;PvyRnq4l{N__!nC~0^z*lh_7MwV^$^6^}&&_{cn|g8Ag!`Lk+}?e} z?4h}pUwUi>c3pa9t&Z_Fb8ak{e{aceFMj(oE~xB-HMgCC6>yiV~B?U6`)cf z?s>#u2>0y?I8elf5+1^ph-DbYi4c4vAwmhYUtGC|@ZJ*R2Y2+yU}arnab+bN`xFe% z1Yd^}v5Vj_SY_o!3Ar&WNQ-WX+-kdJ>!U5x?@ibJLvQu1H8-A{6c3lTuqhGgF(SM) z98Jkisio8jp^z@7#%IM0v-<4?^&sbuF;>;M;QxJvS)d(9^o3uXkDVr#sWH zy*kl}Rtq2%k?}Dck)g<^&#&SV-a9&5xqCX}F-a+3gbJ}Q&gbL4D2#bZt61UD{B!48 z7S8R_Q>OtyC8-Fsb%Zl!poaRC3FA_%ELuuSXw>0`B8dvrx)s#=fg&4#0RV~ssc!s4giIPFR9EU1l$q+95CFkgt>K9V zq1UgLx3iF(a zJ~hbo6u*KR(efC<+a+V54+F!QhmMvnUEDTnM)#5hJr^#(fDbU@3$(R{JGW4#Pbi;k zocG)Q)+dkgRx*6yh?*18k>xMQBfw$Z!Q|x9P_dQ-RP}DQn-CW&PRoIpp zUHa0_@6E#(VQ&Mhop1hTzkBMP$-rD4II96WMLqX%he8f~=?D@*8p3AcpDY}S^T7 z-6z7YYzzKn?)erp$6Pjo1}bNbzx?T`0Pv75B;>8uNhC!8`Yd5~6IRPgwLSAQif z);I6+?gs}q-rM!d&GnZqF1>$jruFXC4yR1LU%q>bv2q22MgLqEdc^WBi895Dg9XWZ`g~jzbnMJAL zvFTBXElm_kdpEV0#%Bm5B5biIk_(B3twISGmur*=aPmX33&vs*^2XRjV+uyn`UJA? z^1~1W|38r)Cq!UTZDUD&qg<>eIxN8EJ=nYC#k<|WUg4sn>tOXr7l*)#rasc>K+DsBn}|aH4OzowFjsT z;0`<`zM?8eNu7l^?I6<>mBj$G&-tnN>fNTZCZ8|gPTskrcKJf8fjai$>WSmpPM(Ir zQSi3K;M$cnKTYkPs>hkAgX)kuQz!7|&g#2xkv+hai%1T0r320`LGG@FZqAMF&b^NI zgC-_50q<4@KU*0AreoTS_$pE+2CB}M% z(^;|kG3j?YI?Vr}5i@VxQ7l>Cyc;+oQ$oQzCF z!Y3AHZh33o%iu?+EsvQ5np?yYdr!z z61!4g)xJCS>C~zZD-1oxknSmueK=mjM0cXy^l`RRwatw*&GnV;sj8Z*f=j@_R(G7O zfzf@viI>K0zPjV_2VXu!0)pP(xN_C(x=C_GG=|qK$gLDKFS)n%kMJA!%WOA2+pzE1 zn)QzJjjkC_cb^NafPy`k;xXoQ*ujGCG!cjM*z{3LN=II3)pMt?U-n<#cjfvP%kvu^ zota^~W#gk$QPo9-6-^j&38BC(!~NI5-&IWw!A+oLN!B(8nr1-X87R9dfJaKe4FD%V zvQ{5!YP{8RYrd?wm|qiJoyLMUJQ8F*%&Oaw(#4xh#>DzC~ZDbLF+$W6}9Ny^I3%*iS)%Bn7< zN(KEK21cbajJ0qsk_e#$8!PogyvU3W$0sGXEVAxc=6gJ;NlxwU=@JqH03}Er|A8S$ zNjs&8(tzL9fdVXDIL{V4=Y)hx7TEK}ifMWuTu|S_;NZSd>^UBT z+d6o!K0Q8cTA(=#UepBhw9>dnwK~)q) zsz9m&`t`}>zx_%nE|XKeM2yn@!am8j$qZx7VFbSx0-`kt4v56&^K1J)IUfF(AjH4W zMzL?yM(y8YD6tzB{r3revhT9)@LymPY7^cd3Oy_o8Ka0Qlo~>j5tJB0I8h)PH_V;b z&6+*L{R2f4Aue!6$Lj?Cl7A6>#XiyBKkTmx4~(}sxX^}>1vxB8FJk0BEA+h;Wo{C0 zx5f70Rp0xa{BFX7fS@ul4_lHe%ac=!8f&|;_h(B6N`}%8J=&phmNfg(%t;T&uk%^% zkZD%I%@RR6zQvc}?Rs%{9`<5}Si~X3+^Ayg{qoZD65|uftBRviKlh6|*>E_gJNbF) zqYo`_4X=)yVmIpto4HGDmmc#y^s(wqQe$M@KrtH~U`YpE-aJ@$ZuL5|wQF3~o=7=0 z_sv{j4FHBv9zfMu72E*Rz31szjMctBPV0`*WZNIs`fgb6ymFq&oVgc&IPUrT8RwIC zT128V|1-M-t$&AT8ur?a_JB$IWDsCn$Vt#*eZ7;&%Kp8w(Z%WmJb+U8@_ zz4U)o$L&Qld_)U=(|)$DZ(81y?dK2V-oEXIjiU<$pGf znw{TWR@vEvyJiGmTY3oa)+}sMe{|93QbBPfE9SST6O&wK7&?tz{&KNT|_A0v1Bq1{3^-960&=#l-BmLhQ`3;#K7##k9oO4xw-Krr9~|*<*jWP za6Jx7IeaT*=F(64#z|uh3YY#=dgn&pI6VLBBSd{ z>Tz*!Vua>oyy1WOtoMg{aqhT@8E`~2d-@FS~MiK(}7wto}i-8Rt7mhrJK%z!Yq08U@t40Jnz zIs3uqYgk(8jaBC4dq(i+n>VRrSLb?HFwy|Q*U06b+isHz2l0s<5y0MJ!5@c$~P z3V~v?qIQ(atGh10wCsWG)av~CQ3s;I?i(wJ2G$}9a!7vnKzCjs2JN zKUG#*wALjG=xrnQk0W6@Ho=0!`1Jm*Vt2&PC)aafiXoc``3xK@cznp_KxT`uHD{9rYEHoCY1IRyG1(Pd4J>T$IF}T*X(?~ z{il};)E;Om+*X?GFlnjVqVwS=-SQlZ#rXrUH>B$0X|Iz%o3Awb(_ogz3=Mm=v9I)$ zo&xX)083nK`G*&CcE=pp^ZBqnn9!H=3I@M%?x~(2A?B|Jp z3fUISvYeRS9-Eb$G|<-F+tX$L=E04?Tbq4$l5Xfv^q8+@H$~li+#Hvs>z%hRzq!UM z&HGDxR8U{o>PPF1A52j+Cuz87s5vW>?0}XdNyA22$3{!bRA-XywAq&P%+jnpv%QBQ zGr2swy0LNODt+s{ERz%#XT;}3^}+sBLDs(D!%DZ*$+qf-4!S#DuD%&~ng+Xh0yaVf z0`V#!;zA=t(3U69wMS4qG)t_;VnPhB9z!<>U(g1gQQE?K`XVYCpMB0>dDv5Do;M)J zE2^}sD>3!e+Se{ExqY>;IDZ(IL?JM22tlY22`mr#^TWkc!GHYXyZhq1Gj8#YE&PVo 
z!N!QlFznN7sw>|I2QVN#yD#&k&xyT0dv*ux-uZOL<5>5M@|2Xk+}71vR$VR>igDkN;A?0SmG-iF&zhZ!Dvyk*iAbmqXTq*wlp=;Srl=45 znmkm3fsw~E-)2hZx6zKE%;#@BAGkgB|1h+1?Vz!)P!VwSaDBVx1xuSPTqy;$f|h0~ z_DWx0pPRQ$8BAE$%>47-Rd0WHAufKyXF(Bm6WaL54qS#nvkxeXfPzp-2~kmoO1L&M zAAs&L<4fl(&wTJyHg8q>j|;Ik7wX{2d^`Y#NvdKJ87e5ExC>2ztEwf{Hmdz$$ym2@AA;KEVH#dr#C;hb;k}DUG-37?Jg3akiejZ0-sExtk~50_${<_rCK<&B_zYQ z{*5nQg$~jBWKbYNg)AW&nUMU%_fhX)3nCRrr84Zg<4KkyJz%6?t7S1nW62o@#%VA$D> z#lx^phzf8DWlD)j?5|P|Vw)<(lW|4z~?2`~uZrl-x)DcG=w%k-h7zW{aRJuSSR)Yu$ij1Lf92AkIQoxe}0aRr{ zqn3e{8L6gGfv<~_)7V`M+!cc-@?n!~E5yZYTX0op+1CU=VxGmDEJy{2jE}FE5aHoT zTM{{s5}<%$6L%kj8xj;1`yqziI*130#C$GfeMtEfk(*UP>EsG=qHAfX3X2J#!v2rB zfmdBG^&;(3$mDVdh*D1!I}j!=3HO~cD>AYk3q{;sR(DcWf@`$P9;ZEP9oI~`H)W#L zBz03YRR_Fb2FXDMqdGNHwHYoyoJqRi)#($9BwvcWzU%qERi3NJSCqjmpy#Eo;0O$# zs*iD3RlEhTCr>4udsyj~2P>Y}1sr~I=3$I`cwKaMdye;qCmXh|yK(vQ&1;udZe4WI z_EdCbL_oIxVT+?C&+dOIjsKVw&Xq|d#N`k`VLl~)@0~q!PtEz^?rcqKBVa=YcYvO= z-ll-{=J6&SqNcRWI2yH|51Dp}<}>Z4l5A9fy#lZ$0UK3i8#QfP?Rih;&#;-b+;;Ug zr(KTO&hLBQpN#%vg8d}p#}jA0pT970*-Xz_hF5hqIIr4lyK%GGmfszZhSr2P2;10D zP*PuBTVFRa5+u0r2<}^DvH}C%38j2GWVoe!E_L}?-*%jWHCfe`G~vG9M(ZUti~^2? zE}8lzm8J=ngw}x{w$7-94srRHT4Yi!r>Ze+0_q6);Q1BH&@lM(wJ;E&LsA_ zO{k?`oF4}E?QI=N38{V5fd)pCU4-32(?xnW$LQJW{opj?^4qiSK@Y2XOJlRXc>8(@ zg?!uz1tF!QEh|4$g3E)5qM~A$G%j|~B04%!%qxSbnHiVwUa9S^i^+}Z;ZQ_`uNvEX z;I+EMPN#v=%5W|BV*oUWN!Ntoe)zv!H)n&L3571S} z5-lx^?U)8*sMF`9nc4~NS)s<`)AZDP6#zp8h}9JZS{fKaH7Ws)DjqrQC#!J)U}>s1 z&zSM{_apA_0^WAE&Q zrc5x5IC3Bp+nF&q2+=_i7S*>pKl97TDHRhLdqm;R`a{``JrKv7j@sH^rVtCr1MR(tU_a`U#-!xJW@!yX!x z=Y0wxTMX%9qN}vL8;!hf1QNBwxF=W!+q+;xBR=?eM1H0ap^c=P$iBugWD*%F*Af3W z+9>vo+Nk|oOos2lG3-M|uurb8O!(f=zk(oA0@^TKOP4_}k>nSVc)CQa3Q-b7jiGcb zl&M0fj6f0x>M-Gt4IRIPzBha2=JWaWr=*ZCLoKx^uNJ~g3>jtcy%-G@3iBZ{h|1?| zdD)#zhNpW&sq~xFpPbC_1sIhtqCBCjy0{_UKPoFI0Y5Gtw~y5wT@asIQ&isBg3tJT zVZ?`E`oNGFl~Ve9LZc%izC`x5w989Yu;21fi3p++iHM*|l&A}p%Ij35I9zfoSmhRx z<{Ml$XO_a}dJi>$1sLb4J;lyotIwK^Za<%VbNqa+8&`kgiTbYq42szvu!pY^+L`oTV-JRJiLDOZ4<1eRd~yMkrCcU#1Q~D6$J+s zU=GO6O28WEJL((VpRmzm`-6msm9U}^WzQ=a_zds)iGhPr5;vPbE31eKjwaXmgTa=Ztg5g z4NXTd)>lvA8d&MJ{HW)Vl7Z6d*4l*ll;Hseb_`8C%AMf*KbX$dxTl4k8E_4(wOVH8 zXOiEUp4XJ=7xE%9HXK1kLNTv*s3##cUP4qIrzEBGnc@+@H31gbfd>VD@_qfPxuZ2X zE4iQADuK@Zca1a_j7y~Xzt76eYxok8La4no$HYe3m}MT_*D_YE z27qRD4TK2b@&M>DGNkX`D?W0jI3f`qJ=UbF9;dBPZ>%O6qYTM-kKHC!4^bO4);RCGGb%l>Q!0mH`2)(6*J}t0I1o2 z6ywEmO*NVVXqrB@XPBFB!8?d#8S1++WTtvE z<)Ws1@kTK+QHrWTr~+l`P(qR-KnfKFP=g2ORyME7$i7rln41|{@%~9*xW9LD)UDRW zD_s705j-M+M@8@|gl`}$f%3j*Bc;9)!bc*-13_Z>B>Xj=D8m*J3Eq(mqC-PNF(1O~ z(uzBZY9LQc>1atVNJ}V9X&!3B4!OQb#Lfv+#ng*>MU@bWgcnr@PkcST}nZ7K}1qi zX@E)qh|Y-U6?iLg!WHn(^q>f8*4sJ`HI#Q z>-ptT08E*~dU99RWjNiUb)-1z(Jv&Y_E*lqdS37ZSAV&3wgsE6K{Us*tx z(LyRdEViF-;H-uB^K}GdcXeQcXY46@-~}$&*CE}}TwuH0>XY>V>?%ewyc%4`Kb*ES zWb2mjBR_lYh-m%P#cPmCac658@wnZOH(7kV6<;6zF+D&CxrH@(@hP#`XcQp4*raGGYgi!SXJzDahWU{7 z75h-w(>dhq_g3ldzyq zExZ2d>&(k{16S{LG;yvrdB~dgbLql0Y@pl>K;I+-sH@yNe@0ViFw854R(3tVA8TB; zs&V_yVaEsXgFg)YC~9bc9PZa`q>O{z$f!Uj=8UBIVfWvNcMZY5T6S7O3T?1YD5MJ! 
zTGwDhNOaiicL~pY>P+wS?B7+pYg>hd#Xx=qzC9pX`rt}GLcHil$cG#Wp4%tDBLgxa z-rxCam4*y~_4Ux+Uc6%)Yr#z3^4Wr)rj=XWrggUyJK4WwEz3jCLYYkZziB4^-)y7U zH)^ByZ!x07selN!{SEtMBi(gHUwdZIA*|#>KdSIN4_zjd>7$SYp%RoSArgQ{G3--E z;Iv`>%8b07uC99@*}IizHdAV)OI!o$`izp=_*}+`M zpmf&9Mt+vNS%e~3-7s8O+>VzD4#AXw7)ovx?zItr?f-_8A3_wRVoyq>BEA?`iDP%c z&X0XTYo4te>oNhaZ+HZNv%-%)KkW?Oxh;6x50B^SSQ)CC>8Y9;YMV`1^mN0qxQn~r zA6oTj{RW>+V@-@SFOyFN9cvRdC8fmE2B>mD>4dP9`f}VE$wXNh4QbDkyka{dBRfJo zb6uVld;IRbXO79li6*+^Of|@#ZmRV>Ru@8{7Z1!eggrx(5#qN%&0cqQ}O^4RC1K z;>t$2nHkAq9-HWNhKh;A%-GD#n)(*GF9(I3CRWws9Pg7Me=cxbz1U&p$v1zv#oE(h zp9peAxEza&H-de5+2%-=8@|&Z+F{9XC(tm4ePY>B9(w|}UlTIP=->oy?_sgP2g1;v z0nf0CZATLqtuB~6rFg-QRi{q0z3`z*#Q1O(;RPaGHstofZjU7Uqi^@l_nl3$P+aKs z(~KL_|9E`T_O*RzbT~Gy7<4WkcxASj23ah%UOB~fy8c50T~pny4(o3EUMQ#Lhh%&h z;Pni$`tnP1^9yqjD9$O&s&A>|@;SwYB|UAuGS&$5VegigTkP<_B|0*`xq*_KRmfwH zxUxiIncRQW*H3-q`LwX4TAoiPCfXEoSX^w}l@!BOD>K!!s(?Z#P-s&h+q7YKi}!Pw zlK~I!%R0Op>XV1`G~34LRcPzPt82Vp_jBRyZRxu<`5rl7_rTWfT|iJ{1-y5;d76Hr z7AOXwOhcQitVlC7Y`$Z~dKE~$ZI$f#w0HN8lBwg9#v7(AS=hc~2Q(Vn13?3mf(w3wL2PGrg{eEMiVWj9tl5YUTkesYG;yhXX~@Avn*zBbJ$8@Q3ksP zQzO!db`G$$s68hkPaqKRA-9-X@Vo1;JKZ-Q5BYubi?y3xuU+A@bj)2HLrYz4OC?1! zqL$Vb12tt}t~uW12a}Jc{oF3RAVR?B@)&H$OKHtIZFAN=;Bj_VQEO9=g|nS&*rVsg zFK>Ukn@Y=kTl;>a%a(Db6Uf$NvZFFE1!Q}5V6P0!0A6Tf3-s^l9Q64ux3{oU)M@$d z#==|Era4SjG1XMQZM49B-T99fnq-wCVl|D(@pscvue= z5xUFrL;?ncF(|yr;VohgjOX=E5%m5n7`P!|MnG9NMEb<^sGO>(j2_>Bmf+9S>RLR8 zC>G+YyHLcH;E`xH3)1~k{ciZ(+~B%lmieq@W=oEHov?fVprENJIw9tBSY&H+dqquk zLQ&G24F9`fX4^gYY2L*=qb@rDPSIKKu@W!x^=q3zdRaK48)92ki0~iou zBbtkyKpB1lG0L?u>zOb!V4QvX2LGvp=;YpJV~q{~29*I*0rV>< zP!trK#~IKJ^@^4)|LpWAH>Iekm&>Ychqq1JmM*DR0+r(o@!q(as?F19=B?lHVaMK( z#q)}cHLDEC4bvx8|F}GI!eYl|zrWO2;10&80j(CGK1fz)>Z%T{`Kj;z<%XB;McMJK zP1PJa6}q^VUbvEf!-Tza8*j=u%8ZF^n>V#Qc{7Y%oi;wa?~$TE)zf%VuD(|PINhF$ z=Y;pnx?jE(tymKcKpFtOhGVfK8N%RFgVe6Coxf#s$@7~qLS{uVY$mq*xTPwUX-4j*f!WA#FK}xZ;eVM zQdEF`YvIHvlmefZJzMAgGwC`BujFt6lip2T@_fyyn9KWu4^Fb4taV3K*IZ5KKH11z z!}yN&!Ju957_VRS`uz6p_x=6{clrIIcgV;m+?UViCq^YPd)RWNz{;Y^%)In&q}@8y zZ2g1PGt8!dv%uhio{_7O#se)?YaK;16=idvY7Z2gh@3oU>?etQ+i;7=-tgkjxYC#y z2PYxqwQyRVg?l~m_jGggc>f{T*6+c*i$5BhjN28w=Sa%ZYfI z9NfX~NzF}?J66TGvlf-R`bG-GiQG7%E<%LJLv}L!1sJmjh~YJXcuRlJaUshT6$Bun zYJBC#b_D~a4f!oS?P>XCwas|Z!(UpGgczhy#t=ebL1T47Rc>faq(hXGcjD8)tdH3> zS!@v=TJ7%adj8DE)9X=CbVw_U(gO!#DT!~Z1GAX9U9!%s>I^$~>*p_?3b368h6_A| zK0u9+jVr4tj!lZj?vc)}w#0-uIj@LNab-hgTE_pw-djdTm33RgyS(Bal0a|>?(Q^= zJ2dVNK|%<`J%I=bmf#XX0>s_j-QC@#l5(&8opUPC{oH%sAKw`F$1|RrHO@$>r4t=xWwo9nV3~8gC|fLr^!Qk zFo?%z%vu|C_f^fal%cujzk)e|U`8>}E(6r`jT=U`uTOI@3qkfIDN(5%4ba}EX44<3 z`;HIIUpYjib}K7Lh=8H1K6w0ShhGp3ieuP3%bvE#YW1eDRVz!kY^-|rij!9glhWaA zy+~!bP5}6Tzyp9yr9fF30RUxMI>~noYdiZ;OFc~WkR=e%q`AJ1_NK(d$h?AN273Ux zGdux5H9aLDFgPQvI6gKzJv9s4_H|)2GHiFj$Lhi&9$(laksuEah{VHuK3%})N`4u; z{gPpk{1#&$8RmWYTE2XJ?37uJGQ@5&=$JF9?fy-AY|My6%J~rfa}=@QZ$FR!dod3C z#$!DGuLMSw9pabVmVQ1-JVtn9Wa{YVELl@r8}1Ltq1 zAxFaOgI)GvHcN~b%H5)>*el{3z|=3(rn@X!<*{vwy`HQgRn?rLY6;ZM!9sV{$;PCU zfm=`f+_B>E+!fBtmN>7NVY6Vh{kPJk$deQp4_;(&#Up55wep8CZtg)~|h29B8w z*cV?JlGBp%Ex>Ej;f;Hpc4}Lo+SD@cWF70tH=_;THTgWPdA-H=&#AAclI`#WM>25K zq`FN4CSZ!4>?QA`MeO`x)a8NxEY<1EkG(Tp)F{>fm=k1dl;*!%csOX^y~rz%BJNtG z**N9BNM{z;u$mAAGv=W-0;|hv({pqAXr>hTb~vbgnvezgjJ}b+sI2t5fr03>te)=S zl&B<(n?piN=8t#G6`zYRo6IBR?|i2i(bBc9S{;+P*jKt?Qz*{ zO^snefklP+on5WfwG|QZ5pkJ`O+y__tY?%Bxs}xwMFqug-QRe<{Ko6$OFwLRVQF?w z`n!*wbnbBPKzl-BG#^#X6Vmy#$ds_+%KVR?-sI+F)K=FZe=Gfm!;?s`?g3# zKx$%yNQCTXQ0g-R1d+cD3-Ukl*R?NRbq2;Q8+Lc-uet7f#HJuUK z0`b}9CE3YIv5gHiTrQh0kfdcI-$-UoaZYY&R#aSkb#?PU6r-4u-!vLD&q_n(pTuLd z@W1&YVr2I*2K|^Uo)oq*8Qi+sCcog~BbVd19Bk2^*fwVdec$#$8_V878XS}I7X4FF 
zR5&i+G9KeG9{>4)aVbWGsP+Uli;hj6DAp5WkZVk?0u`Gb!gv@zI6`wS|ACO}0wq4E zIjoV&ch^Y!Zf*0Vy|P9b|Ck2{qw(xZ*6 zZ>YXdn(Y+h|Ftlsw6|>(tMH7af!LVDGY#zt!^(f+u~sl-3(pjw&vx*;jXvGq<+g2- zwKgyZRCfjHL!~XAJ1%}YpV%J3hJ8Xf!WIo77&0Xza){Q3mTyF(P=XTuMj#`*vE+Mv zSY=1E6!wYvW0+6ELko_b+yI?GV!tN080UF6_4KS}%Nxmphk#;ETKatbVwcTI#<~C#V}~ko0AB?WZ5{KfUns3cGJkd2I#$34VQ} zokGawLuMPR-r}3_{h)hVH?*`(b(G9iaSzB!4w^d8XUo`4!I>&6S(BCQFtxuY0C->x zd5$C%E1kvnSKV~FSyYwV+14l$qYfg~-32j)K|Q>d<5maMZYls{K>bJr_CU#O!YRKS ziJXEG*lb#4b2RqYocDUboCr+hsZNs>j7T%?Dh5@(W55A66w{y}g_ZFj>%rQet0+%M z$a^V!sF59T+RjQ-EHpK4E6;vBf5QFACwwnDr?|cHcSF};z6cp6QNhvOJp))OR@#}z zL@*zM-?}6x(3}A42%snN=u}>TV#nACo`E+*BnP zlV}ja;DUzwQ3*}-D?7*)Ag3zLEN-glNJ%Uj=tteScyu0`YZNij!2(A(BHAcqCe;*% znxtmWd?9a`#;B}pjEPE3h|6qhZ0B=%eLcMq zfstA9x%3`xY*2DhR;9GXE4L`y*4d(|tD43dC@#q#rHw!obg+V=zPI%>S=*U*cC{22 z7nYaTizIN2Z!SQ!kkZmp5QJBhos6JLkwnZzWrGAkLB5wRorsK%Kwef<1?}AWuH5UI zzcT#D>HNYH*xLp3lLz-~b6T<{T3#zdMx{-25{D#L4{)VaVymir#frtL*%`2{9a`Hx ziAzj-_1f?3sb`lid8zB{o3s4r8GXYu*PPd^{2>GUwPc$W@Ld3uQIzX}YW`mjq5R5j zppy;e=I?TVd+j>SzR@4K*1!2&Ra)2(7aSe@E$!%@&+At;0#HfOU;uI}Ae72Y=m(%( z4)ApdaG5q-Hl4d=UEPiQiH>hVd}EWVx`eh)rH&4~^QTw~XZL8UF=feo09XoW@1O_} zXqwt>dk^$~@j@GI*}Sptk?w)cl=Qfm_^{TtM&wR11{tYIx$#M*`DG2^Nr7=00eqC4 zfi@Hf1?Vxx`4Im?c(5O>4+uokdyVYbM88@sC0OcNB9U^mBuz~%(b3Vlx!K))+|ov9 z^0?)}BY61&&%q8B7NP^HyB}qh3$cAA_FW654X3|!1pXQSbnp=VKaPKh|4IJGKm2!( zao9H=z&rk=a z@@u}=VYlP69xPBb&{Q>(-{!k-hr z>S;Wh2#n+@cB;S#%y(Y6!}QoiGoz5y=)&6Kw9Evon~*gO2Lj7Njy^qo?CS-MTa%|g z)SGgD<|fBYcOnfpyga0+uRF<1Ma2Zj+G6wzcN}oX11CVV2Z|RJx8B?C6Zyl}_eWN0 z4tJEtW3Up_BO3&j2BFs$yqYQFga_7uY$rd_cCS@Mwsa_F(FaG^=vO%MxW#vY{{iq^7r2sb$3E?Qp89wc_C-Vo)z>yajC3tTZy5_s%JP_y$BPIGVRHl=2`sLyk5A8x zPK?I{O_0yd{;d#%tja?1SWo{i*92*X1zCKB9JIYsJPKh*R(jj$D2flIy>uuSM_7`R zRa8=(R#4bYWAa456uuGQ$3s8`y_{3{G}mQ|$F}*FOLm*@kE~3jLqTCfWk_6DSxvzx zyHx@QL~uxmY$qfVHkM}OvtU=_P)=%2QhZcYRM6+opWE7dxFXopJ&;jS5}A?M$?j!A zRzSS(^G~kG$4pF4>TE?@&GHJf(sB}d2D|N@?79bAQ?fF+d{h&J!9+(ZgN1&~h>+yu z=(P6wT68pGCF%IMs8i<-1cZBY(bmaf1Un)Rwp75kPl{~shgaqg3?7do71t6$5uJ^7 z>j{U2v^sv1ZQ|p__ABH}6;KZ}CoC&w0v0}-b#!w1VGKx;S49r|BCa$-uQ4B`~ za0HbZK^+66iZJ4~wzjCKwA7R`X(|!ruEG&6>}JEhDsHVqr27H4GrM0N-1>6E!8bdM zyzgIstH0gj#1zYwi=S>5Z?oiBnQc#-m-$mc=N!{o~FC_bpRh7HC_|n0;&JieqyU zlYjC@dy$*j&K-lWJms!!7G2OI&5tdkkQvX_!ptvZvsIn-%B)5;nKrn&i7d3<@ z<}|YFoYJ4{`m$5)vAUdv+_qO+ANW7`UK|)%l3d(S&Jm)m0U}AC1P)_v%h>yh$DCS( z3}#nbZNSejH?G}`h)7_GP@K`h989aq3(bmd8fmH39IqNwM9*75o~Ej;cZ9PXmVmsVP1JpTNiq|QCO5(e)-ac$hh!P zAs6c_9TLG_C}GiC2VD(5oZWW!%L}jkob>w-uN}Q`?y|uv6Z@*KU*Me^@Zfg1WM0?x z%h1#eUcTJ5YJK?OW0_lbgiM+7Z1&s_+B)wvH62&4F`GE$&%-AlSUvfE^3sO|%RDDf zPS8-RqmViPu7|2JL{b_A)FFT$1fUlvG%P&THhq7J(ySO&t&mkqqSh_?x3xEG0; zJOmA*Z4Z1nGQ#dfO+JKzoPy%8u(;ylYC4@M5OTy~sUiSN#HBOn93~T%l)@9o8rG~B z*4Av_uz|B}d!3s{*Dyz_5X$CbF2er}_We%s34U!@cw<0N^4oDv@Z0$3SJhYw`#9J! 
z4*SMqJjUZcJ;Y*hcXxMgZf;n(|C1+ojvQS+Z;sN=KNdLI-TwL2F*fvRQTBT%DH8Is zd%NOuvVR5zeEIP`AT}m1FTZ)La3U7*I9wD2io~dihX_R&ErRyPF)niq7OERf&~VZ~ zvs?}Y#fQr0LM}UIy0*c-K{Pc0rM2nwq>RD-AqfI0P`eAUbQ6wJQ!q==?w*1C!rHuo zn*Jd=rd&uLmGrV8od$W~73r4Wp1Q@n3aaxd9!aifPQ7k)>Q(sj2cI8Ye0tv5-}D^bN=L^Z*uQv(2w-wr1HE%37dy} z05;?m^p!pMX8x+=-Iva9js^B-ypG?AxqT$y{2yQU?f2iY_QTwXk14XYRA8nJ%ye+} z@)RTDVv~gzT+TRnm_yw^HTAyq{(8>#=F+>XmpQGFHKr&y$bq|HzT4uf0r%cy z`$U&!(8RpNyc9NM=5-{$PJVjy#W9`JlTSLIoV{a;{ae#!dUY+Q)-(6Z?lU@rrn;K+}GoC8p>rc$kBaR}@~P@o;k|5dbB)U%m;$mOr1fil!)fx(w9jhF}` zPr_oPMMVwu^q~z?5?EQ+6c7=WnV*aKtnhy=lR{}FPFl1V{$2qkzf&W~H_b&Baz5n5 zr^O8n^^bLQBA{G=y2xY|W~Jq4rDSD{ve-gCsuw6hHv>E-9LVTRI`;0^47<6q_L@r0 z8VjGz*!pypd&aYBMiV+1#+GJ2Dgh+oqx1j?%DupJl`;ONnAKca_2K!~$bjUi@Pyd- zxaN*V3l8|#qd!?L=@;Gn3)h+Gf* z5MR&4l$dOPfBzl(cU-#t#L}^6Nl^0e;t8F58$OsaO$LPJSbI>Z<41} z%Mh!y)w>iZ_38>mRKn|}i(lEhB6*73l#}q zZ6&lZ746>AzhHLDygB9i_xfWJu?++Q&e&|$$Pi}g12Ii($-r=r0PXnY)YO-RMFtfX z=g~&TynvA{s1R!|5b=hj>@=YOT3dA7yNSTl66CUppixOK?ef*i$~tu1qH_#Z4H!dv zzag6Be>%kfBoztAWbMWOG@ShP_!veUhkfHQ9^>&J7YNi72n1X%S0E7l<6&V?Xhx2kSu-H~2=nA1Pn;qNIl$)8BALy6;@k4x2SYlgu-xwx9 zP9?@*p8#36h25fopGAQ?Z1&7D)SGCmjk^i-TxRZkb7cvJ?`BrT2E`mvk znL~q2DQSfx90MS137^Lw#Bu;|kj4y8OpeM-p+n{{9HPPD$nsd9jELaE)QpzWHc{sg zY-7WAA?yha`{Ml4I;}X~+0$14-nq2okj(Um=&WGx{GU#RpA56yj{4tO=e+0ayR%N= zcIAU*)!lW+FP`}L`Q3LfpOVZ%Op-=|+#fFFB{U`lHwPEMe9xAbn_jF}yQehOW%|1B z>&@y-E~K2?_H(6%oeI%hn_w|v;xlb^Be^MOwf7nBuz&R^yCg9oGbJ`JV-N~{)J3kh z+rG#NdGSOo*NNaJLE)N`MVwW7PnJ)VZ%J8Ucvz4`!V*GSRa@Rmzh`@|?J{t?yWqh5 z!>h%xNh47^ zUhUL0o&xSG6U~%m48Uwt%~wUQ`lWjS7L?k}NWrFP3r?Y9OyL}`wD#Xk8MQ(BA^OtUkF|B?++}(S#)^BW8makTp>r$5M z#DfM6c|HXUOjf7qUx#l$!q=Z0i>gIG!;5%gn4R7HNZ-6HyBN9Tt{!218}$6#_TpXb z(^vWRjVQ`VOK5v!=z7n&AEG$)1?I#i01R@)N zc0j68p3OT0pO{tKnf8mt3r8g zI*`d*ysGt~5q$3DlE*3I$IG;hRS!3xq)Mn zD=%D_f36zT(`GA@2Go^17cXhJbfuxC?Oz0zC8#%`h>faFLE7kGVp3FOtbb#3nGm&M zY8)8astFSn>571dAiwDon+*+zvvSdbGB$-_`$EZo z>(%>zVZO1BLpTQP{>#h$r5A7<_KnAQjK_asNb9^J(cenA@I1fCX8j7Ag%4xzf5`L>$c?5!-Y67cv{Erkf;MW$MyK_4G0-xx z0Bz#t|1ye@pmv;e25nI4*BMcg`Zo3R$3k!OP`f=g$L@Tzh;GYngMHQ93f~-$C%zA| z>f@?9OCGdD`r*^nQx`tCdk*yUhNs7rvs!{XGfgAh!a7o8>*8HJETZGShbRB6 z?I`#9{?WkTuJ31GzfXRnD0LW$*l1Fto!R^&-}_y~>$?dzCt2!{9s;r*nCPK+Dd*;< zPdj$`ZdNxTsM-J}YanwU%(o%$vfQx!>L0t#ZAvbS>VX|eEy>@D{a)m~-Ribi{+1Td zmshaT*0j)-zo4-E*5X$oPXa~rEIi*GMsi~P+c@p$RY?`Q}VmKqD_M?CMyBD=i zn_q32X}eJ7&LkznNxEk0n_QRqGzRn`<3WtkNd$jmzF({oh`qNI60*wk@_)V$8*b(z zK#?~vAfnMY-Rz3AzO>N#+LG3n&tLuU^T{u0`D4Ay_RZPqYGnu@1E7mU<{*fQK%x=w zOgxS$OX4nD0QF`t7tQNDe-`fCo%QN{v!StPbZGowXM1==u=kIIr!Sjt8+QJ=tK{O< zelyE%zaXZYN9dJ%`d@l*Ei!6!fM-!CP)VM@d<9Kc&t=nI6HDhWw@sW(?LS+&M25uk*Zmni zdv=em4#eU3Bq9Pag#;j0QGhZ8E&v1a6fPCTpgc8s4Aa4C@GjvlWOmA_ zrAV+0A6n$)kMg;w(Bs2<4J#Iu;z6k*py?_z7tQK0c{Ey6i>XgSY#a&%{{s8|-j@6; zsFV6wihjFU{sY^=|B=}7-yi??igDOC9^)|{{|kZOKfZ{~N44}A(q=3PYoNK-y}E93 za}NPR6%JIR@s}_Jw;)W1a0rqL+~kfsI2=Nf~JfUKr)0Vj0N66!DQcI5R7M5X<&Se&u+0Qk`H41C@vo za5+3~2XipABEjIN=~|nu>mL6x_tA>2Pxk-y`obyCvnM?cAAYv!{JXtZ-&}GIbN5dR z$|%eU^bLCD=yCJ(?S$C0oa%~?vHo3<-3M8pL%r(i%SW49-a9(@eSZ@c<0%OXeBi^8$&Y3#7-$i1C`_@K zIn!|3UEeG2p;oCS!MO#o=H~a`yndORo1dGUlm0Er+sSjymX%A+tZB$Amd!BUNAtwOaR%Gd&e1KQX(R_C=)kTK9+VzDK8r~uWzLD`ZL zsVzcoTx3ypD)QBGSYt}OVgU^N9v&MJkF4erfl!*P9fN(`U-d2t9Ajxp>%ORf3+C2_ z;lHf-WX71s2}k6(PS@^pyve`92K3x%2tjKi;hR^W%nP&P#W` z-@np!`{u{HPkcP&kY-T{8+#zHj!Unm4VL#bmv+|nOGX%w$AM^npKz>&iwA|H63EOd z%>4E(fJ>KN2?y=IPa7pfM{*X#xUU2hkIP4z)@3YqrPw$I*>!UOT;fW#wf3*V$V;4m_W~I0`u} zb^!v-2~buB6=xyhg?RMBMkqici-$PCrxK9IhpANLC!b7#*kum_IW7WBm6b&l&@xMx zrZ=-!hFXRvc95uSSp>*}o+(r6?-_L0xAH|;>!(DF7D+j5lq{~VpRsmj7X{Z!A<-yQ 
z+C&}3j-B+re#qrwa!XQ0Cn3zuD)sRSF3hdwv8CB#0h`k&5cXsJe@tX_@&yv%-{LI1 zveH`rpQW$ddsi=xJ9`+CLB@hUvH1On2=Z94j{`@L{WP*o7jv3~ zoDV$lW-e6VU^c$Pyr~@nJA3+HaCl{?i4(F22qhydZe?xV_n*ElE>2%Qzptz)lZeC! zFhELD2E=MB@#O3MA>LV~NJ9dn$Qxvo+*QkT82plLiw*K+E@r}Gv(pRRyFq6c7x z&Af~LhBo1Dm9*BnDO!lEN!HxhNfr1Cv8o2w= z&ns6$40pI(oOa>tjB|!_PFrYfzP0<>>(IhZ*GLb&3zN4#-Ln1d_M4HHj(ypG;@hFk zu4|UrEM9KAXxp<5Ywed_@i}(s!?D{RFQs)SJoR&X>gCbHVzssQ)+FS=vT!(a^SIu* z1#zctpuzUkV_iFe8^z~fF(}RT5Fc=l5<~f z7ISJwA&13bW9$quqoX6p`}6QXC%P62`6ARi7+v)u!{VbNVmU0XNQmY4q&!ES*Yd$pr*7YEiofKJC6Z{Kg)w}1>Rocu}a-mS;?BJ zVxh3yWtmx`#oZ4^4_}*pNqL{uo7x2jQbr4$GCc2vK05dH%w_L0!JR*A#Z`2a#fx!- z(RCZ$1JEEXv@H5V!zxT0f|GPLOfY0Ba5K4E+VN3fI2xmT|rN5@P)<iHxq)`m;2q5E(Dn5J_S{ zW&qHOqX<#BNrfZ@h?C_5JOdB}059C~Csa}AD{D8XOpQNtzj5sm-{t$?T{Wv&x;bm% zs^+CDIePPZ7Oc+QbR=-y@q6n}+?~AGMQw5%j?xLpd?3?@S1+2gKJl?3e{ zA_*eb0}vccMjr1|Qi2p5n*?ZNFsLNYClYx$JPMA9`2KZkhwndV??qhznIcqQi6<8E zx_kPPGK-I#b~$vSXycy)+S5DbRlC-%g@+Hp#CTX!DNWf5yV@I)6QZ-y(nkjwzstUS zwv<#$7m0^ioIYf?iTQTGp&?E|er`;3cxp=Kz##1Hg;5dk`jxuP8hoR)}+}^*67)XaN9b*LzW41Wqs9K3%)jbo5e{9n~w{>@?>_KnAQ zjK}{%Ao!0b<}l$P7h_Ojb6yt|e&_S|i{LD_V1f|N8lo@D&R^vha3U;3wuKoKJmC6dFkQpRV8SW2JNB~ zLbSff`WF8wwA3%XJHDP@#=^uq#C#4Ein^N{(qm)dgQ5$w3K7KC1$&I+jkkQ@kpt=JUd#w#`_w0Xm(mT(e0R^6~KYV=q-NwrK+{J78#YLaJ zKD0E~qZoz8ZA!1m@QcomiSuxB`uWNCr*E*nv9ZlZw|gHaM*UaFiPZd>D-v zZ;e;5Qkv_$Xs_o%mjw4-IEWHag(#(iJvdZVkXMwOE)ui3D7Qn*Wpkt}TS`jWS6`n| z?kGpX#ORh-V;z6&zu1~67NYQr!-FjBt3-uhBvKt`5zI_T9_jB#7I?9UJ0{sJgpDP2 zX)&p(nK`3S@V4mNLGKd^mTJHXWt@;r2zx&5j2()JE{sY@jbrd=OgK`;DYr_n-u!yQ zoM%(DY)P{mm5+Jt`d0a=OEApj!qFiikIonIMh5w9MS^O(q-XkH?v?QJXt0F=Td{61 zF^Wamkk6u5S6APA=y>#u%i>ipW-N%sDOCVmEl?^U>y(qVssN>E`9=tE%>Z;LC?UY8 znv8Fj0W2Ko(bekNwY}=i^KAd`k&%&w#>PpqWN&$DBN>5lcp6!yo1hh`yUA|N<%_!e zG}l~Pbj;Ym{#})#R)~yDJ%un#!Avg*I02Ew!N?h0EDP8NWQG8>4^W3>)FDL~63{%{ z5J8p(@Fhx<3b!5YF>!;Nj}$E2kUwc=v9?Z^tXwTcHc@lxkA<6`9=-9y)UjvnmKuO> zLi2?x{Y0(WNsG$<*jM`bGrV^By|tBnM;rU-fhZYr=mbsVeOZ9S1%RfYEL5GqCy-mI z^6e@rg8;MwK$k`3T3AZTXaWztFu)?pVE$~1Tmk}2!LU4*|C1*pKba(g2v1;AW$0R( zofj?$e+EFd7;?o-4r7EpG7=Kt@8;(E)YHQ|IP+UL>)`qFMeAD?C$z0v11G5WZ(P;* zK)<%O6vjs-hK7A>Y$}jQ#+d$N%>!uuO@QgIU;^M!z+|&3Ynl@hG79nvJ3HF=d@kDQ zA(f}6!NwNYKMY5hV?}=f8Sl7vXQ@_wO0wux~uZV?6$M0=3`3iZjBoDRl(GTqty)4<8sF+ycd?A+%*P zJc{$*2l>DG_U-GBpFs(UsVS+Mg+(P@J)LM#N60TJD*pWOi>WQjsizv(wgCf@0EDm`EEg3i7T-fQf9>GR;!!AX({iMO3^0LChjNHV8 zxVoBpWX_hbkcn703Ww~It+u>at@cn()l^o)R8hx9L)%H6V2jl^T>#aBJk?D1g7;~o z&v)YsBdzVsgZ%t0ADI8Se$)A5$6q)^4p|o;t&L^7(Ja^xt3hE&S2}yZrkiw|kvXu$_qNGus04J%vTqThG6< z-FD}l?2IU&SPQ6)fLf-vvPoy{o8=GAsT*ohtmIIeHA^7pq_oR_i)F6>xm}CTvg_Q=9ZTS(|IM~MTzO985}rNLCZGwzd!%c95pjdU_k)p0QVA0 z_*nt!Gpv$pg|Rn~&+CK{cpn428wq(M4F|P}#uKgvTyjf((#WqxAR14Itf^uVk2^Xt z91$Ht=Zs*04o%{T&>R|SjoL-)Hghs<=XTKH2zJnfQjtC3Shi0H#UtWT24vFU@Br+k z!J&TG%RwCi(WevR6NZNe5!i)kuVB4aEM8qNkm+F`DY86Y_US$80B3v}LPLkQ4OBr)U(3;+hzDeTEw zO_wjVH#ftY+KTUfv3vGDU9~)I@j{eHfv2*7%ph5-i8M7-b*0PFGq+b?KDhY!wpB;3 z=`DXfX;OlwYCRFOkpWYc3aNMj2~U?-pikEmO`bELpjAOuD+lroL^=M1DG(4x0A&QV z^uW_7iVTgZG>z$%GMdRs+WCN(hk#Le3YUWG#etGpvno%X;hj9mS+Q!6BtrvKE|Bj4 zvbA!WLkfxw(=-c~%=(y>QTyYk|Fh?L6Et7T%hqG1Y;Eg~2}=q17XI~vZ)iwVO=CMM*)4))9q^tNJa8PUsf*-@jG5Z>%g02Q zPlgxfbfJ>is8A?f>f0vfO3k}}D=iBJLS}VMS!{e%W^PVbUq3nxVUJ@*lz(%}e~CvT zI8ch!c)!wdLKFgu$GZ6hJfTp)!Log*`!gSNo#|)5@?v>}u@Yujn zLC2`1nGKs-1gT+-F6}VhFU|!yiFlZXyi03u!{Y$Ezdr6)HI!4glv(;p@9>))o{4UW z>9KW{wcQw*Lz>IsG8tSr65k)X&+`vG3*9Nk8XDK+b?#~`eZF|9_i}|ds=xtgxhhY! 
zP(A6r%QF0KUU&Q_|K}q^os7O-uNNK{4jp@VQ$Hgq>8sc0y5{<<()`Zep1R`7kT3qW zMvotv+U?dqHS_AS=}vR;CPZ)_OtaNlVlelL#})6ApRe-04t%*pzNZ0f0KtWk~35LX{drDA8oMbkBkg6Xd?k%{hX|xynFLLE-%r> z-(jQu<~c6&RP0npMuZs-({6{{DrFY8cD1xswV}(mL=x2!w&U#@UYFuU9o za0Lp)+F9Cg&Sd_^^}UOhcpd)Bs2wMgf5jfCx{43hGc^V~D6wN0P52kXtDPjw}I^P~b(ABMeN|rcavC ztt8(LzyPYuOl|`*9mrtAfe|^-dT2}kq22EJ`Rzer)eDx|FItv=;1E>P8YL?*W#suY z^dLzweb(}VDGTCpa)H`9bP|q(YP*m`Dr!P?EwPN8gsRA!IGe6Mxov{(u=YfOqJl)0 z1m%d3f@i5IA+R`h*^1n6-yxj^J9}YRcxF^Ya&k;wbVyomdg3hJpOk=BHd++(Jp8S zCW;EVgOJ~v7yl0OYapi{a(khOH`buTg~NlKrpC66jI8A3YDuAg1p?K zxcKDM%#5!AUiL4|tvv3eHu}AaHZt_yYY?~1JnfKQh4UzCAA%rEVOVbDlO$JN^C@!n zvXrMpk}XdD2Gyh3gM~SSF`$?MMI-%8Y5TPZ(z{`UZKBD}r<>MVEIsaW;8oW1?`?ki zu|_l8mut9Ane%$GrWtX*z4o83OWjk=4c*Vir+!aQ2#fme_u2jJ*&}D{tRDx5gw?k; z@*$5SVxX8p1S<;a&F!4F-8!UuV-C)QM0!C0&VXWt*S$0G>W75g4ahvr($-R1M={=hx6 zoTkg%Bgz_+<*g_<6M&q7p12Er6@AelXpNxpo&W8M&jI5HJo#QHxKr@{W8Kqdp=LHu z{lhYHi{V4l@+mX?6xFj3jhIMaph%b6w(mDccR+MDBpsM=J}``KoSC+01(7PSqVvw2}sDZ zEkx2Vl_DhL#6%#X;t>3Vl5o*xJ82&tU;^1uk_sK*2Z*@Q2{MwY%H4mgDzGpvyLs2c zKdS7tSM$!pSt~a1Crm_(xyouBs!9%C@tfkL_ZxQBE?x{}DLmxl1e`!c73%0hfJZN8 zf(}zddtmA`-qh((SqV}J5P54VpaYP9!zt)WZW zmCI&ACI@n%kPhM1+s(_C_bAH^;Q$>$r3#>K&$iO8PR#X1TA9Us*nXE&tBV`b6La%3 zi-(7hzeaVuSpu2}@_xZADfZ$?u^>jv5M#e!_JCvk2T;oI7D?zLA?LraIz-Y}3t&PL zJaN2QS0`0(TCY60VD-{UlZXASZRm+G?)otDM!%XzfBPHpFfPbA>>H2q7?1z>ko@9n z3V&&t3i)l-<|ny;F#U8h(Q;?(E?V+`A?@7JeX?~elKe@bpZCYz>fD-z8l{j zmVKzDXelFW3MSaf%&=24Ot|Fnt1Hm!p;mF=IqxOMl1tW!^&4||`U@q8}PmI@4jyqkvHQ*~gD!&?(6 z<~UVz-25j>mqR!1eLj7WK6&*6&AqN`&we|#_w8}HD-%?VHI*!tH0;!s%@ry42yzcd zs+NlK<}y?ZGR2BSu_xngaL8RE+2er;0t5*dje{s}r8dQK#_St2++&@%aEQh3MYC^G zZ#4-lt*pqeDx;weXBb7AkCGZkdHo}>_g&J9ttM+{-kq#*Pko~Kqz&)4IFvsrf+YjU zNzkTvvDEu?6tY5^!_Ru1U2L~lb$X*F$iRqb^Q ztXC^EI@;jw{lITNtdV{R9La4+F!_APBg^e;?dL>#awqH^hODB-s-ltx36^_og|ydI zUdKaDZ~VA(h2!Bx*KV%g@2#oyd68z(sqL&&XKKDAv+hT{-Tv(e{((F&#w(fw<=gl* zR+}6nUjN8U2}#Q+s;?+0@BHH3w{=r00Pza4?eeN!a@uk8wmGahd+3n)x_zee_ZVz= z;_K?_u;^h=t0FHx~gFvnWrxQ4L*SmEW43{1K!|B75QArmAcDR4BjL7lHs3{Ci z>-&|7T`Dp$$Bw4Bzlm%erbQ;zU%Q>Te9eG@ z5*Lr>0nj{odM6U9w6cSWr-pLdJ+k2MI|wE28s+rPL?K18BkMYX-{}LOuSvHMMpQ4YA4XbA`_PXlpmTeQO()SWE!D zWY9NzV&|Wms!~!gM2Svyqhe{_q3}1#L*1(}i^6f(Hy-0L9{=$n`Hl84kt?)ApBE7t zFxcBOrr9R=H9w;0P$-ZBi7{OdF?(nvD={r5JT@=4sDEU*ai}T1HQ71R;!()CtAX2e z?*cs=FwqRmvj#hDrwqYTF6^hn;nLyCvoEhqd@w`j$uyFMjIQgH&9DA&OmL3Oi%-Z- zuNrJDZZ6}}=_nVK*6khr{;>POLq11!O{Sg-I1|zt*b19Ud1W@SPZrp1lz*g+Hzmuv z$SAtYBB*G(>%_Hg%U+khu7DkF5LQ5#D2AKt-NEt|M=Xq7KRyjh^37_`C>^Zm817|C zc%88OPUszQ8?z5~1?fcEN=I;lWT~-+doLXQqOX=REsqV{^?X*cIRI)Ku-V7zIC|js#SZk=5tEgM4 zQw^!QP80EWfQp6E1Un7nmkAD<6Wnx2#vLba6yiZQ-z>uOGG^d%Szb9UYB_iZbS8f)$x+sWAV=lH0Kce)R#3>?X7h zhE;{>ko6|-?Hc=)6OAV*8Ol#Kn>6YA#BG+_y)wRijrOhYYT}9c7pR^Ik%&#hHXzoCJ2*qfjuujq&*`Mu{^7F)# zH|tM*Tev)5ol;H$)!H&GYMS91iygEM?ol{9an|FRS{6#PttKrpUU1Ci%G<>7;Ee2V zp>ctcDPfUqMg}d1_b_Bo0>lsieMH$hB^^H_r;@L!@vZ?L=FbE6pZ+>;@!KU!e{S0z zao;TOn#uE>C!A&FeD!7wO`Xz;pqBYl`!1glzVYZnkbYHFn~T$DGn1I(M+$asN0nb> z6`_nKjiMW(z54m&&H9J*9g=gp(MB;beDUeU&v>ukl&Gig5?{RGEnd(lOYFjfQ5hn{ zD6bf#4&%vL5kRyO? 
zJzZf@KIIh!4CWx(Gbb65NLXk&Km_T1gXJZ;$;sKV@k2HaEk};ktlQAEW4rLkA$a%@ z=lJoyUHkGsc){XIR5&uPxHC2}CBGn}qoZz&FfK;jSlBFSYK?~_^Q2Hu{4bNzf6-?d zBVM5CKoqn|%f}K_K^y_2LcTyGl1LdS&m10= za!cp}A@d)U4yk)IQ-qe7$6?=ijK_HV$A<*nk7I`Pn1q`|Ff`oj7w9X-Oj?B)pHuR; z(t$uIr1RLL=nT#m@J1Mri>8JIOkrh3?T^raoR&Pl(r*{v9p3kL+0FEA)13j~CYWLh z7Mp-;Z&x(%qiLc#HXLL?x@XFV^;TPUzTdyod(WQF`=1p&s~6WZM9j|aj+D~WsI-XG z=qT?uo==}!U$8%^a8X&-OilacELmffjjkKs*LeJF_dVcwK<1XRisK|;N6{J^7&akbI!ir+s=ec=`59ULG0D)8A^ z&ttRPb$~P4KZkQ9>Kf0!`K8{mFS znns&Dw|(!f=A-*VW8->4Q=>MXU4HAsU8|(W`=0Nac1K6y21VCaUDb>@!2&nU3^(sS zIP!R^!JG9L?3Ya139fxVd?xs$s+q35^(1hYtYE38=BOoar9w4RkTF$~HBqD*%Bq>D zs~M>xcuVP~;taFtbFJsfUsarJI(5e5>GJxr8dudWe7d;r(cUjHUxqj|sjr5JE8%g3 z2n5g0$`=TKX;O@lbA=2z&<~prI&(owOcpe18>9B?9&EaYZc&pY$sQgB%q4+^C~ zp8;9EH9i~MR;!yR$eWSn4Jiw3=N)=>@LR$6kjyYPAPIEpZB<7@AEFBwxOTP<491BF^d5` z-Qm%_JVi>If*hMHOP5t{IdX>Q`c4>_*@|GkJx9Y9FG-s>y;g~swrO?qxr>$W0*h>4 zWys10>CF%UP$f(1o31JPYY+VK4U?l5K-LJmqY3I?P1vxyf`BVgP!bU32Z2(8!Zcsy zSU3GVDT{|%V zC8R+yP!v!R6#?lG>F)0C?wNs^f$2HF`^@0R`}_TT|NgwMAFu1aW_MX;?t!^E_v5_c z`Lul%ec-nkJ)JBnz6}r9WSj)x*fP;(5fV(n@c~|d$50Q$KAaR`A4>R1(f>kJCn8WF zAc|x|6)e=or#J+t7@~1h6`=xwO9EXw8iR@o?dvwe)tjL8b6DIA-v^}xM#g`5opX3c zn5xPT60HHlO$rXf$3`v9rp?=1Z{CM9=alFGmq2L0Xw^_bFAbT6@QH*@0=W=7G*pN2 z=v@7>GjdUjDhXl~s6y{sFuUQKAB>Fdh)zghjdU^D9oVmV1UVFAoFwS&Xw6GXN{WcA z$jfK-4T&YNts5p}!1vzpt|>fo9xh+bRG?>1HmqE~zHaxvbiZIhb|Is+uU7<_5H%ri zM0}1|FbZ=#zIbpL%^S(E@83Qd|HQt3&D#n`&SGtbR<_=-6RowtBt$XLs zE7j6ySELCEKtM;}n|$K8y4FrP_HhJ4_Gq?GB>(e3I?`j`8}-rc&)di9O4 z8>h`q{eJ1Fk@e-n7CWX`YLYAg!5NUOXj2}{KjU$;WVosoHk*atSovfo$(l%ZqNqJm zTJ5~~)SJi0%)g9Z{{F)K?we+rpZqJ#OB7n96c?M#SoLh#^4BZRWd2DvQls8b(X%$xbDDx7 ztg?+BxK9W7Xa@F%dJlCsyjiPzQFEQ`%C(lO*I92eyfka_wds>?>ua4+n0RWOY2bs? z*5|&&`qy-{uw_l^LJ4eY?ud>_=<4YG+iXoRihbB`xD!^}W!+o!dMd$6CIRCDz$+Zy zfv#?>yZGU1n_!3Lfi{^yFZRGrX2l;1IG|&xtY$+a-vu)sPrey`wO}|uttNetH`w0V zmX?qd6B^0sV_`={eu3YYkJ#l=RaKOb6yM(6g9?0NB*}=2ZmXKA}OL?>yh$d;D_WDUW5_qscni7>5BmpQg@Ou%hO|jhr8;#UFk=d!1%+@!j#K zk*;|?E$q=Iui>uVs$pIqb`6#kz{Ps1oqNpr2E;7U~bJj=2R)rWuq0uHi}AZrR?jY(=XAxj~uAR#6yHP;8sOym|4Owiz?I>gododJ~|x08$4a z4FQq_qf8|_W4vzJ$rI&C$*`+m$`NwJXh+BuKp~qyQdL=>8J|&>p2cbF8P#``DJ*g! 
zlKp(<4v(GU85&|gQKu4xWuV(NcV1R>0_+(=Lq;PI!^f6EP7MMqDa!T@GDUKccCKLf zC-#Yc+B5TI>9-M?AK-te>&aBe#Xk$eqn5kaQyAQlriRwM$fT$rN%c|5aKpA@74>2g ziA4lr8W1Ui+9UfL8=B;c99JwEmWW1E<8l{4h{RuH*e4yszOfjKvG^|wnMS^JREF;# z`&Y@otXHLfHMhv@v9JYP#KYESv5@&AI$%V=WDYa_p`q|o=^k4w`NPPAO~Qwg0ZD&3 z>?wf4HYgf^5{?LR2Vmcuf>+~jXsX@^(?2R4tzLQ}f4)uT_K>pMkp&jf8PC5*x`u^% zdc1!gkr>*^Zwtu@WTGNqJ4@Kx-O<=t(U2Yy_rlrE)zvZ5-@70u!QtJ*<#!h8IZZ}X z_XL2Kr~{8~L)39lP_rcg8$h%N;5JaRQbT$!j{we+h_loH4=1VlU7g&Jt;c35L#PY| zQ%CxjHD)dFHdDDdpLCVJz<%b=$J@VVeXe2^zm9d>^=dus77myIV1(DbH*N9VHP_s4 zc%;8J4!V5MbMtgpLp6I1ty?^!(@ckas7`LpIH z%~%w?<9LjF@PJ=ZpWXY6)xWqeSy-!0>7fAz6>ugfLrvwf`&R9*zmCjZlBcWRtfJgW zCs!*{;+HM{;p^QsN*w|;z*Lys`_!T7%AZ0asKx=lx;pZ}pd!&bjs}sZM{Av+m9TSn z!pg0Wcl>6nH}BGvg>MPuAIi#oMC6UeC6M@d$_OAbD9T(CwHHt5mep#7$RrJip#9uglUqfyAoJC$Vl|%}$mxh;h?-A?gET~<#wSMc)&h?v^WMZizrCUXDXxzAg z%QxX;N4Q}ld)C}W-6?6x2I)Yl8c_Ri6pTc91YDnvX8D!N-3`r1?gaZ4V`RkQGO8M@ zq7vewqY~O0WWOSTY?PM+TUudDFYLlL6B6j;!Q8a7OjV(S1o%Y2BI7GHHKH)=U+0am>+5n7l6(Wh z-!ybo4)I}CBiywwVeZ@lb){M=XjKADhHB-%?klM*hr@g{RXn+Kth zu^5a0%0PKI4ipXw`2v{)3d-H^V%SFSP87ZD3 z&LE#F%K%E`YouI$c~B@C7Vyv*EM)fy+rzo#)|~9ig1pgI(}yt6u#A-C>e?cah}At%;}-06@$HGN?we_k6v0CPTAJap zt_-h8>E;Ycr7*Xk((>ukCPCNdLjU6~CuU!swc2>;S@%!fC@zZ{x4AVPVM$Z^GZ5az3^5 z#m?E5b2T0q{1$(4ulMgO%r|T`-FCt4^wS)haTismJedd{0ALI#ckmPboU-B4p1ns; zhWN%;=GDnuY()b@{a^iqI3rLjH@cPIp)rE%hw|bW4+?5Tbv9Wxv!2W%Srbvh@0n~a z_%3~(?XqircdNKH$P$e$;t0`GYdKwSW8M9meZsqgi`iuiD9Z2ex#j5Wh4#RZ*BxD@B_$cj z$%(1yCBDIVpMu-2o0V_-HA{C!(Ds9sCXWW3oSHA3d;Q1RM+YyP*nNrg{874b&)YdO z()5*@sGw6B3{TO9yLZFiE@yuJF=%Ypc;t`beY;zZ9boDzeLlJ??xR~?c6x=J{Q6p9 zsBeAvH+XPY4FK5$L~H_-kOL#K#2ChQom7P~qEf`HRS`x`U8b+PR_~8f(v77k*Wn36 z0QKaF2;^b1Vi!%hT~(E-huT2bm)Fy&L(4mw4Hy=Q~3AI#8H!|C$_YrY<#jQRSIEU2tOaP;`e zb3(aRvTR}lH4#b%Wffuc%@V$tKO*16O0eyyP{NjH8fCyIEi0{!2#-olN$BiusI4zb zNlT1N$W2ch_~t!){I~^;N{h*iXWQ zyc%Ao_fXr5&MLdwJQt+TAX7~h%MfuGZIY4v{ZDm!iMXe*FVDC?aa(`LWbXGRytuQX zQa9`lJqWwrl(?^Yxk=LkLnt!1MbvtryY$w)J-0U=xw~uqu~om{ICkOw`R86Q3Yv<( zB?jEIyYoKG>uZFMe@H-DVg{>~Ew4z5(GturYb^Nu!y~dj=%m;2g-#2!?DfE7GO#8p zywp&4(A2fi-t=b0rk5)gJ1)|_r!~XLz|cxd{hErV@p#&kX~5?&tto}h9Rls$lX;Sj z4AIKG6hoiu1hesmZ}f;pfOv~?^2en(W>OPuj<1O|4>sTKb#R8;5>;C*4Re)w7KZC> z7VmdIeD#OX^~h@%KVLF^XJ_u@lpK?ipPXJ?m}=s1`~F9>tHCDY@6RIrsi1aCd6o0h z+hO-M8}Iz(;hr1sZ#;N+^W4K-*B@+p^6cmrzdO&r+&JrgdXC*X1;@$YsV1*tY zMnJ_`YoKUJ*1I%i-KG84?pj2COY!sgHrUzA;q>O0W|mf0qa^M>i)O-~2q?3_>w|-V zbzys54`|%epqfy%pK4Q#X@(Zlj(8y@34TF_~@dv>1M%N>{t1>I1{ zD$0uI4Af!Jz-IPGe9yW6FmA=>AfWh}tesBL%$%{P=*aPm*RP6RJ#o8x_tMuN!5B3Z zHZgY{aXNA^%Wzz(GOk*i&el?4oVx&xZMimY>z{a3y!p`L_L}Yfg75Z}@zOs3dsu2= zeu2Mtcy}|ep^BZE)cw)DV$s~!n^tvD@l^oSB#hg1xVdgjb^$|*0`DJ z(^sWWTAW7H&L$}}$Q;s8R*yjBlIU#|#X1$GMmo6`2dZXFg9LmB08Ic?C~6J?G9Qqn z2>VEEK<)yh9zfus=0Bw2#{ysgd#x=>H*4ru0#J%0*5h%lBmzT$#-WmW3=MiN zorVFv@JmqRbd`3yAcCF3D% z#lZeOD+gY=Ub$muvxauHhHm5BRn@!xD7^eA<LvIS4KV-|pqw06Ai_RgVOv{A zXh>wBe`HZ%Wl2d{cz9@LR!VVkVNW*;yK;uQVO$WjxZd!`_LRG4i@jZkn#xB#zmY+u zRPGe_Q!-HU7yQZV%VXF#7Gp6M|3!gqrbTQh97gaY{i{tz=8z;3%6s!hhjWpX!T^K~ zurC%ixG{6i*T?P54Orinc^`J=AV3_JNvNRje~0X>0I!tS=RMD?cUU>+(ezc0t1bmxvI;bH46^w1*&ho}&sunX z_ToS1-S;+%s0nZ2H{Y{2xp?QAt)oL$cCP&GW2--xCsS?}NTZVDI!C$~kP*jI_O9Qn>+SKc`C64oszu)bYc>m^yQy%$mJ*&KLh1{QeW7f<|;})1sp80g)n$+vl zYF}-JwaZ32H?cZTb(il;b~zArX1tju?J{2bwzk&gNx!~1pVW}Xg@Yo8UxQyoyY_fhN`bUO*4)%65e-!QWJ~!p_H!l-jYXTItVra+ihwnd${=CVXHoaC$ zqiE^UvPYJT+!ASH$3S6mUQ%LAXmD6kLTYaxwrzmfCDj!*?K}H zVL7k726p#DCI{6PSsXN!ED(&6fshAZJ#)#)i1+vP3W{41Q_2_47O!6qkDY)gFH4V} zD>JjtcX7>*3h$QkVQy}F_>ZWJoY16H-|F(Rgy^LEcW&F;SsniM$mMHKwr%s7H9JmK zD;v-%0I?Z>7TL%x8jr=Jk=RxM8Wq)gk!CV&fJh#~;n@I()LZa`Av|$_NNP}|XX=hi 
z+Oxms#2KigQ>{R6qf>_f7$Op+c!Ct!;NnKm+%ArX$4LlysHO~66=Zg5gi`$(xq35` zRmK%goH9bfa|l47Ovae9O_AK7KQ4R9%#SB8@K8U<^QRq`&ks*AXdr@S8o5_pt7`j!`ho_S zRSKOQyKY>pGrHB`=>@CnATnE%$SiTu3dDtfsW<=CmM7~)5X-8)La|8tQ$0W*qyKW%FM_r)0Yjm21u#s3r%G~R{;R?ve`gq)JN61hXA zROZ$2b7d7Wpr9HK#*QT4V1;jGe4p1HIj#EJbW!OIL8UhwXn}00m;*W7kzQ73Lwk8q zX;yktW=ceOLFlu(@0UwGr!j(cIDTZIziyZNQcn22k-|_hqo$8vozfclI?4XN&o%3i zM?s~&4ZNy)QJHz@z0+TQoqA7ys`2=F52kIm+G_I7)b+DlM|DkrL+h>9^@sX(?{nv#9=vLM#5=|%rXZXN zM;fJ^SFv9oc;5NNWXzaIhPw@r^@Z`=y^bfs%2Ed*KOoGvuCD0A z$CsbKdI+WH-pL#h$g1<$otnXheb~Vg_h9%gmWp^{J`a0JV!$Q)OMh6M_?-8}=&g}o zvR`OoSY}pEe0*YQZBcSgbX2sT_xqRqH5IkFd47Q*{@;S7-3*A@{2L>)U)FVp_c79- zsHeFep1j^aeH9OoyNM)*rdreL6(vqi<+1T)h51E2?JejwDn?gbM?WVlKDoB7TPl?& zbRkD19Tq}cr*t#Rraw*=oWEH7%(b(w8~em%*&!~>%7Q`ugRU?0Z(eb6dGX=>d(ZH& zn5ge9M%U{O?HX1j^vK3h2ej3?e?8omo(5~`dS5(!eE-s=xAsmKe?Jiw8XX#(9udL+ z?P&j=Jy3I8F95kzmEr}?9@4Y(=dmB@G6d-a{fYiG*R9#-5Ofg9Lz_Z3%d!fhQZ? zK-eeeKRmukO*z#--`m`#-PERI@v;;prF3moJ^&07fJ7k1Xbd}ufJee1u)_|d(nsLq z@dFAfElW4_F4@?nGOij=Zk3II@M(An4*7Wu=x8=inV!1;Xvf@THGr0f@hA}p=tRDT zTGR3kNe54-7)<-9qnD_m%%TEF0ul-lV(*XyIEHHc&C6RXY|^tT2U?l%g?snTy*YdL zH61z9b?kW0V@tSwd*RCENvoG-pFG6>ZGZd2yJgr85kc4p!oJJr(srz`S0xl{Qn*ww zKqZx|-%{2$j6Sm7e#l_J#wOUuL>mvj1YsX`L1GUQv?#%Um3N_BIb}pF7K$Wd*`5Sp z7h@1sR>8ys7#ciMUx(UKL}EC^K{>$2rW!Z*m*4!oy}dsN2Za|FR1Ea-&}J5cOJp#~ zk#z)e1p4XuTSHd$1y3CEQy=!9%=oe$X1;)oD#+`T zNC$>7$bhZg;`#w`8*+(-+)`HIouI4BpU%^;q=Fkj;{na!lGYzqzgs`Gug+_bXCpiN zIugpG!?MEb>-ySj_;F$Rs93`iGDbK}oQ_Yaz7Y*E<{?&>-L4pax_8R_$j8uUY<>fq z+uYPp5)d5m!7shE4Gu_PTxG%a57%cKPgAwk0T!Bbyie>2voO!{xaxCjztg(kUTxc9 z`HPXC`MKA3=O0=3+WT$yVDq4)wF8zP{j^)%gbwZk`V$<@0ksbTD;zMztK1?Uda>Ch z;8}cn=1X^NLBzm0Lr#B>g{(f<(hr+@VRLD7PIG5{b$LZ*Qf7We!RzO)=g*(t zduUHo;*YnUZ<<@`+Z$UYgF;R(x2Jy)yHr~F_1@91YTDByqP^?t8&OMDKddONd0QLz zXYLz^zVIjIAAjV24gFrS{zxZnGR85zcp{GqdZ+1@zIN(kb;~Qu5{az2P+qoAFUW0Y z_Vlqh>=C4s*2jVIN$}{e)jPL{RxA;(Ud6g~51K!#i!I=JM>b!){o~w)=GoJJ96wlQ z_Mob?7I!Tnl@dj-N_7@lBz+j(5OH;d))RwX;L3~O@R8-N!dtdA= z{cWvZ{Bh#Qwq46N@7Qtp=+pTNUDs^LT(rDy$x80b1*{3vsuWZ(%&H{ghLornx=AtS z!y(0EIra%rkryvUX*@g+k8jac%hptX_S=P5c5lNr>~d7nct#^ss#72tnc)g4fI|UH zDt6#HDTFEtzD9{Yq(J2X&;~%6j$zHBwS!Y;@rbm3nIJQgRt7*o#(r>ezmig^;na2l zwGj~eNhA?n))Yu5*3Fn1xn+0Q%(?DUru4h;wvbO-2%AGGT2!+t0Ub*EWwW zD>I28f({ypphiP6f91-!tRgX2J}o9iiY$DY&zfw;NIcpt_g9|p?+IMd&(RG&Uw~54 zL&H++!U&J#IoPp}9_G{d z4nZN5K$b|_iybm2HyPyd(fB$V-Vh=wO4$`DZ!H**VV`6a`^5iD`2BBWJ~)PbV=)$E z@jt~s<)|e5pHrM@y<Ed`1h0A@hbnl|C4)+yVQm#$rPxcfXeq?plw z?Y|}M;--rJ(uAmt4^I9GLD^6y`-c5QwH-}~#i=Yf%!J&`*1VVBUOn-!uWKzul_SwG zkHN&gGdCC7J?VOAS@+^&?LF7mx<|*W-X^J-k>x|z|{9aweEMr&K-8&e(>Fw;TY2Tc5%=2t$`}dGs-?yI} zhgj{>;gR^r1fQ=#KavvDOR}Q#!-CU3M`ZecPYsBS3#zOrX{u>xuWsjd4OeDY`Fr}C z89lgw-qXp=Je+&)eRgB_e3clPy}eGXerKHGou#hLJ=poEy%tjIL)xRi4Z z>1(ezC9|fLDiM1DV5uu|_4F%euk<-`Iegvz_w!eOTfMqx`2ya&$xR1$ zHe_a?yd_5K7DsD_pF%NIa#AeTcKj!%)$>|$Z-|B#fL`*|F-?G-}BEQo<5QO z;f?jZDH)t;bKTVR5&@|e5IH0YhGmtL^rPx3VOO*Cn@`ls*E#MVx(*-lJFqKg@0Q3F z^Pe6$WP0u9-R0|-tloHF?p*7sll@k#sGc{!W$KKsd5b${EJ#_fEO-3)Y67Svf(|;F zqd*=}pbCg!Sf;Kl!T~X!AR>}^1j+!JTBWa}Pq~YC% zG;2H8uRiHs@9Oq%k2<)uZtmpL3ECO^cQ?IxBVY_6$ix0t2m1PjQtZX~0x3@>#XKUC z4vR#B|HMA2ykAhBc;oVUYz+HY!?3X#T3U7evbkADEuT(Eo1~Yv(;_l&0gkTTzFrupYb&d**r z8NK-Q*yp3e=lH0Es?IVVqFDU{0ujHatu8Syl?%nCEw#~EsW}xT zsRcQ?#f5{z$lMb-3=+kEbgc0ooOyb5(us^P~3Z z9{1v19;dq>vpaUi;dl2ew~&_52ed)}@)3z9` z-S%#qnz<^t36zb&O6R3_gG?W~JUwyhT4P;neS1x6OVYJ3SLQ!lJkx51zKN2-Lq!8) zon_CL&as>^@t&^Qbw&Ces;}PhDRmkA8V@-lKtvy01e zO0p6%GZHgXqLMw|el#^Q-MM9ZXkd`NwN*+&Y+7<$L3VmneB_&8uYJ~k=-!yP!ejY@ zC$o=v?e@xg(IcoXsmgK5PhQqiI05!(3K}+cX8RO{wN^D9JRY`aeZHPvj-hthqKTD% 
z9_-4FmPx=c_>ea;+Abx6V)kHeN_c-)xlCvoGI+3?2^}5b;<;V=Dgp`+DG?=9YTc!~ zlILFV#I?4~JKF##paB*YbnV*)t*n{D$QBISQ82_3==-Jl=7oyemwK*UYO%0tY3fG! zP~6rOmKyH#!p6`4!>hM;0b#z_iHhprsq5blovk6M$l9>BGdKX2SHOm5 z>9u>&6Q;UOH_Y0;3T|5j)o}isHaIR^vE9Vh_L*zi48vGm#TG?kI~m_hB2_P5*|T|1 z&V`$;`wn+%YNn7$X_^}K%8IoBl#z&Ccr?$!$KeD-3YVr$lQQrTU> zcHtg5s@gA;O z*$qGqi6RC>9$Bev*36892SUCEq^0I%cJ>Sj1u!iUK7Z0|Z{2e9X3iV8w)!U2GlFXO zvegD-G9l`n;K-ZrUvJHls-2ztb5UYH@*f5Fbn=a2lb3^|@bA?PZP>eLYVVCnOdaHlh6Rw#gfE?ISFLTHGE<7f_u;5zvo|Cuu0El1 zR%fCWdE9-%?8g&V+pIqL;rt4hQ%miS?zP@`(dCarcDv4hKDEJNo#FjSS`T#S#>(2x z22;Ie>$vEV9+Fhdl{6ozD&M9l-X-Hr!TNwTIk376ijp8S?&WTmKvmX=k_f8u;DQm( z{M_gVwQ+7G!CU{_wdA*j3oI7UZ&B1NC4wWUWWzW<8jB+-Bc3lT+!@(Lrx~a-#~8!*i12a_k+P zUb{NKaCRt5Ev?R|>uVZlZ|baVZ0H|mv<$SR6r|X{bU1(cY)xHdTznK?z(L~5Vs?33 z;l3vt?;;vMEX`33e;dgd>SZ-8BbiDV6 zVmpV1)5r7nZ%IFYq}1C@#Oy__U?SAfH^hf5frtsQJ%DJaHaBjhuTCUkaU{cyUHzE_ z{68<2X()xNkju%Skqnx&w960v(d_I2_Z+R$9iNQ{ZHfSr0ei-T-un;wm`F{KDT0kd z!(Ac4@jb2RBZ*I7CuQ|vS6yCnl}}=Da%1Y=+dE7>ZyCM598vteQz)uuz&GE@&fL!a z<2)QUz6aoIwRC#cuI6psT6Oq%z$3f#?T3Pvtj2uq59?JdwLi(;l$qQFT&Rv`|d3vUXcD9yI1BKLqVIL0g z@pvJIeN^Qhs#dw8cIK8XtPdV= z+Fz1?Jym_~vUtvH?wZwb>NH5AGDy^ZKkIsZv0YjJkRt{u?17+T#H{7q27% z_#s85c7XGnJl*x;^^fUA6)!$I*}Q&v^3vD6zg8?-Dx#|mlQnw*J$v<*hU-Qx7EY<| zUn5_6xUe{=U^>hlEUS(V3XlBg370Nv8>b^cl&K&Fr0P}cDq<4lTSzXps0^}@hD%E` zEUV z+KxIJHX5pCO1Miv$66Kt0L*ro`MT-V;r1OCrk za(y3tu~~n6-HgYR!EHb`r_n9c=(ki3e>xsi8x~)da_QbR?*zX;Tux56n5}554V;vK zB^5XrkZ zri(L0i9pqtTG%_^?X`2S$(C;s?}pksq5NhyJesp$cJ>t{WEXrd>8%`+@KAqeA7oUB zt25cznS+@xzrQ;B=uBF9YD7Y~7z#T&n}b8Xea{OqiGQGWTT#;#D475aTO7>{s9Tda ze4K5Q@>?L!>mny)J0tl}N2zgPUUp(aL-2QTHy^t&^3w;(@D*$M~*o#6Nn!l7kV`1pF zgMP0*^;*BKU%UI0{VQlNB>_*$q7eHDI3XU0aX?HU2}x8QNr6LE7*JR4)=+6vS8h^O zsZ>%eR8T3S(>thCHUJzNT?Fv;+7s$`9AYh5U!$&FJa?W*S#dy1L#(MKQBdL&h{HI- z2q2DNY)4X%$R>z6IEnyI86i-4WU2@d*c44Z0JS8gruj?zw(l!Ha5U@~odoR-E7uat5Lk3#}cqPMob?v0_L^V^EnkKqD7#-ctTO66p#cR}a3t zk}>iV>HekoEDOm$B#T7_zYPoWzJe30povex7tBXQ<@aNWOVR@R_X7{d+oAW5!1%PB_MSGB$iYr05zGA``XmTEx@28eY$4M7 zN=?qlNY7=mF%lKUxX^xHOnXRJ>{mD}L?0Cw-5I6w z8=LZma|QOBHd6TGWcgvzm5pE2^wT6+sS~ z1F825dOI|P+~?R#MtqWgT}Ntg%B#H2n3(DxzrQ>+`{{gr$BC27rd|GSprg?+bylFm>&|Ny zv5p@7n|3zn7}U{JTJcCNxdlh5Uc9nm!iL2A<5olh}B&L|Fq;l)BY)n`SNT(KM(A zpjwH}qmsBvN&*!X5tSy8&4oyiZySLJh#Wv2Wp3TktE_~bQRe?f;b1@bnl(etFW_sB!m#Mj ztemi>n!16u{=SY5U+e&C%SW9wX{p)W9sQkMJ)e9&rx#~3Av3c+E2|;9PdvbZJnU9dUS97J z{Q1o1)5~5in&vQ-aEVB`MA9=d*l4{updyF|N3v5hW4t4aqw-OU&afyeJGHpBu5%dL zd9>)vO`J5ZA5SR8gZSCgl2`xo!6iB3TVHVSP>f^Iiw#efO|j4dHvq+$VsLZ%kvAu; z(jJ?p-I;1JLB)tlxB@0y>nwMexz>5!hPTVqO_kAT#bbbX#o=s#-aYkI*B3=)NBReZ zHa9V`o0@x3okdB#dHawjgpOTRx3fL(q?@jJxrA^Q=vZl}*prDi$V&rfMxXS=V2$&< z>+g?!O@8$`%wzx6-M?6_9{+$&w?!^<;0Xa|tGd`_*{nx1^o%EHKhoE-)t~Ts5&*wYlhV^sX>r83}x!L+@IN2^kK;4 z$_7O|&$Q3`UmV)tzJ8wT9Bp&;DOQFHjTbGywDRtg2N$njec}8%GckwD7|yFKHT8R9 zmEy4Sf#LL91RYbL_JR&xDF92-ToX;((33x!-gzcJ%WKUpsK_l$PeqNM0>~S{b_YX) z!sa?ol~?|^clBY%gKp|tOd{RI>sl-B`MU95#ND*6JfD#79j$CWTP&NaM7P)xo**-; zhS3jux?p#Yj3Wf-E%&gX+q>rTXYzM!iN1Y3&+}swPYPu!&?6~%$@%$}2s1d4(&UfvPcCN93`dTZtVwMynX;3m+tG+b!A;=I>sb8oXgsidnP#z(;w^D-u?^lQ-| z87BguNgiMgkffuXFl~X0&6~=(8*H{5iq;&TL8Miw>oO+H zfa(T(Eq$K8K0{5VLP0TU!IB0wtz?QqhJr#fojOEBQY<1Ij)%i@kS8yGSZ2S=!x6jj zgl_EX04D}G0rnNda0CG|?NERu1rAx6p{U-6BR9#EoEbR4CX;wH8jnaCL9c*4izop^ zK1ErK+<^%!Mb%*?tuAG)Rwa#gl0rLGjg9f>)Om2)EU~V3udZsRDxp@BS~Fd*Wx7H0 z(s{fIdcy=f7ny(HvG>E*)`nV|%tZ_8Hf#tjEE0CMGfQ);zWYZ!wX+NKOL^+huyu0< z040DZ01A+(Hau}k_x%U3tPcBwLs>d8GI5lr_>jEjPu7x^Z{*ek58FU-h_8P#7v5*;HFDSx#0Pqs$OSJCf&;a$yLNNQCi<_(Dw z!_uS!2qc4j!h8aJdqmwS&B;HSqH>0Ef7Hc(FNq6Ik520?uq?4%$tU+WUjsd^SYCDX|8&beWb>uy?Pu$jEiag6 
z5cta)Z=<_G2X5Rk4|H+Q53o;i*=4_Lo&8ev`#3FgMeXZT7MX22?R(~8`04qMGbdPT zPq8*Q_+{I#FE?KO^y?AVU#445A)3<>Y&`|Q6lnjhbkg|9uSX78-7$M`!N}Ik!TJ5i z=MfQ^BcfCxHxqVT3cS6;W3|d{p!Zw@Z-oT^z+FJKQ&G0op5hz}^o{EzwOdqt-Q4{>?~kXt8d{?VS4%F6FRQ;krL z>AjHUS0Av`eLMXgS;bCag3CBfQ?cL%n`ubbbG1LY)K!+1mP zx?kY3O|2pE@Xg2m{YTMoOz=;V29szl*agg^e>cAVu5&E$M(H;hH1eqHf zkq<~*WY>mc;HW$t8N)j!o-jlrb1^zvem_TR0TmtA)r3#cN5*$u|BO##~ z+0jG94I1jRe%a9BVBO;FneX=EYp`EnYEo=MT#nDzrc);?b#)p5VB#nenWtRqg|pE7 zaWh|pS~k%bl^9Cox{%0AO_o|lAwm{GlAku-!cpZEi9{|bC1=DDk(k@kT$h@hk`SF( zR#4U7IV2u!2jpT4kw`Y~gTWt0|IA^?lZ@u{r1E;PeDL;P<};%IBtOajaY+9U;QLSJ z`;XTg!@jW?i?R6ciBSO;Y`Mj@W|Ighugl6=LINAe1X`T1|wF(g8QPf57k-U zQgWj*tMjWn>Oy0~#j+=laKy|Wc5uM=^o-oZ%8Cm69IP*-?gE z-5>|$6s4nm)>3r8mheXe*vgral$H^f29ZX;h%XUT6xHO!7Rai_%+L9ln#KhG?)c@*)rhcf9E?SI9OxUBY4PD>d|PoB z+-1jyVXEE&k262~%H~{ivpkN75 zZ=V&?m!aKNHp?-0Pki+)#V&LUj<_lIkZ@nG*&_6S@ys0X+DDz8_&n@2@ zE1xV=G$mt5O1ukHZYWN@IQ@~gg~dzj+_GX3lFH%dwdFl@f1K0XR129=?QvJXoZsoP zVf;fwRflCuKb*eo`e=a1!uUQW(%=1!{X6eh(KXpVnJMLU18f%TV{{EkdNUfbTob*P z7;T?rv6^z1NP7&F9^-V(w6{DtkjbtONzcNr4+ITp5EE7FIy;&((z1E7%#f6i@|74v zU{_0hOKnVY3_?Jzz|HkVX>qy~aD_ftu-Ehvv~1{_x2UVEryp}#_^0hgH(w{u&tFYUUOjipzjwWB^ZL@2t8ypK2>bm) zcS=6&W}ti$R|KnDSOt|GuRKHlyz_Ly3X^Hm0>_W5&`@UKz#zszcoLUD6_OPsCa~C{pwZMb)@0a+5*vQn>AN~Ekd@&4-fa}(DsjIS-m9ZDZezd+d%i*3qSuL$-%1g+TqCa+V zzg+PcHRy@ueGFYd`OK&w2O$@BKgT=fm|`B096y ztf^~#?<;;+N=#N}N_%qyT7LY!=#EX}3=MT)ZMFj}VJ}m__)&|;=JR+0Ha1R;`=bXK zGtn=s_BZ(ZKL-5%<8cJP)B^s$XrtISYNIx4|JcM>?KE0WEn-4?IlJsmQ|N}4H=4si zip-D7^p6vH@qfbFK*+0xg5K)J(y+9cdv60mQcaXMCG_k1{g%-=FPzg9AL zj!)~3O3n$&Ecle47*U#9+S?LQmh2F1>0RZzJLE5In+XQCdMYN;>Q0IZwsP|xEjSZ= zD!wYDWw0VLBlwid*=3F!H@)4n;L+TrF0;2BTeV};rku3G=K*gA2D=&D_N46h;YrUD zv%-rTa{aDGK4R9w;gp z?2XSZ_4R%3;_Be;<6_}tYxm&J&6lpLoww1mgXiR0MOmj{`OO-0BZ<9 z?wK4}_`t&-vcbbN zryrdGD=MI=S;mzcXJS=w#_d3=I&K6XL_m}u1KCZ zOa#Mt(1p!m>jq$eEQOFzADPT2koZzksE`T(bRq%mt|gHKRB5iXEQc(`z!Mla91o8$ z9Gdy%;Uwu@JOWoNRATw|z3HAxm zNFf+Rm{MLAk_f$;THiKrW$xa`pEkV{;98|+(J{fu$xzTHUlxhX1fZ5AQwKn~lr)1X zBgPXsC^F)hcnS~A-r_lO6sRQCq@nyK@abS*4@5rLVzH3PW(_i#EFrvQ4`)tAV7Em@ z9+ClDicog=P<3J=R#}JofdL4+v0y*Se8`GWG6`@5p2U(3B`D;Z-`~%ys&2^p{3R|S zzNn<2ueU=iV55$P$3ce(2_bWc{awA8$rto}AEANv+Y0zxp+F)%%17WHViSA+*1*C2 z#|ht`Ci>|d|6~8h-@x(zRU5^=Q5&^U`^P51KFpypA>#|I^I@j%9gLbX6s12LJ-au0 zYip!QbMn*SnnK7N6bk8m>~1qRn>R5brPZ}G8t=QFs-Pe%DJeEcC={}6L9A}*?`u?hjcVc{?rvWNLYiHV5F<)ci-Q8Fa=S9nImAU1esyT5&DI=FUg2FU5s}eaxh+ z9J`lZAq_F_s-8cLceSuFe(d|?tg(4SUQSSPd?)OEknD5i?zz*}Cw<;}hZhG27ChMO zxn`F898HJm;F7eI4NloX27gvf=J1+ne|qcAex$GUZ10W?t2=4uA9)$r%mt=Y(w#BX z2h(v*)1)kBjx$@Z%43s(x#1M|Ng58yQyv)_-kYdlp`&%dVEU!Gi!U!=ce}s zpN7~&LMUj1jkYmnn&*`i&QX?oE;#&X{|4iY4?-SYv%8*Lo)VDz_~grj`c_K#i(vdc z*;)7X{(65Zq$#1ix4f|BOL~8yTlmBF!2vFf*In1{X>J3X8{trYQDs$7N}AuZ04r-V z=Ue8_9{9e09vT{w6qA#=`{uq=&vzYpyY|qlEtYX+0XgqOvZH-`oolPgxh!lB7y4fI z7L~>3=EtJQz(*y3(4lQ2ju_HHA_F>lYI#`j<LcIV51)GGR%E?P3GxW>dR6iK#=9$LyiZ*5xO~&m`ns9rmoEhk4Gocz z$<_5;$vFk_8J}Bw1YB&M1P|*%;Pt_rguMK;yqd3-F;UTpiP?Fdxu*_)Qy?Z%Kn)2r zsj1d0YkZYc{YI3oN0<+RCyERmka_^2PexvZWlquI83Zy*TA@Qmxt=0lO^|B96NiWd z4jIoP<2fY2C!mu&1OSzEMhCb7!XP$Wj!6Up3J{Y4q@YTEVj>QcWx@61D75d`hoySa zebL>Cpm)4M)6QM2ojZmNCsazyHUL}?k&N6#laoXC?U7cd$!hje)tiA-ovgB$ipB!b z!6E3Z4ir6+dk#=3v>8)-_U!#!QG)>&nH%-_$Lc zy^B==3eZ&|vY{$Z{1HH8@lcI`k-l4zkR|*XgOhZJ@Hs-^Fc$OswGV^;y~E?zYCq90 zNXPwcI@^eezBh2Nf2-&F=S?C1zuGAFjoPUF8{2Qb;t(eU*b$2UOV~td9bsCUEvq+clO>Dm&iM=!M9^dqTi&xycc)($df+} zoejtqXpU9uJ{66hIp9_8$L;n%(IV~-3X6=6OMde4`3B?drk~6ne({cL2sSml3!)v#+=Wa!h*7+GF{`^iHf>jxg&$z%do0X<9Z@{K0lWZJBgv)5UznQ>#Ao5E^hupUd8R-Wn;<>i&-ZWeXT z==EvuY>z!QyDS3Cwq4!ws`R4SKY@kDc9V+W0O(?1D;t~+C2+=5g8rU(b-5J>O?N*Jes)rTU%=G=gW>)wq9Lk 
z8hGqs+U>lqbUI|V_O!fx_b@v%Haa@Fvc8>$d|F`tmzKoxwk#gis{fEfDu1!pFfv3td#?;_ClQ87IwFU;A?#O3zY_iw#FzV;0bdfGo!Kgj9q zWb{VoBsH*G?tZ#=#Q4z5kk?*50fhy%JOQ_(tNr2Ax1lj9nZ@bIrwff7^RR)iJZ4K} zTUvNdUP38kz^2N^xX6U~m|P|e#z(=G3o><;i#3#rX3Wi=GA}`Ud^%A!AAnAxv>4#K z<<(iz@@yi7ji4KeguvMVfzlzZR41caNmi`DN!0_=5T3}PkhxS6SBl6Z5xE2c51p}w zXJHK=XyzBiIJ`)j2nlG+k4+^&96Id{Ip!cBGO&U?6#H;Eh{uNiQgD!rACRV&j5Da1 zHLK3huvSL48Bgpdl6e#<1n;irp)U#n3b<-kZEApqoKy6lIYT80Rf#OLRV9U zR7$&o;+ONrTqo~_J`QXj^RT66_y-vv723}P!WfrOsXTew0D!o;wzzAi5|?qg(BNNH*L&@fAg4P}=2D6vq4 zPK_30g@7pejSLX~t@cjvoyst5LsQ0RaR92-EE2JP7u)+mZ$Gf_2P*P^xrzO|q>JE} zFyY@Kg+EGVFeg0Ht@4W>#_l2cPl5P$FgZK- zYh?u{`o};~5HcEjzg=><=oIe0#&*ln+v~J%=#O_EzrcT{U6JVkY^tcwi!Ki8fK8d4 zH1F)Y?Xb5K_K(zqanVZ#4RUe@3NMG;P`{x~u_uxp@XDrgi=5|sW_g6QgnQ@v9)D|e zF~!`#Y|4C_+1uULT7JBe&&@pd>azCPvAQ;zvKBx_(tBY8-(b79NN$rA)wzaC7 zy^5@vvZm#9(m6G78OS+NrESToj!J4)iZYi86C5?iIVr0YF=$_;%|lpACNtBEI` zpHFVfZ5Zqfi%a+r71qriq(OEStELKmYlcl{U!Pd)Fn5}r;XJPy;~mGSo6D-oqQ1*z zi)jcTvY?=gJ6P6MbvMu}>q}B_@QcFS^x?klfu8Q-894xEi`TMeM%^6NU@DH<`7uAeR2vZSSh0x@6#EYaaA;F1ppQ1h|C%*{rb@RKG zRhs%S_+?jnLqbe&TXPwNod_c4q~^wjXZrZ~JG;4CJKX3QX!=@NUe{bp6STezdGhL0 zOhv;W1_DBfzzvt#9Ut>1D?Op5z8U=z@nKwSbaGO9abfT9AZ%)cJGa01V{g^QO}QKY zc)EJuBRQ>mavIq9MBrdBB=u@nJP2*XrrgPc;OoJti%p&qT?BQwx+DvpOI zhzJA@i84f__Mquqd^;eu07^F?F!2;jRE5ANqM2d=8u=p%N1}Q-KEMlbco7~C2?R() zm6-(!XtCWeHnF#Q`b?p!dL5BmL7~!6h(voth5_KqP#LRMz?ZL}r(flXv(I;JO`WQn zDFgD9h`ow9zNR#sq`}^{W_a7K#K-TOKR57$Grq-Uw?xEOlz(gOALwFo>XBDSz=u)c z(Cj)~zh?NKk!GsGbT_4EuTLfIZX?aG`+48*FK>tEeoBj0(!hO>Jsw zU^1DapK$g)m-^dE_4iu3e_ojf;fSOBb;z3#b3r0W1X1*T!?5zV1nz&R+xvUp$4}$) zg=kp|y7AE=gF+To=YobJC6zFkn?^?riqVQtwpgMg^WSWv*f(mU_U~%q?^R6y8|)KF zu#XG*+<%09k|gKIGW3UH!mrqe7&;77h^h98ut^C5k&rLuu~FwG7!tznlD;IX*vl&) z&mZTfqvWKzz+=;)2bbL={i=uRh8ewSaWN^8vF(km$RTPw+l}olUq^pJPJ_kig@|Xb zC@jrO7mIjwkwg`k*~zF1`TYE5z**A|$8NkmcI(x(+`7+nR9P4HiG_5?ABHd`FTbm& z8+E}#1{5<8?4$E~yk7aZeY$u3h27Z~CTpFxt#n#_E6*}vID`p1-$p$NOnqP*Y_`{X z_d&lyufDzOg+u7r57CGlM_!+5W}R>LgPjj|=-O*5-jrE(Xa3GR8$IKl9_9GmFMIsQ zhZA~kGu52N>e=XM8p-VPT>iT0(Xo$b=H6LGy+$Bg0A+WIv>nj2lV5au<_7b%$Lvqt z5A(lp|Jppu*~%sqsvVHN17y6Yy7psE-#&8k;^E~xS5M!&MEmqi6>CFaiwE8S?*d4d z!4mVSXWb7+6@)&E3$%ReFzxbW11qYA303#(YOVE_iW(^ZWK(s+b!OfFbD!%OWAn|P zHz(ZR1Ds`n2MIW<69bp)rJW-AFDBbk6>b2{>tO5+8Ldm>sE75W{}@9(s*F1ilwGN0 z2O_wH)44W7_v$R!>q-i?a>Pr3e2G9h59DqVNjCw(787}~0eB}svH=>Fst7yo@Z9GX zeb4iSUt@c_MSTOat{ew>V0=vTGrUg`ne4L78cK_wK<=S#(Q`iA@<4R3n7~e87#wuGnbR>WnXjiQo-)2+&&~`pYj1l8yGy47x32j3 z=eE|v+u)L!%-tK{V|N((7CwL15RuXPCa%TWJ2&tnx43*n5*e}D>VYQ@n$8?=IJ6($ zya9{LAdQWZ9tVM(;SoJ>A%iJ`!wB{vOw`SxSGH8;6=mgQr|0Hm_4W197<7R^gf{t# z1em5LZiwk0d+5{uj)@VkS$%nmh6(0`z?wDw+ui~ssx=9*iqH{gcII!r^$E?7Z^zyy4XP%yzc)>v9oW|T+^Y{842ycoSgu@cGSWGHbFa!s4 zd$XUF1YD0YyBTL?9c~j}9p>}IJSpeZ-4NTukM;v28KSLpK;8l< znM%oC)hC}FuVpe@_JStS1gGRiCfftNF>cI-2}%carH?4cpOzncW1QRx`3csBnr7;X zRx$(|AmdBGdjp&oD(r$P$Xt-3{v|*6{QLuFcRSfx2lzbv81OFW*{7(`$mD{QXBp2H znlI2Z*Oj-C!Q11IT?np3ikmdW3@r4}+p zJ-P#Ybsp4%aN!W$p?9#5D`p~>bvhR^8Dh>5kJHCs^$yZ{`v?0{l2X5wR`5|UfE*g5 zv$NCZ@q^cKLC=z2ddK@+eQCYw*2Xc{RSc~4Cz)+j-h5GBGfsulr9m1{)@uh_b5>+Q zgC@pQ7JXiQ)y(jB&j;)3Abj$n?B%=8vPKw{0$;pr3I15Y$7;hOV$zL_t_S$tul`zu zd^`HttdAj~$ff_q^S1%79(Rh$LcTsc{pj!k-`#ufZMBXy^N93HC`+hoFGKFj;MR&^ zzYy{>vTCi&lg&-O{dKTp?dp_;i^CQzO_@5qSWzu(!n_w#miZhuPFt|`y{!IYqRI=Z zMkqxgkwE$az*hojBZ4M*nMMW0S~-OVl2j8Q_F?ME41i}!Q6YgS$=foCL_V2}io75U zhbQ(+$qf*tI}sMdXiq5Vv;_nfz|#SqhHVTSflVOskt0x0G~^O+d`w5hK z04gZbEF6VFlItbObV*6k37}S4F5{1V={~*zA(4Uc(M6%}T0-A*++Es^9L>04mUHi3 zOh#7q*E$%T0=JcZNn`<5In(AvHn+{zaYqqSbS~8!ZDBrhdH}v&|4b4y} zMyu;ahW2nqR<0dA-I)bNAu(}L@sY(P1@u1p&q@^35D&n?Ci4zq)qav*jsLPj%0CYZ 
z{shVY)T4-h=SU$ocupYx%~6jef{}F&Q#gu!qc&=z_RsAf73Y0VOh8GSoTQ-SZ&ddS z{0Y9-`TgyP-~R`rJPsOL<1rzZE9MJ@d=XmAgkV>HOj*z#_wBk@brdX=q)aJOZ|NR+ zw(&)|S1+vTV>RSursgDM3`}MnMr<1c>;}hOS!iXn}@>hLpLYl(RC$TMofKS!;ssb(Lk8X59~VFR0J$fxTU@ zzX5h9lz(vy{HV44BysH}(u&(^`|isBbx!@%<^@+*Y`D8?dHglGciVtH33w2Q_Y{De zvR2w*@OUn;r&8^SvQ{`bOWAQ=i;A(V_^832N91i-MhTra>K-{W7RG3^0pM}wQ1vx zU4TZ64Cqq=EUHvLn2@giS|rs76(Q87_3RY)nh$(+E3Wp9fVbS?zN0lO*C)@I>i_AT zATg!c{`P$jzZdqdo*X9H2in2zq(kWA_deiB{KLDEoju%Ue$%yAr>B@|Y2ToXv5}wW zF=e~gme88ue%R68S5r}wm0OgOke23rJ9@>U2sQaA3JBFy%T?3J);0JhE&EkbwGxfm z%DT_p?6-8E`_g@P3>I0;c1FNs7W z5m|TwlSmQ~$pQkAj>pkSKuEzsDF8`;MIm-6D0WH7HsMIEm_(ll?e;)(zW)*X@I)S- zAdseFtsdxnwq8YLnu1EFf?5Mbt^w;W?gZpEvTQezI0(SEaXOWb_RNB!#-!AsqJqlW zZ?uf`=J2TUgsjHmx;7fG6=B@YZg}0KTURGfO|em3wsX8r^TECCPaeXQB)EUiHylXQ zR27pbLaJ=XfdlX9%Qxg+A90wBhyop^NLF9x_W6NjT(F?q(LDYjfG(* z`laQ9`Ll}dH!}N|;q3p2eE%2%|L>13Ne**mtB1yx%7d|2Nwx z_Kn)8{rj8vKUBT^q9gDt-|>F|J#3*V{2Tm|dhnnPBvgRrKcAtx&1)58b%txkOYU|#y> za9hw;n16tizKz~uIo$aHvd{}aHotAnTO)8wo8qNSwo=fx)0<*5{g1ncS_K_U0h-eh zLV84IWb04|kH;Btog6e0*$0PGdOq*-JiWqdr(KM9bX8h(c49?Q1&`12j`K45WHiNU zn%qSN?OVF|bL2&yOH5;K){3j?Z~(o!jA(dp1(3mkOiAbgeWiF^P~1{i@gXS5#_ZL; zlM#EzJC4ybRhsEEVY%y!Q=tb;i%loppHFnuM^0G5%V4bMq$OSpe7?AUVHE}yMQ*rp zVDjY^vR8FUR!T%W3dxG3Zmw?NG)B{vpyP$=B+J{&$T}*5>i}m*R&r9>{CeG?$2;wx zo5f{?#Uw`Mm3dH5;mio!Y`z+m9F6%zd&^=eD7&6G5WeqGm0l zdsznQz(Da!9~m1K}75DUdyu+jF76GeImEK;8*Hiz`A=o+IWAvIc4F zVIdTB^>-&_q||q`N-mefJa&9WGE>axVYfbuyT9FT-Iz-XRBJRbO|UjtdjFx?oOF3Y zn-UnNfZgm( zQ~cvI<8t!9RM++-qO-b8Ry(iRk)$&|nj)Qq(4nkMhn)0~G-Z&4>m%U?r3fr3JfDc__VJ}; z*fL5j6sdXw7$gEd8ExGik|VXND|aZ!wh#y{DE1LVXkwQr!X%pcBO5?|q7{CUK3oJM z0j`H4-KL=0WH7Z|XF|Dx)>n$mFo7Zj#C}X-h9OO+smnKPUQ^G%ItD*31P z``20`eszt4Uk>?W6aVd>|FT;aVNZd9&AQ=^a2^)890_wG{)>*@C=&b+PW2eYzEK;s ze`ouVL4cB8Fs@|#?qj}Ju!4hMT#=>7b`Z;cHh5$V>(ek+dMn|ton z+Ut?~&%E1p^4&h0yqmkY)#?E=PW=rHTL(h5AR`LXUv|JBw59G|i zG!uh!cTe4PyLss3Ut9m!W9DJM;p}d;P2)6AjhDTwMzTX=kmS1*iWiycK$bJ7&UT;S zm}V24{JbD5rRi%Gq@%5tA|dQx_lH%-E-+s*)@rPr6-Cwwr{oEUwkRnoSc2IugoDBJ zG_4ipdM-HVvDd=O*ulX(sj%Q~-M2rg3ddGvt9F;j59R6CN80riHb6cNm2Koou8G`{ z@y%!xIzoP5Yrh7?MIjf^4%!fMQ+Q065bgQsPpOGI|M=KUOFdOPKy?JFZsTXUnT+2V zqO6;vrBX9yTIWK0xFi!!;y{WJDnqe0y>5I$prP&iDeHu4N)Ui*JSb6=s-cpi?>>Y* zT=+abjgB>JX62^6jElbeHvQ`L&*%R7cJhzPJD%*LR*}oCuIf3BmvRC!cL2c(ta!4} zE!U|HwjpOl8-ic*yZr;xcJ6#PX-q2~AgI>{0G%Ms10)8ZFvwIk0)L8XUl(r;IBV~1 z;^lnN(ec>zfbB;@H|)uqJU35Ww^&xGo`Dp;d?n!(>pkWNzs#`=qW82KtUOA?74# z9N68@>}hPLhlH>kt>BWGkPP}MKtKXQRYlRn34LqU_guUop1tH7AT%OJBcgJE9cl_+ z&-~R98s3(XogWn)mX;jX-`V?fAv&^&B>fK~EwdvxeUFp$v&$*UASI<8}fx?kiHpz`YuC*hQdRV>~^Nc9=aZnK#WHrv?iTMIdTuY2X zs=(}%u(uR>tQN=$rDq)d^yl^$YiBvlSmV3RDAX~ow`yplc(J>?BR9V&87=ISC@nD1 z)9iyZ|H@ZOAFoyS(8t+Hk*%rIoTpw5zTolB?p`T$Za| z9ItbIs^PT>2fUVAha5Qnddmve83>+^bD5=PJBDOV!aD-s3Mg(s${b8LH}sBo^9#7Q zVdVxVJD2xCp=K_(CT^N5ze$_0Ux{*Akzgt%eM_EfMwT_EDxXo%IHbDb{L<*0$hwBw zlH#wCQSs3UDPNjuqN~yl+&wnMe44(!j-m}w$DO?FmF{wXt*LfIgBxIjzuxB1IYb8; z8B5KLw#z*N%-+2A4vl=B*ZJ*ERnpv4pQ%-kCRezP`*6fGSB+5n*r5GrIe zn*pUju=?E#<}AiRjH)PEB|qY? 
z*eAp`jQnsyOc#2XKH#L%=3saNcz#R%NGcR zOpyqiM()wVZoP1i?Fn5f1K0Pvv#%)bPgWyo>*Aioo`gK9oKEcQD2{@BGR;{@jCQc+Kr z2==2f23lIe%jV_^0iP>znqnGy7EQ48c9GAD_Zz@1A}TWH0JI%ccRt!+^32FL$ejiU z2jNg$ed6u-JJ*BF_T4@B?CZ-G*g63FkQZtMi+_l4i6zL(hV1Ir?oXTS)YUHm8DlWP z9BjLY5B#t435kVDr?LC%ks}PRy&S;xOp<%DTY`2p*%FDJ>DkXYM#q7 zwOez^CKEMGrq8rnqIpbnh1o3kurpDWZr&+Zw%%EyZK5ZARfS?ILvdF({tCPwz!+qmzjAHgg`>cbJn8Iw?aPMRX5+?Kk5fFatZ$?@+d|il9%elQ!idu=Zb?nrxDv z{U$3jB{3~0BK~toWNuh!der-f$Tx3-1N}d}e&*ofd_Uk>$1uI5x+*;EQ)6dUy{Nkt z4i$D3WH&}UOuF)y@627#W>2slD|d3%(%UzU!=n5<`<_DHiJqESttE39)rxukdQsQs zc+6aVdc&$YpLD?&GN{&~2ne84LB5+Pn{PO$X!Wja8|Na|TTj1a)`o{MjLtS|D-=qB zZ_P4Sd|ZVMC#Y zMi~LLQE(hQp^rovrlM7Y?SPOsb#X~$AGEy(m#*f^$)Y3ah+sepG)&U3^7Mo?4Y2ep zEGUNkL#S3COMtbVjnI`)%ws`5Q_}p3Zs;ExhD|g96mqe?2;t8GzL;#@zm9#sdeHw^ z;sSp`J<%^e{}mHLCLJ~o!BU#2h$$*%!*4va^rcq>=~xm4bHCyrZF&E4lKMYwqu4iU zqxNrWKU;Z4*vgWRNwjc`{b!fN`li?o$S>)JdtU8~s4;MXOiOz@hAW>b(Q@zF?dcNz*gOedCuOt@@gcG|( z{WYA15Bb3vO*w<8S_F5bKoEOcV!==Ui(FuY-15?x@LUW7_AY21#ck#d+ZOB!(n`Lm?9%w!QpaS4)fbKIef0u#P zcwGkq_!CUIvDoa9gNGT>saV&Ue=gtI_+G*sm!j_myyp| zELLYt@1uLqJYM(~^w+gP(OGx*{dZj4N-|u(X59Xo@T@WYcG}ZiR-w_8^E0n5nPfKq zXzaPU9`k2ePMvr{b(`tr`|($DxIt?@7AsxVkh9jL+RLF;eRt4kAA)@(Yr-6d>Fckp z2#S2Me(916M^BdJmH2r3uGzj>WA3;KduC5QGe_^#gqhdoo_~4qfc?Q$f2?$HvU9Mx zwQ9*a9W6tBgDFckZTi#9=%nwp_1?P{`Yi`n0mTW(y8{G*$Cv@*sLf%`E|;ILTkfEt za0PjN#u$1}J?*ulsN_8qw?jcEzrCocHEx09Y~po{b|e){m?q2G(A0p~AW^LZHH=Iwp<)RDWsZnymJJ3jFZdig5k zsb@lrt6$i$X;vhC52}iu)7vE6+zdVP1AIeC#etP(2}(wKxxA3 z73{QXcv|`O{QGMz$xfy(F4+cLyI_6x=!vWTeo?uJ#Rch~@A}-i=No+8q-5QOZxd%U zPn^vhH-oWq8#KBJPhEjKcR-`F(Eb)QGU}T%t6NibhyZ$s1SUH70?+|jh$;E;0SN*! zvTrauigd zwA`?gO4Vtj{=8Do0CS+btCd6Vulo8qD>?hs!|cmvbLK9nl~?J;8bKIxiVPyP50!uc zT}0WE4g0#>UvlohgVW{~lJTFZpjKVBhYSi=ET><)23>E%T|4QMC#Ra+Y>rJ5NNQ?1 zSSPhWAmH+tSh7Mq(#3)P=@h~S0c_(z1{3nQKNLmLznLt05Aq%6GnxN-BK8-(z25_S zVu|OJ+{hP8>h6U6f6#Iv{*Ip_k%-6R2?Qhe5x}7yn4Qq%@v+p_%zsBjiF@r(v=G*F zU<(!n6nqzO`JufqihZLtYNPhgO@e(WoQZ@Z5`8HAq0Ul_ih`nDZ~~!NBw+A37}bgB zs1_QXCu9h?479RZIO6Vp#Xg?o;1V?5_=(fKSn>gsPBOA zRpGt2S(ozj&!%MNxa43b9yESmPmi-t73x&xZ z6O6)7`eu8&KD+fP=r!bwNCF~Hf-d6FAv-)NijGcx`>nx6BpL~cV#jB4c~#$fKHm7W zeyz9S1ZyzE3^)WY`A~Fj|Lw^$%r)iBR8*bE&h(i*#!OZDA{c*5M%o0Z+e@jMR~9j(UTlW34gMY4X{RCr;lx;rYfRIyR=gw>!8v$|2hKX3(=~=TGeO_gUs;w(8OK z*>_JIO!GAT@+y6(;cZsj6%W&kfme>i9$xZ#k+zkl?iJNJm$k3IKYYLH_A0+svX)~g zj+$t_<(vxm>Rn7G_^^xeFA$MhL=#O;*y`H*vDn>wD4in|8k0{KCfQ%2s)#==B6+yVKW#bxh~++97!gf#IE zb(|@%eL1PJ*_jXPk@dq630o*x0>vzjDiKv%xu_W(_o5fSNJbVj^wtw&c>u=_?j$jhSY+)o8Qnli98T zGD+{12QxR+emoOsVt;XG&Vq?;L{Kj$$B|cJ0zx&3lz(7POXTausAq-m?{YWKEjLs} zu%?YH4W-l}P^eLvoowWuu>Ypn+jHTsTH|+eCv~nu;kZ z)J_U^PaJ}Mcp4xL0#YBkA>>D(WAi)&81@Nq*yuJg0aA#NOoSvNn@C_0a9k3BPoeSw z=tp*!lJA|nc=+LS_#vj!`>E$09|tpYqnoBDP9HyEef8z?c>$XGp8@!aU>}i!VCy#k zDy3yP6e=Bn(kb)ntvvak5}?sV&Z?!2^Cni#olh z?YmR6zOV+^-&cqjUm;&g7Mm~eGeY4oA9f00Hy7=@f^Ge<0C_-$zor@1x4_1B@%LI} zKAIjDiT*Y0`yqWGk_aIDqKEKvDjPN^?)N+|3Z`6SY0W_(PlTmJexTkDxM-mbpT|s1i683gO^i=yZfp@@2~)OUWPqh$WZ8~f(>o!* z9@0u+cP8veM|^lyYj#k{$Hcl+E|Y;gRy~9XUA0jKHU(*G8eb7QqqIWO_QZ6&e^Hnb zU(t9zLz$NM?$9q!>}U1{n`7%>lgiIn!R7@k-h(7X>-2{mbI$2mJ+!xQdbD_j`JCmZCvV+1d+ojQ`oaUZ zHzqeG_(;*6q2)0XSSW+rssO9%1NX4bTsxrQsJ;2l`nLsv>7`L-mgoKMxrKaq zi@Z2ZEzK*ptW%z%p}*E}?q7?i?3upn+MZn}c3WB7p4xwQ=i(g~4xaY+e(>bM!;QN) zY0lTt+^!>gTuIJK0R=uc4Evmcric9UfH`{uHcdAhH~yBIyd??TB+PlZaF6HKkc`0e zkoUzAF_1mbFYi=m&UjXAx zz}y?)>iwBavzF{XyF=YINg8Ge)nU4 zU9dDWI(=-7@x@(cH;iu>n|^xv=7En-WJGXeT*`|NMTd`OP|5jJB3n^T3~)VaYD1GJ z3zg*%)GLz(&5FccH2aCR%_8K-M1y%awj`v7BSMtFC z#*d$RQg7;1Rec>rZ5=he8M+hpscBoNY6U4MHWJA+q6`m>`4Jc>?V%gv_h=tl=SKh# 
zAwMEIs7-`J^S?X-5D?MnZ5$N)D8o`p-Q#DppT9vfbqrgvZTqGjTUM@~w`PfbxY5DA>r9kEjMYdl>4LS2%Y1y)I<3v=-@Yu0XOP4->dDro?yc5TU%}p4#H|sB) zNk4h^^XW^yhyQ}wTEjTdgG0*!E9TAp_92Wj%*DQjd^B4j{84*6vJ^uz80f;04U;pW z`yK98;{kKCfq(#JQ!`{T(eE{vN&A;;<9=vbNrrQXe(;|F@kOYz;IH#J#5`%x{6|7j`+zPrnBAkAt}5$2j6wF9MmzWs- zDE5uosEyh`H!O#WE|Nu9Cv}RAD(lS!s_Z|Cwm958$tVg1K{1I6EQ3#EuTTuPH;@s^1_SV|U^3=pQ)MJa$ zj0la-;r$#*iA~Rx2tV`LXvH83TcTQK&8vif(xPnS(NiFFF3)-X*(N`IMehqBh{YwO z&;L+i>y!Gam?5B{@Ww@-Am*|W7tKS}u7tyoUC(H$9;gjY3@IxsON+{2HE@dKi|VVZ zgb4Mr2H64@vIlLLRbSJblk)lJTuna8jyw!`5$uz+wD8btOuq;=viW%lY4)}BHvzSl zTVF4tSP&_8QfdxLs^-$G{pSWTKDI(ZY~#1ZS2y1;@%1Tkw)kjt!s?F$r}kekHq9-m zwtL{S`}$uVVO}vcF;63(J`H)=%53+IeYnYO<9IuLxmy$oCz0-=631w1pgW+t;FL_I z7un7BjJn+pYu&R=HU=yKH`NT^Z!vhi1zeYwcEuC#QNSI=Nm(}L$%U34u`geQTRyt3 zdq){Ph!b)U1+uk@uG>^y@AaF(TW>dJRJgSCgj_J*Xli5hJTg2nGw7rDn(7yGXs;)+J}j&b-2U3({Lw@A)y4!; zsrdlZsVer!%jW6mKijeUfrE`xLioqn;2h(#ALh^gsHBpvrp?8XAs~nmG*p%QxO2lR zZ>P6kKWA1J#a5Ig6z1i|MW@(VJMY-Gd-K*Ut2S=mf6RQ*@<-|lHF5-0Bo7Z5iV8F( z#a=*qusx6IJx)YmZ;AGG>#u;bA5HztqQhsAv|DrMskL*l1q}!g+We z_7mHX;~?0F6XMWlpAf-51o-gi?}1f}6H>0gz9`4a<-r4s=eR+;HYXN^DYfeqn8P_LuUnRdr3R zU4zVF)<^-INX(PG(OLP>!W^zy(XS?7WS~=Ibb9cvXJ1Q`Bq@Qo1J1vUec~Vf?N`N9 zl=Juk76TpG!r+VO4Bh}&Oc$cf{=XexlGSCi*<3DH684iA0)JgyU1fFY=8x9SJ_+=W zf@NmX+iz<5M_s6BcsIx{Vxi^A?2+NXqu4iUqc&>)*Cq+V@v*|bk=i&R{GoAxteO#5 zIaDjGdRg__GTUkI>k}tFUG*z{R?07@WA)LHhisf56~(`3=Kr!39!Z5@*f-KFg0ZC- zQ&Rm=6fF?9RDCP|oC|sEsPLe^{!VOWx`>4=oiU{?k$9vUI6ZKI#eKD$8S*rr#&peKM+l4(gI&@AXWU;vd;mCRNxS}7Y z(cRb;`ynnpG7EVoMTMpJ!@d3U`~tK6lWLP18Oh-0+T#Q!l-*(N^-rCC3+572>sDw)%jvtB>ydr(CJ-Hsy9BmeBInG8`9$`L5 zw828wR|*wIcEyps$-o@o9e|RB%o69h`)qePe7w2VZSr_m1z=0o4_c}GY&|g3Bw*@v z;GWW&N_+E^C?iKN&lk5(KiZ+{NkMgzeR1f~Or@pGwGDiiY4|JvuDZa13fyog5xSuJ zD%?`lyJ@)DV)NV77Z+{Ltl7ET^^S8|T1sHR^Ng%adrwE=SPFg?(cmuwwF9cNP3CTL z+`8iWn%!0hj@TWw@i*CTylLH$CC9A}yS#BcbN|f$V(%@WqUzs$@$bwq#L(TP5(bLc ziS-)T-JPfybayv`0Tzm)Ac%lSBS?35cMUPg6!X1%59<4#bMHNO-Sc1Te|~3OpS2gx zFq<9Y-p~F#@%cR3Hx_E!=?Ytm^0^A2+ec2y4F_zn;4ThtD=z3O0W5&9pA5Lcw>$8l zeWZJOTJGofX-CdpIe78d>TPNU$K;o}FO;`aR=leuW4oO1-G%w1U!^Inn8_j08HV-@ z^)h$-1sv&tbUGTSpqENTb<3y3g!nJd(Ww}eaV{E0F#73z$(3JTd<%J%;+0wPtfKK% zMuumw-(|}?2d^C3nfK{)MMD19M0T>s;LTMq2rgZt%)t?{U z4!wTu;)92m-P|2LyMD!i;0r>P%%7YVPW|v zB*iKfq(3RE1S@Np%WIyHRXeS$;yGXAjhsxlqFSh=nu(m2tF%fwzi^Wvfr4@}cp&c| z3wv(CVP}zr55vV_I9L?$Q89pw08@UzBH$nn!@%IE06PW15T7VnW&VVoLD}(>RUtvt zrh1f1!DMk8YA1_Js2(Btrp|fy&Fg&prZTc%RW*G34qm$oMTN%+cqk@}Y`j%Nsm|X6 zlBdyVafSoyTj9BjHS!9%k|N0V`-S*AcJAnZ6E)f1!RYCnY;EdpX==~S%TG$pNKVU) z`4Afy`zb9WwXS8PrU?cF!?mlZG9nZq3{eE%y<;Qkp7B_H4UgE1cm)>e|61<&@6zq; zFAXeI4}pzhGCBj*CZwXZ5mc}SStu-sw(hZ5EDneB@3+L|5{bmt*4C=3s-mKztgNhr z)Ys|tzITK5*InUTeGb3&n9%D)v(f1Sh!(67&&>YYSEH}ke~P?+?2rAi|D(n^Xup)IgAmSlZ8WhX$68f6Dq#!_TR4Wws>Ed(bZSi%p$G2 zI0IBTlV4cS+R*Z|!pmUIZGg&y&d8&Q%VLvxC=fk2GPmeMd}7qIk0ozBihVDpnymES zvekX7k;4wtXT}MoU&=cM{9-Zr{0ct}nwb z`aJy5`qkvULQ){S{jz>0@s9O9x7aO$Kdh6@m(~-wESQ{8Q z?>u0?fA_;Z7kw`8cRQkZL0!#izOc18u;)i1sgIygkQByMgy1KOoFMWWQBNstFWn6m z8!R8$Id}#+IEVT^dg&7AasK8u%^ld?;hTibBt*?rfz1-Z57%{xS(=a}j>Rhn;)IuUokX0PT@|{Y5m{7 zzWVak+v&mCzt$}BFf;r7^vxTWC(o>$i$4Uzj+Dua(2%pH8^vELfD#euP9Rc=70+F^ zvdYSGGUEBfr{w0m^2XGR{1=h0@7mlncXu&$idmo=CMHlSPhd#nm;lrhK*nO7$Sb$J zk6$@;`OfX#2X3uiWw&Of-;SNZOO_jJ>mNIP)BT$1W4+a(no2`I3%FnuCsd0UE5`}9 z0KR^Jp`fG*VBqh${ul&3_`Ll!+It72>C00I!V? zGq=oPJ|^1Uho$lHv2Zvx9|Ha`92_dGLc?Mxe1IZ?;|QWhJAfCO#EO!E07*y+O385* zm4@c2*KJzWbn#SKYI^xNv+DdiqtKy->Y-H(f@$R)1NYrPX+EA5V~l{<9y0#hjtB=u9-! 
zL^Eg29ZltpyNfnFK$N|QU3;v%%AWqNx0=ak<6l>HiI$>L@D z=~J^rDkP6{$C@Xa+Q==_kT{J*rf`(ktn#-p7>x1p@xH#kf`Wp)ygUTzWoBlUm6ajC zK|bQbspipytXj{ce217ctB_YWDw|@6q*@Ln@~RAfJ6PVr{zK{bV}I?_H|OgM%%+s%+s6vvBQ|Hh8+ zR~7Y_fHTh}(YYwnL$#7;PyuH+HOrm(H3kk@nLbU<{F;fT?#4*z8Q&@NSqcNKnlR@O z-Y?k4Me}_u8e~kPLp#}2WK(SR?92>gQ6?bEzISv@TfYHgW8J0k8pNO^Mnd9Z18vWa8N z4Tpdu5oeS<6%m*Pya_0dv;%q$Dmr&ngpU#wZmSXQh)CLr$T|QSM1 zC&QGM)Culnc+Bg~Mmuktm<9zsjR=2ZYHfMR;)?lebHaYX`Hq?*wgkKrKd{B(J;X%3 zWch4G2u`A6RuT(c^wv5WZn(F7#l?-=?(MaSbJ^y#dx^Wgrk^&}P5{^eJ|7{p>}vyX zHW(FOMR6Npp_7<9-lx2SoSwdm9ic)ZkxpfdWmZ1lbX#$KuoiN5d~OQBR#Vjbux{E* z<&JjoDK_5}RGMI1W0S+FeG}+~NuBH6LY|pXg{fDAjUB#N-HN(+|Kqji#bI5uUC0GF zLZoG zqTLrrG)d;Yp^~P^bi}w*sJG!l@25m{onnBk!Q=ZSB>JRf)66V~t7=ers2Mu@|2Eb4 zYwE{bihr&*8dVFYq0P{wvc9sg#K&fj%&a49B7eNDrPWTuDJDwQU?C85aB#4#t*xS> z0)cvoiHY^~_5J<*G#c%n+vjje4A|T|nB3DGH`tmyI`{*^Auc3w*z})aDzC$2=8taQ zANynfFWcW!bbr17cXCp^sFcmCpR#xoeN!A%0+h<+AwLwnaej~GKu68?!EXC>$N9F4 zMIDrZskpR_)&i^LmtNe+>dR!HV=Abqqhq7O&cCyLGrX!98&Wus#6)o(g^QAKk*Dn# zkBy7Y3!iHQoccW-lUE^OPB98H@=?YGhx?vv3|8Sq<)aZpxpR9fM)^g*miQAZyS&jZ%*pz#@4y5>awpbM<@h3bV5oZ{J__joEO@)nL~RtwTHaM4tfWSV3<^U@s`-iV?R0(&w=A z&Phoh5IG#WPtQ(kzM0fATS+}@aeY@ceOJAcAFix5+x{*m>fM*OS4^&-ymFzbwQFQ_ z(#hHVp`(T2rL9}-wyImGiP{MRQ-BFXtH$_HVN{{Ul}87*1G>+aYzW`F#$}tK<-TpV z5AHNRe9rskv4Atj-k#9**5b1QB0d5*4?u7O3U&f&=HeTJ*1Yd|W$%CM>(`G_@d-t> z%}f@W3$E`@^!sw>a@=VRucbmRi-7skwPAOTq<=ivGq{@umkiN#s+x}!=iQEb>*ViI znwe2wP+D7Cn^s?7`^iuD&Z=cDYZiO!Z4cjKk^JCg{)^DJFFidTzkmMq$F~Y3{P-C4 z`qlGC1;trTJ}$NaJ}K2zLD8Qt8#}u@eYkNdbfr$Hltd-q9|sbXfPf$&Iv=)gjaL1G zYYnF^1?@e5)4|8b!rjR>IMCEHBsR6p$+6=4vEIWQhE~n1IJT?%`nA@EKA7Fslvr2l z`?lurx$5Qmlj@2zX{i=L;V4aeyU+X3nYw74ik;s(@fU zzzhh85Jkjm)HE}etb8h=aANU_lZQ{;-oEqrwVU^@-MO}7?@_JA=atmGWaWxPMEm&( zBf{c!^0F@!)lSJR6qV7Dlu^7bBatA^*C7NbcrYV|CyR;HDk|4VOE&`0fCtl9bQ;^Q zPCg71iy>pNL@aI`gCp{4xO4OlRK^bjxLCl&V;DlX2?hD7Mf&u8r{J2uppYCx5FOe9 zg$1D?pb3M4!#k+w&&3v&b!O-1)i#%Rbv5*~x1cV81vzB2Xh37bi1+PRO-PGYPKwJ; z08G`|t!$7BDb4SepOfF~|p|Pn*^oI&g{Zbf3Be5y!%+Q?oW6mOY zi;lRts4s*2P8F@#RHw7FX~*v2!$-^9y;_KqJTU_fHACIp>{(jr(R+|*AAyt*@`sk8EHZW6M! 
zVHOfP!@0643;q(FL64AzY>LBUqHunyNzZb)>?STfkxO)=H7rVYl74#>SjYk|Wg%~Q za1ZFXEcE;qSUcLDSyAvUKkIv8{?ybIeWlGFTS0#YU+nGG4!Qz*BW#X^&OV_aBTSDTU) zUtf@iKxf1|r+m!fpglfZCW;8hdO7WJb@A7pn4OKhsp=>#;R5*V0NxP@I`Rvc@vGXZ z|Mm1Q{{~-&B8zPytCVeo1uOxtdyglc;>Kfd*aDn8z&Zo%7xPqI7c6t$^tAk)PfXas zb4PBQ8(%O#zxLc(Bez3xx3m=lHfq1wsp>j!rMJGJhoNVdUvyQnN${gnPSHy)*hn2+ zE^}L5##xomUJO_Y0DA$9x1gYl=wg>eGRGws-O#-rdFi0P;Wn#x^ln z{ShzYgEp6A)8E#&6u9}>`-l3qb+jg>CR;dJn);gVvDvf2*+AJ#f!`a8vNYxj$8HGP z6X3HE01ttocf2h6ST#`X=xOuuLzIW7OJU6qHk@j$&y4>3LhNKN?miyw zY@QkOj>R-@Y!79!BUy|(2#dx>qM{R=&FrroKXU!%;hSD3_WSK#;A9|VrNL({D`G9X z!gKYC+bbT$M@%!=$Q?4!GZ`QEH8JU1V{1oOU%%0z1EBKKc=W*+v&@A-4jpup$v6Fz?{Ew7q*^SUa; ztH$4{_WtEBzK*#C*`w7pgN;3XjRQUK$s!=E=S%b$8V*mp|dl=rpG(o@-0y9E@EC7gD>?EoYC zviCic&8UB2GG}?p0K9%R#vmjn4QHFBxg2_P@o?1Hb@r?560Nd&bBnv{;#1PXLc>G+ z0-l5heoIX#swr$7ZcnMrw0mQFKH#X!w`*@&>>lOce)|1Jey1x1#zB}5VLfDxA^3<& zK^0TUXfcL016gRCO+h2uitj}fA`;Y6rpW^Z72gM@hvwt}$#Ya$UfeyGiA1Zr9Qs_V z-_MRhUOAZr8SQX9g;R4b%RxJQFL2Z(_{>9a8^Kv@{j>G2>)&TJ{`im{pI26xo}QT) z_obq!Y?g_R(?!0S?uoAWABk<0p7_$_y2%#54F9bm>*X9|gsqiVh8Z0Xz7m@7q^+%H zag%Ilskd$n3p0&9v)4@?AWZbZz;);G6vZQ7Nm?1k7VW)|6)ngr(jP~ z^E;CmV->R{5)K*y?tB<`l$jym$S>|BDQY35<}iQrlPzYYrXj=rnhpvwwjxUYQV7CB zkdt5tKQIGoVM>AyK*nDH=Y)3JEsa>UHt?@&PaZ}l#yFZg*%+H&xqi;@!sgYso0J@N z!CkyesIHKSu;dxRT^4Ji@?Pe(HQ9%MT6fu9<>!yXGs+#qa2ajvLbll{?MIh@hrsyKJ&RN6w#c7XEVHSaY0nfr7hXsF* ze4YI@J}|;FI^k8px0IB`ADs}M4Jz4S{7%~N-iE6V$lERm{nC<=pEb0})Vj)){QMu^ zOWLZNr-ys`#w#LAJq|qCB5OY%@1TNpmLk|mZho}ME6+2j`pXPtH1~9;=M>a5cYaRE zjE?yn6#V$W-XnU8Rvg-Q=I#yiZ_$Z~(ce6LV_p2K=IgvwQY!|8c62a?z!<++`=;gf zW_Q|L9v6GPjsKYVxvadXwyCwDy}zS-GUZ!YTueb@^>{^oS4YEmOT%DEQF~f;drCI@ z+P%(=e~kca2f)zKHa`A#A@NVE*2e7E_E=XVNK34RAM^=>DSchIegkdej^>3cq8BfV zRg=tHAeE!PAneqU;I~m}afvA&0nfJVyuHl8NqdQ_s_tzCom(>VEhQCS2@AC00TT~c zf|!0~+3bZ&0z{N_WELn&C@YV3C{0;Oy4RNsdgu%HoMy2DgbACr_}A`nvm*LC&` zjQzqT&G1kTlgj0}0-k4J=Qro1SA3zvX%dafo`EwY9_&NeT66|?dVGq3w)#zz3RAz9 z=48yW(XNvAF*rnr!(^0=_A^<>)7_k#Ey?Ea#@tTv(uq`5ke&ufR5(V3!#}kPcsVr+ zgN(vHo)*H)EDFtLc-|9Pa~QVXh&dx;DNgW}M3db2fToMaVL!tk-3cTP@i$}lw_MAA z-4o+KX@6kfANynfD)t}4J{F5L*KtgP6a@D5vN~T?yu6iuThD9Ra;Fs+UtD@y{cZ#f zGm#LSGeeywvFJ1|>g1?XkodhfCoDU}KF-=d+asYpww2a}U@la+gv9M>Y|qO|3w`Pn z78UIL!sEEp>Fu^#);TP)EjnXYeL}}sM8h1r$4}Ka?f4{2rNCk`Y$3vd?%Bc4*`WzY znSfIi?j#DDc+jmPx2mqB7K!U;sWY`Dm1X&5bEojnAxH=!Wpj~Zo8+QW^>ZY)zeV>E zH$RRdKa%;6;0115`AyU$t2=EcWc=2JJFoMo};?4wN3w|*R!hG*}XTP66n|n##UPZ}O9eWEaa1SSD zAt-JtsOhAsXeBRiD~dNmxeh{}e8Mi`iZ=6(1zvEA@D25O6y)G@?9l#WcaQz$vY-FD zq==saaKgx2x9H%vPA+1B+6FodCufD{U{o(0RGO*II&^n6M{0gP1QAkMj^- z{@Czz$UVy-pDTB6>l-dQV!wW^vx=-OK)wx}tGtNILTO`N{kyxJ-bcFxzDbM8s!Ys{ zboGx83;Xctg>Q(D-91E6}gL7MF%`-|B_c49{VgUKRYHNzP7$< zc(}WxxwfIUtf2Hq%#Zi>iN@Q*)<{?@0%I)33J5>MthUvA_~J$%rv%dGyDJ&!=VUv4j(AmvsM^}4yPgZswa(|-JTzHJs^dGa{q5bZ`+=FMv z)@&x>1-nJW=y-uCX_+y3`C`K@-|jqka`3S0^|NF{eJC!}y?iBQ)n8QxyVF#czEjst zl$K4?P)S_A)N9{P+6oZrK+x^v1Ok611xon zSOA^OJI%*GyuyI7ZRP5-m^<9hndg8V}O z5QT6v;{2m04zZp-A7Jymz$}S0=LM(?rc;Y@?R(Gq9Wm;qTs5r%6qj} zs!i2!E}J(qWX?D|`!9J?{C~7Ru~tr=8`kCfMzeg4sf<>Bjr zg`j|`)I7Vz$DW-{>i$MT$5t|UBh+Sjqjb@N2N&(qo1noFBJ7<+Js<dHk=1%U&=*ns&i_@mGD8sMi|_&KU2*fie6DbpjQ zG$MEG#{?Yb^$1cpOfs_5n%bs@hK{+zCU%b&rsSf2lKOXl-`qerCaT9VMMX6ccr+C5 zFDfpoAUKNz{B3Z|DLv#`tmWaC7k7v3(|f3EYBuq zZ*Sv2c)s#<`(Ss{ef#zB1D-i%#_z5?st9j{#BZ?EuJ)}@@vE$%+!;6o;Rxi65Qo~z zOY#GK0zOB^qbUIz9IG8DNXpGl%9t1!i+W;T;?4}I=Ai3K@BV2l^(0L z9;m6`P+DWN&cNDW_w%hWyUCLh#yU(mT~xWryq-4Tdd0E{IL*ogsqF`T0i zxCf-I@Di5%{MG`%Q4%;Ri-&Dr7<7J<+_55+&^%~AtOGo z^?OC)n>TLmrcdHLUBewN-?Z4U$!p1)YGvJN04l`@?W&Tc^W{U&oHVyIyWL*X(p}xr zR9K!E{VpdxxxS^Leu(&@C}~gd7D*GVu(_ym&;mYN?3!=`t3=C(e)m3pdE@2pdCkfq zBqnC;=cnSj+FDqc4&7`?+KTU& 
z$V6|~|KjP^(bGTI>dB^aCP{Fj3AQ!C-hN0MA)!_*rXPSYaXge1niUovl#m?<00E9< zeqo3fuQAVs~Q$`n8ieXm22w z!Qqk+L`Z|Q^vb-H!nD@*D##@xu#dw;z#4ZhGe`TSqdXTlB0g;xj<>;%GDcYyskU== zuosRsaXT7e;{fa(fqhIg)dvym2YG`asSNh$)Ob!t`j-!}&qBfjT!SP0pJ&CT4m35< z7{jH5S$Ct2{|etLY%Ht#aKVEl(>H}rXCRTzAv2lG|D}M-eBW zcpew+BAI2Ol?@sRPJJnk-(+Se?yd~%0oFwVL4J$u4T8%b_rg8|7tV5cI?Qu2E-c>U zUFI}{K0Hiqqe3Do(MRXbOmXKX*0SNmIP7YGl@C9g8U}0?yDKbi1GFQ4jq~zG^GSN% z(l1Ax+$wMHc(GQ_Tt?kNYy0E9W^oqP)N*8lBlON0RDg6Uqp3Kzt(wdmVhK6i;9W}z$0a#-vcBw`CF`mpU~y)!5SeIPBf)h|d{`KhhI2dKz1Kf~gGTaWqRp=Y_C&cy$!Q z)avTi@o^d|=R&R-FFW9Cq~@fCemUNFU@j_TrGU2~V4VP;7r=O+=`kU90lc~J`d2$2 z4M!b%erc1l(ZXv>H<+$9etzSU?=|x%t0O`C_C^>=S@H|oVgzl46m8URMw^7Eyl^nL zI=SW0^;4Ix-@c~#*TO~j*Qq)$<-0Ag$#>piujQ)_>s~Uylixb*_$*U;jj_g|o7y)w zDc;f+xxPlobPdK$5xDc?+z2=ujD#%^w+2G?fX`b3Jki5{Jiekhbaix~!`JlN<^gH% za=U)Cr^RKwe;4Bu0TG*Lg9?SN6ZMqR)tG4oi{<62a=f2a2BmB)vT5?gjU*zgUV8s-dnFmLLsa3XP1@2Sz9~rb9=B!`Br_9=_ zveur`T6pzNo1$g`03~?w39RrmPKbhFCMhw919V~Bj3ggfS*BpCk&C%o#L0_a_8)B* z5-$~&8Ui9+0t#aQ-vzLpK)6?b-Sn|jjb|>!9XRK*daI4R+$%AGVnNUcz$g~<3-b?4 zh)sxyjPc_~akwcwjw8%Jbi`=t#GxdkL$?>M)Rxx}l~U%Pw^(k?HiON3wkzo_#4D&s zXdG71h!GKM#9$`)a1evxfjk@s@Nob?8(nZH$pa5KqJSbN(6dROxpN~_l~(UlUN?Hl^Y%*Fgg z19X(JM&U^{!>J}_t#hWEar^_DG?&*^kz;TG?KOmKF3PMc{-JaFG%=lUd=u z*1s??um4ANX9bH*N0sq@Gih0~Jndu#kB7@(&HQX2=D^wN?t+)ufe+qZwaqd;5_N1> zw2_RXl%zXOB@74!0}zA?@bXy!T^H&5*(VLd^rYSJeT{*A=+&$i|% z7WrLz>@=j3AZd`%QPG+i7H*TB84Vds%FxuDa5M)^%FRykkXb~b>NVOQnSsJQDIWi;Tcc5(}mLA6MlI_3NwHXWox??AQSm2%?+Sgpbc#V7C0)yA!ToO^(~_-*@{+ z*w+}pxJ>EQcU6td)GzFjxnv-G$x!(IIsqqXtP2oyks#O;B)OE=M+xeQN^Odfu>0V#5)5_>xu*D7E8`~{c{N=1~9&_0={5BP`sZ7||)^N@K z)FvwpO;>T?E&==&N|roY$!gN$HmzYcY-r3Be0XX3*~3OR53jP>X%M(h-WI23C!uGx zgRuURh+2*i|0o{xs|qre1j$0+Q%FeVrLuD4Hb-ex95 zK{!r$kY{u-);S>KWz68Q zqwNZkHNv1@MGO+a1OYoPDmWq{Jjl=AgYr(0-v~CVE56*m#mzhLgH;dM<;f)Nr~kz(yLjrY7Ahju>=ZMkP85nPoV4gDazBAG~SRL z5_;p(XZ{WCjP+B6ZiWN%3ZT8i@ZCETm(C5iI?yt{p(u^X=Axpt@;RkC>6Zs=>cnXd)*wb@Uf{2s797w0orhoEbc`*kY z(p#yuKHuH8gl*Jx(~vfmQLs_b^wic3T)g_(ssk^L{Ih&teu{YP`sA7Q%h#UK?_b1c z)fG0=n+F+fRPG=O9yzl=LFAl>43{;*86JaUO|Y#Ec1^;GY4#Y;nf~nL`=7zzee{GdWJsH3(U^29jm2Q1TngIHVY`UWEbA-tPjxx}{J`cQgSAhWuYI#| zSb+N-T259f?R2{ljDp@GOuV;O()qUJ<>H_ zB4vTWTA?Z`AaDT)KBV83tqUn?6w=17{#%2ivB);L>{?V%0p<1OrkBPZK`GD); zV}WPaKiVwoE02<1d2?w6>;(+m4UYSsC>|&ogT#-eacggGa4z&b^7K@6Lws0jcp0(S zG0N)J)64dWHr6TjY17}!$ki83FI>NL{!Q?c56@m7J#fVEz+U|W+XyQ(WwvR1XN2{@ z{(En3t+=}UqMzy7%U2b)UspeNP2<*j`Mb)Zcceuhiiufage|e+R&wGNQlb_>+5w0; z093>InLbFkpgR_?)ey5XF63_V8#4NQrc`$ptvsTC!u#m*JIfC{9gfU@+s_$#^DXLm z@=GEl4#0u;-S3V+KdNJ{t$0gO;iiJ7$-LDM*PXVwXy$Hu#pU>>OS(Z{>^^3{m|-$z zra0dcz8M|ecG>rkdHw?t_r<_!(dwbprKD<2YOUIM+2)bTg>Qp)I$7&qJp``ki3S<~ zd)~3#Qk1=Z54$LyU#=a195IOowPnqVcO|XAWa9BLc!)`hOZj5wbSvsZc<(q2`P8-E zI9~GfeZqZdVLKpZixaoeRP(>6{J=y1*bgCrxQz*SHjFIy^En^r{URad^QRu8%|-L%+GIqTe5m9y4d7|$ z5L*lh02VYCinN?jz zA-Qu5T3B~4*3K2hmf%5+uG*xMOq-NMrL=UpsCW^UuS7tgS5c`_Th~EGVfo>c$Bpk_ zH?uIkd;i+GtJ?sWKxeW00n>aql)%A# zeD-)Y68q6OR32ZKN~RzvjWRon=Hq{cwX?{rO8MFncQX8-oSn3Q10HYB&+jaRb;AQE zfVBmZW@5YDcRu^_tTd-;yn)1?KwaPvoM?ru1F&rzc2eOO3vn{YVG()JA#x&QG*+I3 zL}b@faC(9@$}3eM=gC3P9^#7U8vFh`@;Tri*!Rc&*uRv`A-})X`+h08^BAwQb46an z12Q3Fl0{6Yh&NBXclepnv5`8bQ$+;v^`7Fue<^r& zL#wY!h)fh=bLWwmTj=mFBKaDPb#HJymI0$OD)O=`m@^Eom!22i+}Ra-O6U37J@Lm+ zdY}II{Zr2j3Gw`s2ukKM;q2IGZEAPk2T1OOq!HTW$S7$bt3EaCtJ_Vl6UGi#?gZR5 z3fZsbH4o#-k4`6Z#K_vnZFAgm`2OC6jM(PBx-XTVSDUO};<8lNb@9pYbLV|8K1v8S zak&-o=~;MOc*9f&5lw2+Cz#{)RVC)eH*cN0V0`VCi;utlWHBfrb|})8mO8nZSvT7>-8;yk3}C6sIxv@CTt-rl+L*v8;gU&rW&w*t<*_!jay`c+MN zt=&W0;2^*7_>d=M;n(wAB?AtqrMk!$hKZCv)9U`bq2-fNR)l|EqD6G@!mta#S`lp? 
z@C3j^MacGoy2*F!#scihOi{xxB0Gb3c)n1)u~E-@)q3-7mT$~I{0Q&~w}197=kT?U zqRT_TdP}iO>!lxPNE zd3KwL#jS58S%IIQ91A$1YA=Jg0l-=e*r*Xax2zAj@wF)9TT((&S=NW*wD=$CiG>C4 z^D1pi8n%p#EP#-}hQbV}JOWoV_T8&1$(-&QV^S*%iGg;LN45-TtM?KF8zto0w3bY& zY7PjB)d74FUcOpXqdUw${akT4A!{seGUd57+|yEIgJZ2qpzkS(#}d zNCv{wK!^-*Yym)%1%vZJ);cLK+jB2o1}4~;#~wORxnbqdU(2ALa_zaZdBG8VKWgE+ zT@_;TeKG4yzhHa!6=?Y5#KL9@XVYGnw^`{ z#+hqU;mjiM*i6X;>~3l!9Zqo2K^m;Ti6F@P8O{K4s=B>By|gGfGrKUYti7U_IK~)fGX~+LNwSHo z6#?%EFkS!^X>$=!@YgYGbbT}sarE(lRrX8eO=TB4>0Wwt@?GBJ?3M)Ac&oJ@s}}j_ zEe%|`HFVD&>jT>^?Am;E%c8wY6_0DGS!;@T@Btej>LRu2{kAJPXRl|U{3~LGx|yt& zneM4qmjX%xyI=zqc0eT9rtnyCbM;;BuOct&*M!PhSY6dnSyeYTFN8svK^_^2v58a) zZ0Q*MnDk+SIXJ~4&UFlqGht}_Td%KvI|DZ^@YG!O<+ysnZShZ`%SMOfAV(BJJY`mx z1m$Q@jmX?MLciHPk=#on&m!)fWb@Kwk<$_}cRAo8$nPY!JH*K5t+h+I%j-{1Tif%e zCyU!UUM8m9iB7oG)ES=o%{Spwa7$xBRb^&E($o0FXV0I9y#D;$@Zsjuv1cV6rTOiI z6zmnX&9v;^TEwQmeG&P{^O5~2r$fta7h_C-oR7?wm)owrzq-qKN94!n#hJxVJ)Vd8 zMds&Kk57@&F;t8xE^ErvMr8?#U9uheIp7 zp6Zl%$V42Mb6C0a@s6|a4;TdNgF9$Km*9w#G8J3zy7qwmfmTvG5)rg?v_Fjs5B?Zz z^6bvufSr=JMZ|3dMLqEtGmMg@-XfbV+aDdepJ}$)X~PkZ{by`WUVnJCrlKDgSkJ?#r`r=7J9M2!6cl%|}Qu;_%Y= ztaZr)%Fn(5^ZW8Hhc$gy<7|YHV-tBGremSL>4E09bF20rHCwjAUqaHijzhc2wAZ ze(|l13Ri%EwV>X`6N1{FSm6c$SOAR3igE=&pN3k$(f*+uCXHSJdAY^SRn2w&kArs~ zIQQXW%llVs4~H?mg~ zfPOWQwv^y~ZHsf(%PjUNq)ouIH0bU&e&^!w>&GzT`)JDu42|kqy)A9=QYL~^`Ed{+ zf2GoV`Eq-9{$N#oWldFmNl8&*&G+KEyo#FYs`7@if|{0^_NJPK{2#ehwPlrcRh6}E z6*V0d6;0*km6c`rRpmd*%1Y9*vSX9JzDr1q{+bf=t>xUMg7s@#h4HNze2d8Z9Ho7s zlD9VDErc+R{QS0}YWK9Zn(s07wSDs9MbwL^gm)ivGgB(dzqd42b+vSKRQF~k6@E_q z7LokweDJAduDWDq1K7^ivI686;T;qUe(9!4BsW{fDsKv z<=M~zrIVnNkDhHs;QO8=`?n6pVK;aB??3$Hq}@yF&)L!WwHe`GL$5|$KKu59dwE6eEU#OLF*7^Mg5PtB z#zvXLW2ENJ+Kl|qb*;r59>#5`@6E`~Z5Xfp()spIqVf4`$NT+RH;JQrr}4}^Hpy$&4(uPQN@?B{<6F@7GS z-LUuZ&J)2$-xa-~!l}-w)_eZ9G|wq0nu+r{qxD~TZ>g=14Y%Ff@TRsrVXA|?{@ zH|H&Ta6s<#Y8}UQ(k5EU##{An9Z}i1Y}t-g2X5?Jdvk@ni44{P6^H@eB9ajX8gWN8 z({G5pJ^eOFLZ z5Skjg!Dg+RjiR`lAl4TkE90Hz2oCdvt(Em2tyZ;C*SfEvb8+6DyE`UXlWi@Xf!>eC zrl;%aEf12+=X=ak3Q`er5|?yU5WOdx!0j^(xjR2_@YJR)PW^U@ldIbrO&?6EQGRK6vM^Gv^(x9@OmE(#bE9K~PH(SrCVj z3X@U|QJ?3s=iql4?K&XY4L}t}gNRWq*mR;HD3SE4yzgdI)Gkkl`ybz~ynJrnhE)e{ z8W~(xUioOFV&((EmRI=E0=?-z+fiyIgzXUaKv>J1%x`Tgh72Bv2eFX*V< zlwIK>v&d9Ud528pKt4ZMUwEl-I!WdRA_6_1BCqUI9)4pA6P1vY zlWXT_($HA_=2fQg&5uSqzNpA23E`S?fXFW}g`iAfz!U*=0m2|4bOHWRfSVS?Qx%2V zwl7ICJ^mphqI|jrPSnHlESQ@EYwJ;6O0+8&xx{B_H9gSGzE?`JT1J$KpjAH5A}5o( z+bBKqeR<}OiiUb}PY{yJy;)N80Oq`rC&_2S(c3n_Amjnp+3kI!1eX zhX(q4hx^-jOV8k7e?wDkXi2lb|Niy|_WiLx_TSHbfxn;2PhT|G?85w|Yw$aB z_A66}{=-{gk~~{K(UUi|3QrN z^VG<)rn1rLkg04x(gsNreG}tdEq!IdDL#gFtLB-BXxU3kUXhc& zrY?0;O7IRw(u1ET{3{GxMZi6v8!C0^wMOog7Xwxjhbqv9AkOSu|KjWno6G#!(TJQ? zM`vGQN#*}w?=6Gk>b7p-g}Vk#AWGa5;_gD+o+KoM2o?ws+}#_3TX1)GZ`|G8-Jzkq z*Vnt@Ir8IvRk!Ysd*5?1YZuk*_U_$tk2U9*W2_MnpC&BL6r{6fw6e5?Rv^SQwB!_( z#tscP5-^y`g7W#13EcV&7LE`@4Of70e4Pyk#e>_CC;dca>_f`6{TaR`T3X7Wp09WEMkva0~`Uw~H= zJh3_2Je~}R(}cOX{7f{30$W>p{6hjhYsnb-YsPg1s)v0xh*!&*O08U~7>5(tlX+68 zC)+hewwSVT{z70Avkf@xw*N=8qJN@KXGK?@UrB&Ttg!;>#lv?}QV+fPB>1|mAZRo}Hws1c`CiGHDK>ES9&!4$&>hPRZ zd-Peu(i?T{^YtTN4+$s)4hKnSfNDDp&kwbr#w&P}8?rd-m#$gj)=e$b3YERf@b;TIp! 
zqodPf++xlvq|d`I$97Kplvj?=*xGDpRKoD=Oiq8!JO9V*YJkZIFz5mvBc45$heY0P zJ9p!NJ<7&7DCAR!=YE^_yTateOYFq@GQ<}b#BeySg);6oYk(4BwxS-2^uUJtetEey z1+VIy(RbE!w2k^Moh^B&mY1soFm?j^c5e3OJ=@Ty4!a*XU?#ZRbK8j`ww+aC=ev(R zueK|Ikp%d(sQzX5_^Yn|59R2A9J; z9Symq=#K6Y6K%KR%FeHD8W&9;99F-&Ld(!dtD>yh&DkqGIf?o!2=+^r|Du2O+HL-=T!%00_+e~(<&M({ z$&^D!>V$=A1URbrIbt3?j1GNGX1O&-^{Vy4bo3ymH+avQp`Yo@0Gu^a8;sT>c)_P9=0F{ zSxwI3CKuuSAN7=wM%4_2{eiew|>7AoLPVgOiIl{ zx0I!Hrz^&OKka&&!H}jjX-dap&CX)YN^i!%Xv)BE#4Y|qL?u))vo$@tDI1mI_$B!B z3D*-q3ou)90X4>5di&q|erY2P&C>{qv?&o9{e+77ka08|qJ;IIg8ASd*!L&?#Q)qt zDtR>e37NdrM#@Bw6ZF?At%18pscsHcf5aoWnrC!TR{xUaLb5ukWk$QpN6bNbobM#d#EQYY3 z=7&Tgu3>48x*HTmxV*AXg?-dBSxH6pDwaU}dmMRj1)r9XL-U1OAi}Zyx`>bz$K2Ay z*!UpCyM%f^MwE3|7a-C_Zh{aEp>prwox#>an6WLSX=dUaCvHw~xZ`*&`azg)6=`A` z&e&y`$V7hJZnB@x_TawY2U1yvF1@MCaE?gqURgn>H&}_O0Uk>Lv;n6vka9W_kY!Mi z9a>#bl$BZ0&^qbh=&NUBY~yN~SDQQwdosHs-1Dv6%N(uqEfr&b+;Vv+puSDmQV49A zURVHnTW~S%j7_77VX&6Fk85RqO<(Iocgx85#7rJK$HmiJLG3S}NJkwf%@*rmGtmhOYlq68%^ofLGQXO8MdIZ18T zAo<|9yRpGnYST_mHf>)U=OaQ+5}s+Hn6uSFEcPO&Ejut{1eR3T$IY#|<)Zd!>p=6! z_*@0WxFhEaxp#E}mH`0z7}?smxMHqdcav8N)G;kNanbqt+Y&w71-n>`OD9ws#qFzZ zI-#l57oAi6Q_I-5I_G_J#sShOfEYkV8n}CA;Q5i&cZ*9;XZj9|R@_)> z^`?0d7ELbO$L9GpjJi}b<&3n=!^Qf>;(^vigcm!z4k^UE!nBHJboY37L2*ss?=)jo z@5snPMQ4K-zrJ76QxbUall6Eq;7wuP6L<1n;RnUGtb8~(10zD(>T3JR5T9QbFQ@$6 z#m%;@r4us;)8f$CDcNBG0jf&sQV*V7yK#B%5eZcTop)btM8)k-o^EGlt_Gk}jF+&5 zy>qKbfTc}mQp$+;?>H)cYU;>O&523M$Z&9ruyJmC{A7%Usf8Y45Uw+V6`DCO_3{io z-6}Wx+D_q_3do%`1ls|UgjB5u)IrP>cZ(>Ri&oIB}9$~2Uoj>mwW;` zU4Ox+_qr6ntSc){d;MN+>PBX)+IyjyA@P!AH!E{07iaSA>$NVZ<^CQxKaJa9#nBW4 z2w3W)tU@dnVeisxb_w`(DyG8VC}Tu8)j3W@%x#`-^G|McUpdS+J0z7Pg*%@2AyLy$BRyB9@^$ty7a8yJ_T!M=Q^94`5 zpRp*i@Y-=Pm;oMho}Es+MXkgbR9NU0>9=YNNg5u1ZS&MR*gPQpw`rK^E#q6_I%48> z+u4kHIkfqXn@N80Q|yJ~GZ4Rkm<>_ZHqs)k*SJK{D6Cv$6eILy*k1I%Iv3XPCoPeV3OjJ z*-}y6+E!IuRhVCpRF)T2njV{*6rUECo|#r%Q#UX=IkmKcAy9!N6;aMp;aBBAa%i>7 zuWDN}w2pJRe$iY?7iD}DV&jIBZ#Z5Q&|~DXX96~CzyvV50WK#Xs0;4ef%0Le<@I_J zZhrHn(`Ht%cpS}C9f`TQg_4poEEd1fr^eCVQ#rbFaIk-5rLzZH?SVGe2>T-E@>bsV zLup6$Is!v63-eQWgtUivfOO-aMnv=_Z@WE(fkK06yx3APO5N7(#3NZ=pJF|qseSsX zrx=wHXq@D**YYWo&nt;iGpYIj*TtGfI6O#fU50I?v*GtVBstUozX_v|Cb+FH9+<9K zkn8tDUiQ-$&1Y{^-Q7aJE2$}{$a#l3p+XFmT|PXsxOL6^oTQE9apyCKyiPM4adX;n zGgtt2CpsoOz+g?sXT@{T^^_Fq#?OH7O)HHfaAF+JPLigva5b|q#>7Sq_>O z|H?`Bo}tt^ofG0ltV|{}^y~mwEa{k)dA5E&$bDV>!pB=Xub#RhEqDFfw}T%}ihdU4 z)84|Z&nK)QD5SE5SBZ;Djg!@YgV~Y;m@}}WxPUGLgE|Y-H}(s57p|IKI;DQ}h}MB4 zT8DJgw3U379xB{7`(->jI$T;%tg4{u>FrU}Qk2k<_`u=*7M=Q7V4Im6E!-jF*Ce%ZWxfT01xSKP>=$-r=}0D zE*)B*6(&sXSRZ?YnN;j=*U3t8icKtO92uXY8BRk?e{*|nZvDmt`qJ`ha!%&N5k`_c5 z&#SK}t1d3im|JdMBMy_`CU*mIHX#!UBrMHjXt8gt7CjK}o@SL=Us&Gvxzac2z636*G9S8qI1$ZtE*NfW{3nhI=Qk!ggxVvRqb`D z6$xRLex_M^mmDsMnTfL)(^EGdZ5|PgZTs~OpEf^x%H;G>%|jnN{z~sntRJpOEly0V zhq zKFhvP@2U44NyoEi9nNSbe9q{P=_4$3tj(wO=O{+VifbXh5%lKVz?2oxW=r>(0%?ye zj=^s~%ic&$jA(6W!L4HvpAHInacy-CG1x^~z4Oyk#RY{MRX_@bLO=%VU}slfPHAp0 zu|wWp=BoYKD{g0YYHvNHvG0THyOObD9FneS@-%qZ-q@d>UF??{d)4@hz%OYK^#$br z;A=7y&(*#ZuU86(JLU$h%`IfVDabn6y(@}6+>>;bT>W~!@JX4+(P8kdwhG_qV)P;ahpC9fzKmm8(}OX2+Y<2&R< zM1F2zR%c{2<={Z^BKEa;-2%J&w*b&(eA}P z`Nr1ja^K&)Rg`+9dGChB!|gwhvdD-s$#Ju30ajB$X9ZY(0Uie~9?fkJe|>yqDDzC_ zj_?llgBK6o`SI|E!zFGV7JBM!Y?y@g_=HvXxD|O>HMoH}J8)p4?&mNCTtDa^``!B< z`@+2^9{ zY8($59NVt5W0#>Q%O}9{9_-QL7FA~B`^LUqL*kaxv$HDqugN@m_3>?aU0Hl-l(L`F zIn))tpTgTU#EuUGRPHiFK=?El1g#7Dk2Yb-R z@-5gc_aUCY7W~wgO+~s}O&IT^js>-Nm%-tD2u&8@)e?9#5$^Yg!kSgQuPjf?e)NcS z4+!+}vbC~Ok#)5-Hu3T?Z)(3YG$yt%%SKpcBCK&vuiRLmyeVmX5gcpK*k(y2N%VFL z&o7Fv5(Ssm*%qdG*5}w4r`Xq)m?>m7EOytz^reo5pS4A`D~q@d)&y>mygI!yvC>%5 
zw^0g9$V_V+9PS!{-xSiXUCi4dn0k0;{?UUe`;NKnzi7MXio@~yMS!gfFhVxO1Fi4M z`M#)7Qesld&}dm>L$$ozkNCvgbsRF8zc>T)vl{$8VpJ4V^|Ul^-n@9?uTWQBAlPYw6s-5C)+3IdKOg_Hnw#Rj`h#4%o2!X>IXSYnM>)-Q2hPz ziRGocCTA=&EaF;gW6DOtlNQS>VQUL4FP#YuPc|?yHnubkON`E_Dy!~lZfmRQZ>=6{ zZz#>qOo&e}ukT7Js!A;@YZz|m$2WJvs*pjC`+@g%+wEpHqN8RTK4rd}sN1SBn%Z#& zUw!4?I=q&1{uGk$n^%$1-rIkShPmdVz7>3R>{9_Xb146X9*S!l&VvXqFOZHzAQDMfA`wp_A!5S+xIIlX zfd&F;fd~yCw_GQ|Rq8gsIQKVdB-D2l$L9scp+hSAOFL!;);8zZNQgKVcAAP5NthK# zqGrO?<(ZN3o`L>`(dvSBb7Mz4ca(mhhP3aq3+|`4*@**VK6-85n@$(if|Y!8lYFvL zqv{e|N-WQqZWq>MX4B*owco>GK!3oV`F*HZT8~;*j-965+Zz|o-M{dT)R8bD`eZ4@bKZ-_*KG#uL^`yyp24#K* zGcjNx!e_N(i={A&Az-8eM`OUO%*B59=u>&sJC`oMym9T!*^?*UoZN3Mfj~@aWI9Js zpM?rNIkZKAnGirRBJ(=tKtKx|mE#O4v2UKLODV~I`cdY|Uow|2-Mf1Ep46jfUlrsH zO!cK@-zlldg~bF7PYf=uFZ9n2q*SGw1X+bO#((xwxoiFGUFgTVF1L1l6F958|B}hc z+xB-anmv*aHIuQ_ICJCLk)0CiA7p%-9eg9azk3;<{Bq~i{nHO#Kd|@lGjMQ_Repc& z_Nh}+_pUlP>}zP?TVCd(o`FcnMnzzE8>=2B+jAT50vdqcMhF_N-5R&-x9A zCBS91iG@3Jl(RS}F##nyNGH%ElE0%B{Oz*pBGNORYYY8djJ`$$I>wae*tFKmuPxt# zkZpB|`V_3hHRh2a-pblbtz8PE!!~5%$Ki41b@JQc=}%bPk@3!b_$fZp8WRaJLCA!M z+aNr@zOFgl-$C;Sp>_9`5kf4MoSoaaii15P^O5Nh)SpC5%F@we@q)Y!oXle!>|-J# zU82I#U*((q;|Wq9>j85sH#ejQ6n4gub0@3qteT69i?VY4w6xWJ2SlzBAW}snAp+$7 zfi7Ray--#AbnN8L8+XngICSBhr1|Bug(7^dg1k#2+em=15Abv|3HP&Y8Dn6k!noGm zA}joK-O^GRlx0VFL~d$!Y)fwq6_=n~hPl|XY4XmiynGPQV*vX+3;zI*z@Q-a)E1U` z9y$ygSZAOkAXm{(0J>qoI1jki>3OltJUtQ;xwbZ=`eyzysR?Zz&AGYxU2Wr4C4H?8 zhNZVSi z%bph`oFsO*3-K5+owbv+EVN1N%m^xubWU~i&GV_BtzM&0jGrPOb{y2xDAOdt;Sb!Px z<>0#?LKH*F;>(-r(0N4zEp3p1A(7{o;HqAn#bckBhpi8DDKUzwaUR#&_0{`b<3b(v z%~B|E5(mp6{MbJwG(W|**aBjefb$t(B?7GY1WfoJxJcT?eDezY+E9aPX*BbXx}j|= zYU*+Pcl5j5e5b0SY)S2nc=#jc63NTs%PFoJ33Yg7# z=*+l)5huURHlV>krv;cz!PZcIK+`c{wgx*4#9!Dxt?n+Qz}1Y(qc) zHxn*)eQ_?WZ44?b3|fHA7HqK=68>_D`^53>0%ArAsx}su0qCGT);kf85Eq1j$7ujK zf3mUZ3o~0$cM4KJA9iiJ!#aHDK8yM$nP-(HJGi@@ICV|x{>Kkb6g0jW26=~Dn%g{o z`NGT3b982;s6OA&P4B7dqfd4+GLAA|U6k5M{UdNbtE*JaN5>%A;K4mONT!gc}bp>i&ZI31< z*qa-8*A~F~BnMpH1>w=PwGWW;WpLD@XDkH^b#e@Njt@&QO12xdjziEX>I-Zt3kV3 zvR>C4DhlK0MHA-(3Wk4?T6QnT^1MW${H49jV0Bb8FPtLpdO~0HPJNuTX>0xbcm58VOLrRlK z6Z>2tBj_o$Y)0Q^%G66gAv>zH99@d8ZmAhu8b_GQ zt28e+NWnntEW|EC0`d%0%r^*E;ySWza?F%tr9VbXUG+H5tHsA<%*UWlx6NAkss9t( zJcrbVjQakjsr5-LlET(8#8nclZb#ingbll+)nz!d2uCT9W&wgn-YIEQNFHHI(*}4q zXLvWX2LDw1{=}d76aSZj@~^F4^1pAJ1eAaMyAiaJ58?*g`G;`eOHr}WRgv-$KZ z+52~9P+CKhZ?2P+;kEPH$A!KK@6y_L#_s%w@b~^rE{${O=JvA8xTNarnwrA8imLK- zbZTJ2Z*5DB2TvYeyLwAT`iqXPzP*cmeP`X&>=5$2hygnluC5mj<$VtSM9uQRj1D-_ z0RzBi%5%x;(ho0H4I?c_E9a4(N$SV2ieJOfs+7U`$uf=~l_F)&nI67iar!U!kDnbB z{bKyfN(;%DMe1#eAW>myf@`cF^c90BSzs*&EEs_)VATa@yd`hAKey`$S1WS(S!Qy= zZa3#Aa6*$lv{9pOD59<*vj$zAmRx%G-KW%+((3uj&%a-9H`yZO%FpJ;E$qAFcJ$qo zsMAMK$G2K;Wj6uLR)7h`NCh;^#%x@cTNtebfjJ!l)P4b=$8^ErYE*exTxMi=R=Aw^ zHvt27D#D}US|%NK5uKgew03i-@i6KzB2r}5fZ+!p&&!>fwniVMKR)vLEI z^N6wUIl1-foim4z9(eis^U3oMkKKN*V(oDH*7aQncHX>p)<9Q1zr4gZ>bH&8&x-2! 
zjdSMo6r5dw7M4E75lNQ|a<=t#GcS#vAu$n(|mna9gt=ifdHdY%a>A zwCk3U^j(zRuGn8Zb=e|oWNtiUS%ur@;q~^uFU{y5Wxu71f8Q8xe>5?mTbJaU;}IQV z737b4sH1*%$lJGUFO`nj47&YscRig zDk_Xe7scl_y#Atm^O58ix%=;)N&U9-Pf_#BEmU-&&@{!w1 zxe#U7Lhr(^qV9pt(VoH4rm_CIfx7yt-179);`r!{aQ~2i;E)IvbK86uN)HA+cxOa44 z@8}xsBt-q&J3bMf?O!^S>|JW-m}hIB>XFe@92FJOTv}3;5Yj!=PzI+BR|a-;HHq{W zF^4{9lqdH+nM&WE_jRl9F_y33v2+-i4{$zm%~@UsED7F#6E&buZ$Q%^zz^Hc9_W%pQ$@Yn%9J60}3RuLT$F`ez) zT6~O#Oo+UqHQ=@0@+w@qb!8|avp`x-;np>$?4H7%1CtU#6a3sfUH3x_MS>=eNen<9!_m zAf#V~TuV@NcJA0*3varfp!*31T|kBMz?`1hLW1x62}P7<`f$Aq?3E%l>?6!_VF+xC zaQo=$oyxo)b@9gSac|nQx6Kno=GPvz$61v-<)!;Hq^D&U^=brXKaUHMEQ)8SDHnxs zCxq-PD{KQ3pYgDkh6CnCx^1kq!*dh$Q{%5hM8BV1IJ-P^2dy8vEHe{MM!cBNV{?Wy(LQL`Kt>?8k!N_ z(a&zYd3HlZwKdxJx6#F* z`~$iXkP0G&<^c5H7k^;ipZF91Jwe71F%)V(C9TmA9n8Y}On>9MY!jNqW@rs^tm zT4H^k&uF8(SF+6$%b^N^5!B0L?foLb25HtNCyO633-^}2&rd0G;wsDpb zx8_~}^L=~ug)f=!IHN0WRHkvu{mFyy*Ib%RLb}YSbPj&|B~#a4QdUwb|6Ru}+zZug zCvMIs>c+!F#a{ZXe8#+NhHUgE^axSU4A3hxU$?mzUzwd-Tp695_D18wS*GW7R6xvXjkxWB&w%+A8~r_Ia0M_tVA%OX{I=swvFD=uZXdXG@w)N{wNI~Q@aHFKM>DgK@B}$sbGw+-!0ht5Y52F#{hFPw zdT4%We}9~F&ln<1Q8!gFRvpQ`(PDEYa7y#S8^>sC|2t;V~ze-V{>u_H0n z%)JS9w@^o*7De9`&fK5Aw>$MmXH*)o*0?(CYj5*m9NbBPx0YiJM_lWB>u_T*J!8GF z2ilhx{8gB6qG48S6f)1iEm(L3LaCtvG=v#-Es-fP-oKqkhWk1Oh7~MbKe$K5Lin@3 z$G>l)W_Bq(IiWBqc4206jX=iyy@arJbT&7%j}0sH3++01{^xroH(AX@i8FEFQ0&eC zf>04|d)NFm5DKGq5fkCRj4KFi6!`67Ge$y+<$vcnghMYqLE-JRLLBl*_(#Mcjk^nYZ$dlqzP zU(kVrt6R20W_|)-M!)@7T3b&E^3SogNp^Ie`|ESx@hfE>k#KYc4o<=8S(uyK5fYi- z&<-tZXK&r_+PSNjm7@(XR&JMQk$N#yUJnN+prc!bj-l_&JY{5Pz96^Q-_JKPDyXBY zmiC;^*=1_!@+Ol7~1T^+A1Zlq%I^eE4#G109_s% z8HLVBo1O0KTxxR2x4&tB=bFcjyTNy*{I< zZ=@sq=5uw8^aKEv5{>A)3AH zVpe`eS!oeO3Ww`!NWW)s9L@~G$!WN_1Xt(Q=W&>|OcEZr=Z~J=d-d~miEjr^*hz94^3z!`vN>|H8nC`k z{;OyyXN|hJCz3XZISo}czkE@CkzP5ODYL^vb%lBTEe*))hvb@u#=LwqTqmpzkEZx| zERJ-yPqbNOhg|T|KO3jDGw>pZHK&A~&~A%ew-ePbltk|89^OuXf)MhN$vfsNw-=i2 zw7Mkc{UoE%7G33;J{)-!bxG8ii(LVTW>=WJuFatiXD9Rd?0^KuJ|V$#EB2>q#D zDZ*dQeM>caZTs-O#U-tXSFapzd<*;j$mR9>prElK-ac_j=E@<3!`lo+fhGX%2t$!x4?GEe5?&T>;$#<565kBFU;E3R zvbeHKe(&yAJG5LiBGN*&O?B?vy{fJJ*+gBz;QM#g9~!=CXzkeClU5$D;_}NN?7_i` zg3PR7Hx=0z$rZU75dUQWk3ew0tncjX2zyiGZ7OK1?E_JvXNL>;lN33A-(<5sDQI@= zl*{F}&R5?&r1vs9`ALQUw6?&`i2I^7>Ku)Re3{>`d%n^R|KU^pTcbAPVAIIK@k!~0 zSj{>&8_!f@wMsdKAuBIjczIV9gbMBJl1=#C3YfD(WN9mBW~&0(8!7 zUuSQ5Wo~q)k^Zmd2^f&kyh8H~BreSiw^yT6603_UN2W=O5LS;89?KgXy7=(!TlKBy zKJ!byw+~B3MPwRzH|*0+-rl&(kAoaUoG7G-;MaEa_ewQ2UUPPlx3o1hu^k&;>1-V< z%&wsIF%i@3k%2)=bDL)m-!#`Yew2Qsq5UN(GfDEs#Ur;)+lTwrP2;~AChR-yE-h32 z*Ne!^xJe(IL=$C{yH%W)a^c-;T zU#$LVJcywJvUQl8m=qI}vWkKA4bahfK}l&=Qwu(R-uc(ZMt!rURU)izoOE>cwz9S- z;3?QOxU`6^t|{~NNBR2O);8o3$txQTP$GU)dyl%UF02qY;2$1o%Hj|k9X!%RSR>+Q zF;k<{_P)Lz39*6csWIu9t#xe(dPJ0u*B9VoczdLLh>~pZSJn6*{>{F3KxW}Y-n8{L*K?fYO-ulSzzEyvU`oidiRmisr8P_4> zB0L8d->=mwl{i?(Yi8%7a?nolzONp7U)Ze!L=Yzuz+lX+nr4+#*A(rT>ZamenOj=b zfX2eP?&|3JilO0&s=E4!%*e+Y4=!k) zJ!E{8M^}K!h?&uvk%|`?tm$}cxcMx&IgQ!a4Oy7A0hb!1^(ys`XO-Y?ix8Ce`?0@1dB-IhuCr|_{4XLP(wU5k7^M|>AE zJtAbik4}+OKuP41#)YqzA98DwBO<*FRAtrWKSo4F${3nI_Xxk1S$Jl$_wdT#<%N;w zZ53HlEuEu{wk}R3?R}#(F>NQfb{d_%H?8_m-8g%5$8osE*^=HTFmqWnvx8F$MvRnijP>l z4(uwdv@>w+Ynk5|b;46u25<$=EsfO{2|Mkh% zUc=2bv86@1yy6TN&y0tB1jw=o`5@#RUpPNHa~EB1Tu@OtzBsr@O3XqpVhBX)stHe; z86I=6b$RzeE-AYpA|>VF>!-nS5mK+;Jp1tW`tz$Dvt1RveNSZ#F5fe5?Si#+GaW5M zX4;0~o<0TfsmXz5a&IzJ<+{vFhO6tKmVwKMuR6ie6PgB#*Y86B8rk^=_Z{f{F29hI z0;|!3ZM98JRjqSVcq|q!ufp~&*whZ2T483+n1^ptY(i#CbYfIUTx?+GM#+vsf&}7v z|4?IetcT+-{q*$M)s4Y_3Ymn(AW&&z;kV(3y+*wQb!mrCfvFI1rl&V6Ceq4E&o$01 zqa$yEGKDbbu*k>+M25j&6RtnBGp3H%JOoFFktO2t8q&-qW3hM=_V4i#3Y=e=ZR~Hz 
zsmpN<@v!`5LxrS)KB48gHN)K#i>oUbxEk9Ur5vSj_t(wywvy+p&VGvi~C1j-{^4N#s=` zVF{1NlCrAvsU*MlRzY(yLDOw# zoX)wnxXr^EEMXZtwK&;4&{9}gmYb8D9^n(@Y;R}TP+vPR)Nh8ec8TzhF3TK-7<69~ zy0MA~$#V z`Sd{b)*dB3W(#CQV}t((Oo52K;7RA>7tJqe2J1S8Ip?C$%S&q$vy=VPy~bWTD*lSs z9dB+k+sSImL2pg-q&G)8!gNM}#exC+prS?~<{-T7=T?bN`_DbS_Vnc&Jx$%rUPPwzV?Bz%odk}7XG(LcPMJj00VW5)FF8>=-KDT*2_ zu`kRFY3XdDz-|chAT}IF;8Xa0K4z9XacJ)#Jh1=|VW80J*tV`jnM&(`ysv4_yQZQJ z4utP0^b>XS-f`2M2bgjL^F0?7C6!cesQx(O5iDbxXyKNh7!;EI^;N{N0~Kss1Au83 zaChImlJDtUTRuE;*XY}xA2Nzjfo@U38R@Csu72&U11oqqIW-gMl0C9V?lPf3VR| zan?U&u6neqg==6aH_H1!Qp{6l)ZXvP0v97mkjj+B^Ir9&Ulhv@1 zHF7KL!<>7m{N$^JiBtUXv(K*HerAa>E2}HeF*SK9EpLqq!&4x-yttq!RYOfNF*2+) zJKf3DG1Rj(z^x|0v#_?RUshK3mus-TS;g~r1Dpb*089Ze#?8?o`+CvCW-2+nZee7u zr?J1izK4Y09L`u-g_w1imY!o^^z*x-^z`sRXI)#kTQqhGfkpUrnyCiF%+2?vq=tHU zI0X25O^%NILj;tt;arPDCi`jqUgGTXVs?IkyPb=dwNqJU#`JXmGF+U8v+Hn*wxq;h zmzNVu%TxoLP&ENM8D`b1^>c6zLt3V$lZEB=rBx))5b%`A;nDK^f}r1CPA;}Cfvy2b zA(<691JlDBb88eDJrp7hWlSHjH?A|%I^RM+O~*XjG_F6k9d-`EiBY&PgV>Z~sHmMx zUfY0u6vF26759(c1qJ>`&1n2@#vj=CC;r5LOAryA(h}`Bwf(jyfDRYXW;_;fRx4ex zW3?QiRMWNcnp)PKGH}nNp~yTl57HvbHT?vt!t2 zyEC{mU>eSJz<%8%v)6tf1-@+)P~IwOd*Pt!QRhOJW#l;!?-7KAg{Qu})q#<=iRGch z>?HRfSLXmr*D%AlQfC>%Tk0+{o|z~;Pu-TKt{J#&7-#;>`}KaCLqMON8N~rC*@W$c zju{C{d)>9jvvcpS(4Se5TY;B5rymbZSzs4X{goCmzdk8Fvo=wQiubZgaX6)QZrdl; z-TK^HoCMA!-F%y+e6QeC))lV}8+rCRP?IIzR6H_4wYWPv<{Mi9O-jug%M1f*7(P1wT{3Ysq;d z%I+c#)S0*b+__J7pQgJ?d|{+# zsO!Glmzf_vI35#!ag8Lnw6cq^$T2q{2H|B0&o_izOtpAg8Fwo)!fS0l3mFN531y>l zs-4?T=5pPuV^J6q34-DcP>c)@E|hB4ScZpR@A4LeVLUL7dpTC1EqtGWIiHv&hnV5P zuNH5DBW(TStcsfB5^@7`Gs|*(7Qfx^7TGm|h#Lz*0GcGl>g-gzE$rQ2etZ5&PCG0j z*FVJ1!NtPF%vjUVMBCD_ptj@PC*>zk-baV0#>Hmax&ຣzn(W}_sLj!3`Q|Pv; zF{=^7!<|at^;vrZFOLMB-{yWr@4HtEZ=TOY0y#^$Fhf^r-l`n}!fdP_ie z?^(-h5ls;*Z~^JME|Od8`U(qc+`RonqSBBRzBL3yW}Vk+gz^coK!Y zP6cwfziWY*c??{hhT~o2wpKzzAEkQ^&Md>_6{ML!T&4m@q##Kk;np@N%VY|kh@>Yn z_FrM&-<8Gx!uSLG{=}d7ZwcxOIKDRYiL(^9(KcYr3Q*j<&RZY)+)nBUp+E$9VQJn( zv$VgoEW7^iN-_T?97tNH0m(URaYhkla*0OA#y7UtW)`MVuO=?zkv)qv*th=ARxwiE zqfpa0ZCV78sEJ;4@JGUV>Qfl%*d|-tv_@kyGhvCQ&9LpOk zvB`D(;3@mc_4QwFZ&mz$L{-J47N!sO3~kcMr&bUVLx@`U!Imf_R{;|0o#MOVxO1~x+jr~ zEwgOTe>;6%`QYA<{D-~@z58`vGf?sTx9bugpB&bDAf`*d(*n@Tf|OPnCoE618So-AAC7d0n2Qx)Fa!*=*^ZsR#F0^AH6Uao zB&;WTQ0w@GAD1}~u^u?H>&;)!zkK|xrJ@@hm5^SR>s9Ed5U(U*xmUoN4`@;0CO4N3 z*G_}oFTI|>H+k{G_~|yi{ftH;4AvqnhC)xn-dxqXF8T7Lv5S$3o6U}gw*=MHE;hEZ zjm>aED7cCfTO+Zq5Cs=TxwFiG&T(KZ1+3p+$@YyT&*%4ysK|$#$&*iP9S{)e5xBGh z?oIQ0!yO~Iqn&An1v*K=mvD(EVZ;`4498?RPgCdtXQzX=tRyd5KUB2*;u(nYjPf4G z&MvIZbu=#7>kJEUCIQw4AhHg?gb3%H7|1aG(rNAN7nvPuZ|q7vqSweP3&hEs!YrSF zFt@y}`@tHXd^jSqC;8wZuWuj2x*L|L zhq1{|LnWCn=z?tO63oHfth}Mx#WVHGcb}h@Y4*RXa!aNSbUhy5d>QdO5aJi(6Oz)4 z$0Sa>@rpM9wlNN#c~Q~st5@>v?VFdDkRM%Lbz5Ke5QVb7vND5!EgbCa=&L9xijVp2 zY;PDF5(-IVPn2nQQzZ$r2@3I891#)mMAm2XiwnPlBJ3Pve1G>F8V(v7u04KQqo~$t zYSZB3n-v@pms(uaIli>14}hZyJt4`0nj@AWwzsAwNF(YmA-A2tnvKOmSkmQ^Mz~sN ziod_BH%bo`;uDveQdHO0IXW|n{QfBz>isGBH9VO_E6x$I5J%Xce^CG65q2>Jc?N_r zjDd>_aF%)j2iLKbbt2852uEvK(3X+-O(S5^8Xi$3r9Kq--(cULYTuvu6My1AJE*WP zrz=T9ZWpJ4D6kg*=8Rm9!q=QGBzK0A;mQW=n}KlrpRjLjlgx+TOpzOq4^Jb?5D<5A zI65+wm0qy1be!!41=t~f2t>J=w>1H#0`g2gw{uSW~Cv1om66PTm0|&zcA7%&cZI2R|NZLM=e>=+Qn3?14$0nEe8y#lT;{!T?-5PAM z0i32l$VHUTO8m6ju?L18fBDWT&VKRZ>9-zl#WW5}_&mRu{ZriS35(_d0WI;Z+Cu!Q99JAqpZ#&@sPv&P zzqM~!T|c4+R12BRYDF~C1=Pa!yJsu+&Ubo;guVOvrM#mdv?Sw4sNVJ;{M+=|wi<6? 
zP^6cz`TyAa>ZmHWzS{*T-6bGcfQo^EnAm}>D0ZNts0h;C-5t^(AkrX+bb~ZVgVNox zyVu>%M$b9#9q;$|{l-1a!P#S4$`XQZW%%?);pHoHaGUN$?w%~1}RF(G+AM`_!AU_$TT%7>m3>Ecg$9NNyM%>#$5z)iB^0N*6nfnVQ4x42#G%b8+0T=e0jY*c7Q}?8XDh1L>(U; zycqH^>-%(f4a|-rT3Pp8d{S`jeeWrad9bM+hztlf)NppijOAZmf+$n z$;#B`+SQ8_GrztSr$xj@^$(L=9BX9cybj%I-FB(wzGm0Xy^e-jpSzmoh!{d%QDJT8 z_^I0-TMwn}I?}FiyzjBbw2g0PdRZ41(dk_39_p`Y{!T%3iThE6zKN-yWu=7#JQj#f>R(gd0cA@+`qsbtE zBnl(QFiiER#D5lfSF?SqSj8&-{{|W2>PJd$yItGmsmSdl35?gSbKZE(>vUA1*WCOd zX^B7&qW@9krD7P3&wq=ZQfa^ck3b_~Nc7`1EUBvM9~!68;M4>rJG=1LXwM>PnjU9q zGzyjQM<^9XqtZh=g--bJBr1wT#UK|zGJ%R`#BS1W+DH5?5=iHEp}!(!ytX0|Edhv1 z3%eoPtq-c@6ID}~85)u!qz7?K-)vV@W=K-rrx^_WH^QO8$sl+ZxTDKj|tjf+!SrZW)V(SRAC`;YiQ2*6Wwo z@1NX!PD07;_Pv3)V?$Bv%gun-F<>ObYQfKG%g_CuOXiW-rt<<;30e-BDnZ|+61q7` z<|LwOII`N6%DYUy6~@=qwbs9DfyWR83BI&PcV%mJq!`xURViOtcf>NoG-ePH;?Jjf*~HT7#wT}kP5 zwM~KjMZc@NtmH*BML5(sg!F{=8}808OQ%xN=1DeEFSlPt@ z?`sM!EkiVwf~PFPqA!qx%~4`=8(?h{qUKb2ky3{7*<=+FJ>H!rl6$naY6ocPnHbsGdb@>0JH;m?b$48gj5=6S ze`yjWS62AEqm}o}4d3uOSG(btYJ(!%GT3Bu*|yaH(Pkz=93q%3Kmu#0l^?=Qax-ik zzDb|SG$N#bjI}g%EG&&Xem44thoolzXl{ka4yN-9XLD{J0umikJ3C}$GT%Hd$xR-i zVByTnOkns&4LvUdi^_x)SXc&Ix?x;S(-g$c!HEglL~(OZM)7w%oFc-W_Mx1@vgEnh zW%|*GL?R6h4d}c#%*`!LOirq-EN5i*DD;mt(ilfSvJpsF60UDzGP|_J*tS^ZPCsBS z1z;GkO#^|}E&EC`vWUwJ?Q8s>^cj8S(-(2y=zLS_&x*3jyz)%j1cSpK`;azMH^Ai1 z!EeT|8gQ++H+zNY(P=58(H9x{psmRMQ3=2A^*i?bmX808{~&7A|7bWS{?;G(Tb~*J zrhN<*K&1Ff{)_feSF?SqSj8&-e+NCfE>ebKD+5 z2e7?kKWctl+glGqe|^}M{y=1+BrW?>MsDI9W||Ct%gMHN4aX&9lvg$285VW(ix7vI zN{hPkvtVCu9KdJsEPUSC6?Mr$=XhFGn1Y28o9=pG!OG#v!sh_Chl=it+s|V!vDQk6 z+fq!*ZrhdcTVai%C?p5EOq>|1NKPzEPQ}eE&=}V2$UDR%`&8YRw36J&qqldR^HV=R zoV8&novSej1l<50yMYA{aNuLr=R2ga_t870vL@?}0rkh(({YwdOP3F z{;Hs}-HP1mhiWgBHSV6Vx}o1-ve|y~2EC2P?N48KQN3gH$i&xTaBgILdaAy@u_(91 z)y^#;;**uBrP`}UCT<2+Atqa&Nw0m+1{|1xB~q|vvtr@Z;+A>7Rax)Kg?E?vP6+NW z*ukaE2}}TTffEq*6jrd?nei(jt{}!QC{X9MvEDl~XCLpBsxKZnKH715hxCuiyxexq z<=pk)YgfZB8z$?T8of8uGBEZrQ?-9|TK{}VNoZtJ`1I&#Wp?qi>kln-P2ucjV!+JU1Z??H8QTSrR7&fR(@zm+7brI zWD#f>=x95zUius0>H@3_%wQgXPQEj}Tf&yaCW$)Huhbi z1$(Iq?s804_Kc8z`E2`>YnR?V)NwGm@#x+=2Zxstkr6|qPWe@qsReoS(6yjCv9-xL zCEg(=ZR6EMFDO=&7Na4C9co|9ftnNbm6Sf4b5nxvl`5H~VLtA(47 zhozHaeA?ICl3E&K&Vd;CEMrFd`aAPW^3uO%X6BbP_s$1JkC|9NKK?$yJkQ2U0Zd)8 z3LPP#v-JDm(Xjtl-`IF}U28!`al)tU`0}QP()#AszLCzE@xrM(y*%3;{>Rsv2@5-l z$vSM&&(Q28{y@%{R2mjdp#9G35g4t-IAkf4|5n)hV^H~@O~&va)a{S9z<j8A%fIM~*H zX=YXtLPb1A9KXC7H+uxm8N>1V=Dg|v&w#I01pWHwc*tak+7?aYYpVc+EMt zn{Pd!u4o=$l~kB^?7>M@pF8r7yTv`%Aw#}aNF$@9mdMMXCm#!=x<`I~PDy`jsC(=E zJ^g6oM}bc+I$eJH;f4AKjpOf6o-SPh}HC)s2?c=xiy`5as z!xCoNN0Wm;-nn#1*GR`bJvI;GZK#XKrho8HmvGLOa(i~A>c3u!-IjQ~Y$X$mBqso)NH0Q?DD)TS< z*~QeC^o~rX#igf2#1*7x*4EZmR(>Ya-Y zc7_8#A9nQKFX?dYTy)K?arh7wgn#`Y409t@84hf1LCmxpTp-T+KCYDniQoMlymNWExWM$ z&SP3;9$p<2KM0Vpyo^-4$w(Ilu4lCWwV{>0M^<9`5PhMxwNDk57sMrplvI43oEt|I zi9{q{4=Wp?ifZq^eY5QBBVf%4;AmGk(c$3S@S|b4zPP6R+t>QK+@`joS^Ow{?UV2f zl~0C&Z#P7Rw`FN(TAcM%z8-nis6;1wB5f4*;UStp!cxgAVSvnt354JMA=H0l?0&O1 zD#I@BKUD?(N&Ei!^(T!-`^_53D_?(kSN<0g`ESB1?OVkv{yjjfmFV1S5l*Av;xb%7 zBY7+|B%y!HHRDMWLu1vYmBm?kbQTCvguneMX^ehpBGu4_Y143VlBdm*_vSJWUuM7_TTPE-uba$%=WQ`yeJaym7i| z6izQd!aT(HPmQ55gvF_)f{fC#+_ohwp>8hqnV-xBOQ0Pt>>Ybn-{`~{C3$ymufqP8 zhf!u5oKCR2$^mCFwxEr|J`y~(T%0yMtmb@dmcjxK(kDLO3SLTx8u=Vh78G9?y-1qw z=xfa=$eN;JCLwkn;;Lq<4O6wX5}&FEU3V<73?9lb>W;tJ>Z?%rn#XCgxRa=)rGRSk zjr(bD?W;}A!sKdtuCz>?8p56!UY3XOJj7pK?0cCXW$SEg*7CD4r8V_{@!>rVijp?6 z0*2z-ZFih?J%8Q!_H%>RmbP}u>1okvQ9iMr=3XYZp5DBqeEIOcBiFC0tluWZv6KC< z@gW{9exL)`9oU!*0jJj5Lv{!D-P`LC?rm&izD{oaajg?#FU5I`dAM9T+1y!yF_6{Y zwD0-u)6Y(O#rmaGWaIev3s~%OLghzq@Yh6WRUVCl6*H-;;R%_9q{|T*Z@z 
zE??YN79Z;CZSUAtaQKTOujg5APsO!P@?!qVTm7H=b`^Pt_(p%PzBDqpfs6qQ<7**W z2tsxOb_0Zh%Y+RWjNIJBHT1X{dde_8*57Wqr1=vCd2Q)>qszSXSNg zEd$c#Te|vQS{Jg(L@@Ey@{2Z#N&B3-VgBuV5dk@UA?PP0X}p+~lWpVUt=O^mhuZ6oh9676ezl`U z2M|whNLa!yBYxi~Mvw4MIt;J>EORuzh#y7tzs4b zA|ULF1_@-^iZm+2ofpwMAR~=;Bs^nyj!eK~#|DNPN~-fyv-^Mb%`Yzeb-X4Ku@uq* zBd8D4H-e(F$o`RvlFE$6)_OFaMEXa0g$!v}WGf(}AOQu@Shz@l^JKWlXx~8&^fdAx zDJ>H6!YD{x7(+uiB@+GT;d0~hj}L8OmX$WHO`ay1W}X?|7&?_CLL6o#tkW+;FVV)$ z{B#`REMvY$29|_)XZwHskv~sb!r?HadHUtBslL@OG4QIp@`=#B8(cXz+Of-M@T$As zDsL<-si}xAOnd03rxbDz7>EKlF<`~S?asZXXn7AK>?WL)qBjAHNg>y+t2h-um+8ediQ= zMN_?OOKc5JBkuykFs^UA=8|E3+um+1<_9!91xY`9Z`3 z-uFCfP1f?d^RpYVOTAhzc~w&C#O7lUPF{F_Mak^M1~Um}T_E8l%<>j&*WG6P(Ks|E zJSaRg>s!v$)J$7VXXE!D-^%loza^)p#((+r89%gSp=s&q<=@!XY7=gxaDU6;x7&2G zHBUs`Tw^cKo~+E4c8S&d2+-dO9FzoN-@WcF3-0gED6bqG`IS-gY46>Al9v>B-*{kX zp8iTL+(5^_t*y^9z~RV|&4w@TTDx01^cTn`X!H9X14i7O&ceKQ;=8?18&?EqM!8yb zH_J^A@Ifpe#PdS32z@^rS>&WZCK3c>Cv01sNcEvvaG1R zp{1v+vcu9aE-0(?%6VZ|J1ILg_M|wOEuM;#G zVlWVkg#dI&wA*|3l${%SxBsNk1BX!5<=p_?>fBjX${hR0g$q4@6>A%0r34gIe z+P_@CWtH}=Vio@yFrq3I4{1b3E)MBSW7I?$Eox+X6viX&yf~!ynMT5*39~bcc_k&q z-z!@>x+kaRkW)X6;h==5u=bM)Eg30J!vlq1v%--vK0N8S2n&Pxks0@<)4nOV*gD%sVT_CXG1(-n2tGqbhL6tZ=%;`5O3Wu9+7GyH*k8C@_5-$ez^<~s%C^zo zm9gMuG6{?H9xgYHSDJ+D+;>#bkJm9xGfVDHEg!8&FHZiNonD()_$4mpW2)y%zq`Af zFjQK=txwlyX%gpUx zx-JG+?uh6(cIyMXd&^DhjC0rRH*GiMmoR1n&w-dTi=ekK{l)AG;k%nC5>dA|KKHbt z#mh^eg?&=BHy$G+QV`*u6&JE1q<`@T!gOHzCJ%-P?y zdaA~&-doOQJDV;CuQflPu7Jp0(Jj}uY*pT|OJ%R<4GHdNoPyrmh~}R?V6p;IIuZ_F z9F|Ggma2{*htT}b#jSNceM6&D*x8)&?5|&vKL&mH=ogjvDf8N!=MO!!AB8-Y)fQi? z0=8NS9*sY|B~EE${v+UhfXzsl&rq1zd=s;!!n3CMH2lQ(pFP3pA?H4*D5YFmXDhQ= ze~;+#?I*6?v~}?b36F8|a51$v6O&%A{Qi+aN0LJNJASwAh*_K?Qk#~x-gqSRv1FWs zOjoTaVV;$Q;$B|hTbSQSq6sfUAp!*R5YQjd7$)MdJZ1dlz@U3+OL=ieZ^!I2pO2b3 z1r-o#eGW=4DIOR^M?^LI`L)_xR$V*iWM}-*(9G+mg`q=Xx@%VETYv96j{2(JzIsLI zOZK=OyXSbAromb~KpFvuHi%XhM^C$Yhh5J~d`#z^vtvB;4RsN>esSsaP-7fiqMwc0 z+s5?tW6nH?mVaAw(h;&9o&y{s;@lKT7K}LOC@*U#KX<>NP|ME!CCc~GkEt4%hsSFB z$I9J#CUaj^E!fXKxAHypy5cG>~XdRE@SbsMveoQ+7y>Z9{8ECE*4 zwnisMq<^b4vuJyxPTst3at#<-$Gv#s$n0yKinRRpyz<(rx|ZI~CBhOUVwM;SA4A5F zOLKaDw$jk^Na~-=II$sX83WN&WNntFRJojgS9i{;R8Av)*W(;bxB8+w)C$QW{8sU# z$Cj1J4~Dk~qXC3*{GG~V;&h9#YJ>j2#kc^?-r52vv(`Ay<|pL3H2}f z`1{C8{=Z1%U-M)CMy%4lRjlIQ0tym!5g#-f?eD=i0tPY)w=@h=kRdF>B@EJ;hn$;N z+F^Q!N52&m=N07t>S-t7CW*9xIa1vyF)!o0ZE=%lMO^@Tp&6295xPkJlf7faU;-ip zGhfl0=9i-7oA@;5t9Dsq#Q1a@g&vvzI+CwggCLjFV?Tz1&_JY`(>vQAS`zs#;k|aI z_Q(F{T0(7EPepEJ=^{f75QTi+-r=k2)UOVIp_P>cG>GC(+eWzfe6* z9aWZY?>w^&PrL8i@j)Kgas%IWz?OHD_a2KHt4?Y|AGR3<#{!F7cj;`|VkyRI2smv3 zrzyCVeNwaV%w8L?P7es$BQ6(+@Pr#eIQU+Ib5`=^F|Wc)eZ0$jZin4H6mgK(fuGG* zl+{rbm;q5cV3{tSTqWDSbkR10W2+HZ_ZnQW6VyvMA!p1iZpFmq%)#s}1dP`JPYGbl zB;o+>gmH(IUHepOuk%6uW{k3+BPSwTV-MC?03LG|4r7*`Zab~=Y!9g^U4D1{s^N`e zCP%LNUpip2_oSYZ{Mo&)-)RK~M)*a9SVr1zQIXyLc-MK`i|1U>%##4MomM;P?2643 zVY)(Yd`j*oqU+-5f1=rerRKaA$^^BusV^flr>dcHbanuRof#b&_?q!8AS7Jf(O}z) z1Jb6lLdKlp)~q5nED~N~f`L2aOJ1{h9ON?<5H@6IGZN%9+5VzD$hW5~rZVHK@s$lh zhk(@@U<9O`H=cDqYxu$V$(v^+}0KU=JuOZt+3Qi_kL)p4&Gnwx$&ZnDlO42qe+h@2Mk+M?aFN1x%SGzIS02&qi>ej& zKJ95my%~iy=~*Q`JtM;-6W_njJ-Oe=!SEh6fG(99Wo|nVVgj!_qptM@dr{(iDzFg_EPC2$Rt4yAFAr z=ljHOmq}brJ@M+ZOZ4K3uP?(d4f$g9@8PLb3XS2A_9yF4`@;+g|D2R-|f_tJ6N;mYJP5GKweDZ%%^KaISDBUfFHiI;}M#rd+Is zYXltySxv#NfbFWuYVR^lRJ~Q7gxz_Y|4hbhBd3c1aAF5eYk{*MpSk2&Pvw}ZkXcgy z>~eSeSlOGPr|Z?%iJD8Ch&y}6=d?xPYrnRqI}E@kOTc6RSo|0=cy7o5gp|>mCov~o zigjK^-#Kirpcr&ujTKK8o$vpHC64|bTct9;mY z;j_#dGdg!%izp^Ka{)gAU=D=c)@;xPTJefewZ<>~mG_%&6w+l~=gh-m3D~Rwrx}x= zp}_g5%cp~sMrNq2?5o6$|J+XrtQx%l*w zXS9crhhcD9fOo80aGalapu2~s%ag|sQ_~VFYRbx+i#4s@ZagG){mr%4jv8jcCYHe# 
zYI@JPH}mhmdEmawgCll_#I!`k^d%0uo_HRm-+~>WZ`1S*z9PQ>*)`FSv_Ki^?kOnx zl2V-Z{b%#+@*U+d@JFoV`+f2(QcpxfCDF|vlyqL;C{Em?v>tMBVC?$Za2=JKYHn^*o#*O(!Xr2 ztCDEU0*P;d|1l<~O|t%n&V;{u{xf$Qs*@8=bHX`Jh-O|wZ-wv}4$2LaWtW#kP@@9) zc`h;vU=}t(!c92$e5%_OJsS?;1038rI=`X6Q?9dGsW9`yUH$(nE!ZWT=!l+icX70%F4FCA37^Dhd3CfUmwJ`B!#C!GJ7rR}g<=l5G1Snnm zc>9{ii_5QURn&Z+>-*Y=$HZ2p#uP_?EBTU>@uec?dr`)?)~ilMg#kY9MP`5!V49HN zu6n7~Sx`i5X`N_lZR_ah_|aI`(@|Gb^`)n$q`p2YAkewAw79ymxxA)3uc0ry@cYR3 zil)!;_1`MKg%@b%w|?8two=>8)RG1fli z|B6wxf2Q=P|Kn8ee|(qvpFj6sZUz6_9sWI7rG2Yd#VY;>{DT@IRUYE_#B@Oy@smoJ z46hBMtFqk^IOQlWqq88c6Vh-!gVE!Cf8mPZPhr zn_C>iU??=CVSJtlF&G+sQ$XxCsdzjAgIz!^r#F7R8F)|Fd>ybr3a?vyg)aCVXq?XP z8>}zPPlZTc5jP13qFSAm95+dr^YPm7FxfC8CT5OEa&Cj)4r!Nt5{6QudcqrBH}g9S zv%3ncapq=q;^(vw+H5AP7Wyt%2-vVO zaHC`9@$&XZ7SZbuS_5t`L^GN91vupO6#Ah8F$mOxk?Tyc~N zE_w69I_otrhqB#gv3bhrepczF!D*6B)Gw%gyxxC34@b69W-d#qb)$-c&rUC=;y zuY=;7M6D;D&mLK--F|atkJA3zPwuEbxRaTck&%%W9u`tmn2%muDl01X_4JB~h-|2- zUCEl{<>siVJux)YfBgK>d$V^hbzi<6z%Devc0H@;{`{nw_B|I1{SZ_%O>` zsW^q{nQQgm&0+)Z>D)N$eMi>)oPdeEgn{%m-#rRyoX2m=n%KX@!?CgD{^W$5(!8oX`$#<(u4!OSBA9hXDceilFVUchpnb6Y;ZkOK$Svdsz2PeekG?e6Ye=Yu%Gd|x1 zsU*}e8e(BaDm-$$Vhwva2V2KB$-Yzje*}2q#zv8I90{on5Xj_ZWO*VMyo)%(Xi-Tf zfrLe2Ac_RXsBi|kWFC%m!+Fe77drAyMDl6#PN^G1T*sPu&gJZS`Pm|Mo_;Z;GG;i) z|F47EDpqOVDps+I{|Wz~eN;sGcn-pzCRD7v3A2y`5OD{R=HRM3-!x1`5i3z}2#-t$ zLOg+r!xL$6nFhNe(l7RrFZ_a1Gf-?CO81j)n!3w;EsW^y`8mHdy@Z}yIdhW`C1(7=H-l@Z&yPeirB~kdk#8n;zj3#u`v)f;o;Bfxtu$GDzaws zT!vyymh6a3u{)BJ<8tBRwBVDpmlU=VWijIt@!2HdEq(Z-($m~WFH@fA#l7@QvZ`(^ zOilazv!}nav%j*X#p_duhL^5Eu+e+V7baE@3@lDYq`xQ`^bTvbc#-lz-gf71%gx6u zx2sw!nTEb_OSXFKdRtagLfnu`$dQTD5wM%H3%g5-hsYj}-{V`ToL+G$Fniy58^ET6 z5WBr*toMU9T@PO;Wdy{GfPgVr=L!Vufus}IYzhwRGrw}*ZRC0GmDSnzLC;j(@9xw; z%%`?#lhHm2ZP}ex3I{!ot}|WFX~e~4&&h1dyvBYFtJPXALoO+!4defurFoo5!&w!G)cfIu4pz#_!$SJj!wl7KaCFiW0~( zJrGNUC>otCEHm_PaRlu6#7yVNuZXHx{dC>K&e9vLSi~HdIIIA-lQ6f>Mvk!cY{3%T zPV4viUB2jQa?Zd>_KLpjx#u!x4(_|RUFz=IT_$2|8l3w*j=YF`&`D{Ug}5;?`eS}l zA#O}(enh_Sr{ws&Gy%5{j&GRA-8q@M`&Xj!Sj$Z^b`OMt5U!;_ZV0*YsEs7d9s<0G zCYe$2wiJ{?UGY)$ya)v{5g{jmB!tJwP~mza1yC*yHahx&c(+nyT9EV zaGJ|Tkj0Xn$DC)Ez1()~11cU?Rs}iM_32i*QS|o+aj+q;qqL~Id6H}yl2BgLIngst z9-96f5I8*DXCLqBUzhZuDaYkgT<6$y^4FXfZ{CE5MK?D5h>DK?l%A4SlA2WU>5cPC zxA-7S?*xg>Z_gboIJa+92=r|gm|G{7&T}$wPjcOc&UUu>QC2cq5Rcg~HZT6Q4Hzcw zasQ=g+;`1%_JIxMmIG8ahFtP<>o3Cn4)8`d6q$ysn6a(o@vE3o?WV>f?vC4HtTvap z2{rn2)_6&jcqnALejI6Tni?IO#Zn*~L(gFmofJBgUs&nDt*Tpoppks>cB%M=m={kc z?93OcDv*vlMpka6_)7XCq(VTUCW=e5kl`*WlH8wVbX*`txY$`_YJ&i?l4^aQI~84d zU3&0V(b<>rw~Sj#hET{XFm(w%1y^a`{{pPizE!MZ75^g$MA}Mo44Ht2aGnBt+m=)A z2JYe22ZGjMoi@1QDl`Q%aj<>@cB2q~Ci;Ftp|eCZVVDe?KNqMZ)kxsrxk)J7OOyRR za?&kav7+vCXLnb1&G&>)$tj=niu3F1>RS6p2Bwxq=SdTNn4gjLahHQ0i&^bqvJhi3 zXO*x9#~qo!{80Hh{%CgbW=Hp_rdIW7Y)akeH``?A+d;2)I-cUS6lbzx;__u-^8lO< zfW-g^nXln9Wda6(!;%71h+dQ{kkGX!@;`DnT-NR>&K2}w`en{n`cl_ss6i0Vw7cUQ$ zJF58wMZbOy7Uq{|zJ8yPnw|Hpkp6dETtZdX)!Ml@8C#mtX`h3ceMa1uh>!$rebeXq zn(Gy$EP`}zzdaL|>F?rU9^m7f`YAOmFz{1cpihuab6Xm5c>>btn>=n28Q1wefQf~$ z12>R5kaX2~zr-8BOMji_fZdTD*fRkyBwx#6y#A2w3uUbk`E#LChk})EM;|!zc+(!q zTRMky6CNE4P`()bD6}TzC#IXuL}no+BrPJSK2NQ)?)2pNhLMF0XtKo882fP5dR#OA zMAg=X(QV6UNj!D!Jdu?G+3By%HQezvq* zo9jh{c_pSs`iFT0$Aw0v$JqrtSp`@=(!U#;9%~F(d z3ITaGJ~kJTkkLAX4fxpX8Prw%9rkn~ZfS_dF(i^`OUPtB>9;*2l}zdGYAvtKqu+=^ zBjX^PX4q~+A_fwdNel|uzevqWFaDselkh@2QOCMIsbF%3;rM}QLo?)BR`LHDtF&(w zt60T<$6xbqi0%Oe52+|BWoU+0m(=O{@Y9Js_Wbw5_a*mQ&A~DX{E3G%vlK)a0#86_ zACWqTrS`N9h6bk`s2#b~i9XvneKxV=OhK*1>{9Pal8=Ccw83k$w>Qk5 zZ-1>MYq58|-3Czy(T(1s+$KQK6v;$xbpg_zK#)$A-UCIut(TpZtW)1tPNe6w#P&@7 zXzu9v_^~kZL$QrXgs*E%L_n~ci-n_y*&E#lPoG@8bwf!*Od2N6*#n 
zJbjRrn_XR0YM`z6F*Wg(t==t*M~^+8Y`iRd*Z7>R&wFjd7skfMqmwg5O_hehMhD-> zJq^0$QxaG;+q{HS>o9N`17}F|;{b|)B!%YTsC|OQep69)8>F<&?7jw>2=+q!LImIK z+;&fU>(OLR;V*0(OLt$WmOo^E_UfYp*A+J2kvS8i6}(*A250D`p?hSkxVY3t_nn=G zgGqUr9*VH@=e!&miZ5YVr$OCkvnpad22>WK6V0p|< zQT9F9W&*hN*nsT?=Hvj`=}D2PMHvE7rlniubA^3WWkb!>R3BzxnNUx*_Xk+6L>8wMPmr#&KgcL^7jfjJp8M!?pKgUs1-zrLwYAqEKR~%>{2~V42 z$SqO-o`j{ss*38K-gbt5Gl76`bo$?&VeCOebXArZL~w*c#u4!JQ^DlqB!MviOusW} zdKu!Wf4cTo@&6L5v~LxwSjB(Gir@>He(3)_@bNue@qlIR=EEV3u8 zc%2a-ZW)d*z+c^q4T(*u7HRJHgWf&}zU}hudU@Xs6!r{paXo|*kSIi2I5^e!qOmNb zysRked!cV`gl@9Q+syap{VyN&*t6S;`F?_+aj8VW(0<3B9d}cC4!d*iHW%KYA$;2Q zi0)_g7hZQxJUAQ_8~!2ii@9DzpnHa~w!MYPdo81fnGMNN#o?a|qcaMU)nC7HbMsTz zco+TY8G76#{b6*2rE`Y5 zip_zO*1Hed?KCyJ347TeY4oNSe#KZI6PEZTj}ZVJF~Euo{_z@h)qh!aI|wzNzCs5Ia*m! zYi;9T>*!?f>U{04$}3Iv)T9^#bInVdHx6r`J??n%tdFvS-qsUZI|Gt!wT++eQrvIl z>*<*J;qk|J>&-Uv8i?$0IrZqHc5-9VGJ5>C{toH4sn!@oe{7Gr8g+@+MHq1bab`ju zT>-B%r|hfC!h7@u))z9f_W-_rAY3ZFC+OHM>$B!Ym*QNcV(iRmlg=akZxWNkYn$wY zLS4*E^Kx_QAPj;~u4i_`GUS-WgNb=&G8JHO07vDdLMbw3Gaf5}U*blMb5X~cDWgmf z!w;$AGt1jY@G_mWJ>ziG2zl4w=$ZN~<>a8(>0e(p{FI+wVwqWDTO^A?$hJt8m`1A+ zXhxGi&PQCA)dZWL12KIufkZ2gmI^*10T5^aM_D(HzFa-#SGO=ejZC4^P_&l1hQVJw zIP7v~dq;d+VrOT6NN}9m-Pg{JetHfLb}@dh9Nr#MRSf;&)9|azJJ2~e$S*B6!Q0NY zwX(7@KQ}rmluoMVDR>QCv-`IUwH}8*y_mL6(49>-8tjjgGVVC&gJnBY&m!H$!nVK( zpfCsIXP)1}({WJl$4!;~bJvIWpQHkoMj-w}=v)mrZ6JF0&X%hO?%Qg79_W4hqjTH9 zh$t0uLL?8$LV*fY=uH`%go_C6TloDaqapU#FyUj)$iO@bzuZ63`L#HeOh1T`X$+q; zES5S&piWQ`B1fe!PlPGrkp-KRl|MN(N~WS{hz2`u9#W?vjl}2%A<&O{1l$6H2u?%F z67rECO)#DyBooj0)vL7ce*so$-zrwIivJD@q+%gy8Dj7VE2AR9&SX0IqjS4O0@BGn zF+A5?QkFuTo1pLfE#7rNWFh)jfHjq66ji%yowBI^^ zL_}_DCBZ{j8&<5hpK(sh@8Bi8E$+HTb6_;b(HA04tpkKw!n`owwhHd%jTU=cMIRIuG2+ZWqT?$UA9KX&DWori;mzeid|^2=AxJUv{Z zB12+6hKEOnM8-ya&C4w)EY2?|X>aTK6r1GZ8IYZsUsYb4ms|WfF}0`nS6Nl1vxkeh zt>O4=%h%?Qx4dr&zLDZFl@PO+5`E7p@sRbd$r)1*?fY8Kci%oIr*}on{wTBidf>># z<+**g$7$Vg?ShK5<&jCaO#e8*1uFanVG*QyE>|B-)nGG{1V%i-o*THb0XraNC9QD( zjexi_mvARD2OeK1lHElTEvanHpTR;sAp8QfSwTpYTSA|P za03SJUWTWt>z^n2Ir?02Gu<2H^3psh^u$Q7B&6}sAQuI)P0jKA`gM+oF@dPvNzb>t z$nAR1BdNblDlde$x1A4*<)A@;!5)C{-UQC7`d3x$FhXy!@aOlM-mX?6BqtZZ<>xLTMxTs(7KU0p}@f!eW)Cv$7_W~kF&O4IG#?E8EAmWLN|qS9+Jiv6q|=SGHG z+v)>jgYIc+n0ZE?x#l6YE}WY^2k=Ps7z0Esz(Rn700nm~ zQNrNR1RO-Pn$W*JZKeBuTA|m|GI3Qve$v;M_kC7@QK-wh!Uu%Q=zY43g zZxyRp#eW9{(RL52|n1-k^)Hw2XRzznY#{R3$rNBetrW^S6 z*I$abeBAnk&IhB2%+CeI-#uYGXR964&=n4D~VHXe(loxN9WVG8>9+Dt$4P&(yezOd1hTz=|6|~G z5q!AA+4fNg-7EsfHj$<}cbA!3+u2sz$Fgcla@We~Fjhah1V;z@WEovy*;?99sV4fHijnuatq zEX}Q$8bm?FQJcQJ(KNU~LF&Pa6xtFI5q4(?9Zx_}kVZk=JOxXk zA)Y+Q8pe|$9))N=G3<4x&_pC7OqwHN=YE?nuF}5$1z4qht60S<{yUHsJ%;@0ZyzMm zpI$#ikeN&)V`vc5KG5VG=#X8W*)!HfL=4tQblSJLyzqNOmxL&&(pLO18EHTYjm{Y< zDDyQvSoG5UwxK1(2MEF}No z{PkqDde~k&T2=GCVkP;pge7!QIxhzwlW^eXu;&BDECN=nJMAS-m?$LGe4@YV{vW*& zu5S0wpKvnK^z|^+F?iwVZ)zFv>b~FqVec*DqWb!G;U(Qj2#O+h2L^U`cZ-T*BZ30b z-60avDJk9E-QC?i4AVWcp4o%`;(yNl;@s!OxqtV=^;sXB*|XV$GkdLdee3#O5-^ko zrd+^*e_mr55s^XSn?OLHTdQ38b_z$^1rcxS{qDKo)yK_@Rbp4;_s@1#uzMWPHx65YP!9#u_QZB_5F)le5YG~;_jbnB0)ESvm$WW2|h@EO#Sds^W2G3 z+kFGR>$(`Pf2$;}yOR5x5Sx|+%eQ6Pa0By zgWK^9pD7&%ohfJBUYrh6Ss(pS%u98R-4Adn0UVtL64QK3NPwpbtc}?1Il6ZmiV`3o zKrRO4rb7`LQw}|MW^&?IV)6}7?+ZU7Z}ko+5O9kaWWb~Y275q07E6#g!P}e!{GWj@ zYk}cXV6}dEpt4ex&(5hr#nGZmRq;29!j%dF&ID-q8=a>39f3bZ2tUAV@V01!-TEdf zIu#{r`$yQ62x;av+rI?jWGU^NCQ|n2nK_kOTF2;}aW#J2)U>;$M+rsPkH%dd9eS6W z8daa$H&cZxuhl0X`ZK zBm$8Mz%?w&LkED!B?y584(uD3`3RTkFg9p*oqii&<`fYdpJ!ukXJTO3QdRAuZ(bFb zHCEScVCfJxGog-|JOwFY)6*-ca4i-d7^Z%lob5qo;W%VsA%jGLl+3#P_}l~n2@5$* zZzg+t%X9PS_+Qz3OcoW1>2gB&EJ*y_cn3)bWM3seuMCGJvmY}U>{(3X^i*WB{%E>G 
zra&?lvG6FrlY)`81B>WA{?SNn0spVDK>HT3fCc!U{m;IZ3^JAhv58eNTK<~X ztZv+KzO5GcD!9yNhBm?Cboby4HU*^qiuwK)(`RwSBam0;X&;NdKj3GX_$d~dfV=}8 z60i`%9+Y{h{(j;TH9Qon_C$l7eyvc1^Bw= zr6(dir&t41J(^R!`xvt;A!GUYxWEWrF}PH5j~dvjwehC!3!@BM&m5nmjHtTYQaA?5 zV@&qmSBxz?@NEYYu{C0oHZCr6Afy9Us)>H^f6*{hgPIz)G5&Dt;d4Vk#Fk8fLDZJc%shqj=t+=aq zgKk~4xNIC^e*N{m!xv6!nHbo9cl;XQqHU|~7w=7kST^k|s?Kt8vn|ceDNfIZ4Cc^a zZ)jA|xtk}Ssy{MuHP*KNoKzT>RhsJI=i1-b)l^wknOms*`b{Qb(i+>brPxoV=DR?Z zjcl2TsKbSOfgfTD!t}J?zE)L!uJzU~H|&>fgm9hmuyrZ~=FBiia*LY#u)t#cbfi~>wlObv`2l9XRXTz(vJ;*r<7i?6S% zYrKAc^U-~zV zMYKBtHFXJ<@K zk`A4UWcy7Iv)Z2fD{nR3vdn6QfFYl}&Ei{ur;B>CIN5tl8k0n#&CuWs3(nD47z&L{ zp;6ff=`8jJfMp_`nUMXDN*ZIqNi3Y3fY=d;n?OoL$?V_e&_%>cP=xP(6t5*~wHfFG z9s{t$USOY*$j7MrmPr;WCfa44y<^0gu*8sWUJjKt#dTHX5kb~XW%@LDv9+YSu4 zfB_J<;#y)Y$YUV1+-l=-+hfN+>}nWK9E0Uw5?}gu*m`GuKJ;2tS_4Sfa@e6M06Jh( z;JR&I>z5ddNSFxo7;+=b&rS$f2?8TN3G*d8oVPzoQdWt4e%;~5Gj~;+M5q1Fj>;Zi zq4M?RGglS0fcFpV?wLgzPQnoi#CJBg+Ur}gx7LK<$cW(J=(w=#inIqBx3#}(SVx;D zx5f1`IaE9$4k2JoVIM1D_< z0>!8B3RA@O!=&Av=rz%qtNkLjMx>qT7}_#1vm84oML>&FaKbE_2qf_l+j$#(Bz(?F zd|51`CoHAIC;3Tmv8|G1?$_O;^_RO_uN9|lvR64^aOAbouB5=4p?f=Bws<}e zO*MaEZDsK+xo~?%v0_v0zR7No9}S9PHquAePgbAoPx{!Epb=z$TKS%DQF%2377^h9 zgu_I7=TN_chwW9>`+HSX59_)vf8l%6cjAJ}nD8YZ$&>nLpBmr1c0bhDy{4@?p&)f| zc%Zbfi2aQW4)vy0WIZzf7&=)0E+s;@q^JbK%xUb^51(!+KQT5Ay?v*8g>20do^dfi zWHUd0ZrUO+Eh&JL=9v*#JRostPR^bwkq4JgAx-CYVBqD>=^#9!mKWvQ^W=hu<>x{# z*QW6)gkt^7%~Q5Eb+4(6$5VdNp$h?L(A8Fgq{<@Z*Zll#?Cy%I%Im-lP~kLW3_!*Z zWDV6;MURfLPwYi%FjzE5|2^M-Ip^3$8iob2WZK-+%yeNm|aU!Ex8EPkubm zO|S?mPtEOWjml5UYbg1d7US^Uv8kn=qa)S|sqtjAb!YqIw!x>)LEA&)UyvCszMhV^ z&R;E#&p=|k7*=a;?QHKH`>!0K;PWPHJ=*=(nr;C){D{Ae(?UbkoR3#!(H6C>7xYiw zw%q@tRqu1$>C&ODA(`D#SvkhXT9x4;|-Ell8cOhh#^>EDlDlc{yOfRbFQ;b zipw`2-PA%~w=654LVE+pr-vUbUGpBSvE*89FD9zPC8W;1Ok4Wk*PYwVSIcM#^L^$5 zM%)PPvk~St5tO%Ge$@M*X6~nhhKIMl-FZU)+;QD=TVCzl^nTl>Pn(vh$tqfGKl}CM zyB{z8b9~4wQgLdDzm;!IdPS6fxS!YeS;{1v4nDJabnwGoC3VGH_Sb`&eN%f9>L#m* z5R;l1ml>0KQRU+n&(ISniX4W*=3YDmtlaGQg`So#4OLN9mA%9Lu0Bpm*LG}GUAOwP zl%x(2F##<*xT0&|W>M{Nhi~V; z+Nvany{OGs4F0j%C26^1-m19aeFJdA3`24X%{MVCh-ZkhpePASPtC5HK=W4i?wDZg z?w{Pz(zUs&?eGLvVSGw#d_)vIE`*;IqESU4jgK}2nt$+mo(CVq#EccBEff@-R<8=b zvaB|2!}Q?(>A7>=-Ky0Is#y-IB{udoj%kTsNN=O5aqgjsCosee-fl;z| zR=2=SqceG=la26NCsc|+$vyDcbyhbfAfG)wFc6o~m~H256&(_k78zMmT2MXRZGXGLn->WgqlkBS9YTB=Kb&E<+XV2FQ3e*dLQ9gj@=VC~TKt3?IM4(>;RPajW zFH`Mb5-_!921;=96+l0BMb*qUpH|y2_~?4WN{OJg%fp_%ZmR8uvrJgp-hNr-VRBgs zfz4A%%q~duE=lgeqGvE9q}qCxIW&%nFD$H`7{)>h1CBvB2;mT9P9!EpjZF2>7#)z+ zgS6(Jr+vRub&)O|5RFuEG0=mQ* zei31>0|d$f?fZ|x0_|JC0v7Nu!2G=da29$pk{pB-_Tu0i8BRvk_}uZoC1EHF%y@t; z;I{?)T@{Ri-X*0)rle*iC+Fto71Y*OP0xUrR!t%Twl%B+a*3PGb$}9yQ~ODC*i2`{obc%CJSO>wXEE)S$)6v)%7E!quFHj ztFG0pqvwhe({a<24b3&R9ZfTk)P*0Pfs}5>(A!{*ZH@=|OeBFh7n{f~wvv^%*r1yJ z_EYYAle!oCoFqilz(yMZX;rXCV@X`22^tpvsB*aKa(b!N5;pA<)&r|_#SZH$8i(m( zA%?)fhJ<)V#CWNjy?Ckn@Tuz6qn9^bG2FjHLqXxq^0Q9o4jUhrxxIAHyX_~8_N`Wv zmiWlWt-pxdihGe2($;plmFPbAO&g8Z?KIo7>cct-W!cpqH=J-kyVY(xPz79?K-^Gx zi~ag1;SV*uKc*LF`q=peSOzEf#Lo;&rKBYekq5I!vi4~zDZG*2X1HmEn%s4-tFMAy z#`zdHQ`Hm(B=<1cZ!?#jT7bYG)X3fpN{3Be#NPtkV{sR5hQeynAr*{<;0P&zyPv;Juy6v$yJ2uPwa3 zguZ(pEU)riv6CTBhD%Uzb35gnZ{>?GKM|>7Jj7yV0)oH2ynC%VFBcL?R3;;&qv3pF z)Q0l%O_Ri>1B}Z|sFcyM-_Fk`BhAh&ZTHzSA^Bb&aRLB%_7D^&FJ9cdTqbA#t_pdj z6mUI$MeL+VA6^>yUqfjMJb{5rZQvz!xMvSpTw)mTR4N|tG<7Ai-@K)#PtQ<0J}Ml^ z*drnueNAoIWo3Eu4*R~Y$)71{y%;naG2W0@ndlDGXh&mxufw;B!g5+;LmPUs6H+E2 zgNQIc>aST3k?MWK+ed}?uI}FI>Q?5jrn*cP+XiG&kkBy4)rUj$Vd%3+axRVdXKEk& zbP^rmj=wYg7V!TX3$$+m3s}Ivz#nBCi0&4XfKVq27OCweu&w{ri3+nUi)2_K%}z&ZbDmk`7^cK&b~I@ zKJKmcjqLx+^uWXiwKwKYMhR81;WZI%`My1HbQTf_kd|Ls_%l0NFUe@5-c}i%Wy?%f 
zZLr_B)=cqs#GSo%8{U_n7yklQx&k2su+BnylZLoq*oC}qhrm1w!&HsMM&iI2EVCEg z@3he{@`JX`hn|6c_7)cv6=P_u=ip&$`^{p<&Mi0YUDE#c!79%Dj>Qx2vM?1p^)qj; zTvoqw{^Rj&Z`Z8QloT=G=Q2myTClGsW(by<2?=WP%9<@(qQ7jZ{)*)Wt9H8YS#BrK zrOU-_21Kj`MD+zP_@8_H_0h9W&jZ~<>(ZNx;!D}PYjtaFR7K49YR^rdHpzaH75E@1 zr7OMPb-!}Ba`I#<3Zjc@O4{n%J)_e;*VjG9&TW~+N~38@@o>u&V{c}~PGj2(@83OC zd+eFv?c3nD*Xppe&QkF&f?|eze0uz{W~(11zAM63Fes>jn!4Pa!oNLH#Y7e0IY^u>qIl^-5Fap1V-`&)&1dq_AQBAT0t2Q0iGqzgbMAk+C7 zOlimzqvORO6i147X8{$c8_5S|++z0R+-cl#RT^Htr`MI}Feb*>9{ z)prS8W`xaY-MOeB;oxPVYrcegBJ9rn97D>zi@tRG$a&Os*@vZoB9k&HXy2&4|CM2FI`)QO?Fhk{h}ME{(-d&4pXFCj7e zQwNN5`hFK@kQvN5j#0>*FZ8D%%#V&i z9Da1_Z|^<>DGQ@fIkHU)r0+ij3$$+m3s}Iv0J3Z|kv)Kk)MwxjjV>Ai@zOCNflNZ9 zvRevY`s?40&^+yN_wLtgI^HU?%yvWof`vqaNhB5psrH>?A7L;M^#|M>oSLKzjr4c* zHMDlr*0)!Mq=tR)`MlX|mw?G)V8siJ`BqtNdm8XMyD^7GB_N`3Oe}<`%BFPxpDt+? zNxj(q+`1C4zzA>e;O3f=>Vm8Y@9^T3%E-Wkr1&%jl5{vn{l!$VHLWGu`o^Z_&fdNe z$r*V#2A$1W1Ioir7+pB-bV1Q#-%jJ**2Ol%aJaFvF6Bo8W?(8b#`C_`<%_!KP8nU? zZ*X4d+LD7tJC)3qeW*Bc;0HgS5fC&0(x3QG+ia}EyLcr$)N_6CI`D~t`vz_&Aqgk( zQ@;D0l1zdlyuCbv!$ZfcNI$o60Wz_;gbj2Vp3}g{jlsz^B*;xeDsy>A z6#l866>VjTVPQN6kABv8e^6=5>f7fJ#)m76jS4ead^9@15yYqrX(klJK|wOY7kOz! zFfk#A5J##oizY*WTOj0TK!C;YK*+^}yg0~*g*+4_lq^W<5bF*2c|Pm zPt;x+X}r^4cE2z8R!@RrXPPK_auJ@joDBDL3~e>k+j!~B@yAb3nSWUprMI{%T$D7( zMMaB2+!{Cm;_OzI_{dd-2>mcS=w)nQkPuv$6PJ^p5S!xT<$qn}(RJm!hj#9l5tP4i zM}ME1*XeT1(J{EQ1D>d4oVF{F+xP0o*%$6^!QVZ+5|d)Px;khTo>>=6 z3(wk9_^%IveIq<4CvICpld!p{vZfI%O;;!V#J)sPu1v+9b4RYvC|xgnZ(LyGT%yeQE3^m-`!n(ef`5CV|^n+nrG_E`$~Fx2j>Q+=9;TpG7H{6_g%W& znMbr#P_Sm*;>a)0k|z7xn3P7O2!nxS=&^oFGBF^H#$wjjcel5V{m%KrvLKo|ukOVB z<;vsu_UOoOF#eQlnO6h&)!=)9`TYlAf%g5=@JEsq0_MLM|KqfO=klBw+4fA-R|iFZHv?&P5_ire)h4*QEguG+T38Auy~?fOfsv)+Uk zX*W-W`sUdlw>Y}SZxfrVNqm#M7JKcA&Sf(Pi@SH9CnpuLKcs(&gRWyTx=wR+=preJ!M9yyua_gSv^@%>3-urp~I0=IVwP z9Avtt`CYQPea!XZVtsi5Lum=C6;j5_Z-n1To=fe81JT(DKHkB`7B;$`R$58kPP4t{ zq*)6Z^&5%q-rN;cQyUNym{u8m#qj(ty}fdJ>jXYYi+x!lq`p{IbH!PQi|*+j8I_rd zIca58mDA&6gXlT;%6jFxHYFkyV?Z7z6oXI#-FFb*yE8jj)9_cbV)KsVs3cZQ2%Pwln8oSiUAkTLa{kE=c6KBbZKNE zDwHHb2?Cq&AwEBT7Lga?0Uokn-3X^Q&s6TJOFLT<^SJEC!|K4ZUE#aXrSgygNGt&+ zR3s5kMu%TEGmt-fe&x$Yl7YI*y5r>_RtQo7epC$G4x+3;l(|%8_@3tU*NwTiT&(YW z(s=Pk{n;&5C8d*G+cxo^RNDUJrg?0*bxO+9AJO2Erl@J?k)X~^+R2-5It6D8eX;d< zr>*z)omx&#mY2WBlXp*S{alPa10L%}tUeSdv#kLLbu1E^m03A=`YP&+^)MR3?8-_X zmxONy6=!#*Up!Xx$)L+WVY;{xZ(*B%bVul(4f!I{8GLI>_z#pxpKslD8z;ZJOMY|9 z#*KX&WQQgA`T_s2u*|69=B~55QiCk(>I(+bOPe(9UCiAgf?{*C%1RP566~xjbwBAi zIXM?p6~z=q=Qrim)-=`@RP`1VO*W2yR?A$(>%c44AS~D^3!*M>^&V>NqY_%@^;Vc< z#Ny+4?C2CGnaX5U)pT@rOt5~f`$%mT1@YuDe-&^rI8|DZ^K(8ykdw-XSdH`kzy;d( zpMeG1_m2YWH?RAZD*H$B?H}WJ3D)1v{6Dxn<5#sN(lUXQlR@C5Wz3KHrzFlxoN3Nl zn8p9ypmD13P@H~CSPuE4!9Q2|Url@Gg`4S|yD=jYH8~KKBxYB3WxTh2`_%W%4*P?m z4kExD@L2F~u-kRn>bah~vxU>w*u`$zt`eD|up=_jQVyFH`AtvtRxRk_vF zmLFdb5S;IO&EVmgPj}?cY}~1=v|D}8UNfbA?gwrrTwkZPRO%x?QZC~FfEf@n7vz4w zNWJ`h9Qo(dfcrBxzk;l1$TLUCs;iVi0UFuSlZB`zmEDcdDJ z^HFlTTydG$?6e4^iBf0Rj&!cg3*8d&`LNa17f(;dhxs}e7HABQt#0iUpPB}PeG*LO zB0S_LL0K|fjiYX!s1r67xHq^6}F* z%8y*X8Dtj)nY+DxX{dJMuGOlY5!`}x!XgYFE-C;$+jq2nHt5Z&82TRM6&Mg=qwjU~ zfMrI=Y=0}cvvoEten?xrRzWIl5h&&1ALN&q0|G-!H%uM7Ft+b_p@!Cw+S|UX$D7Y7 z^&QpFHo5CsW!jj4CU4VpCdkXL+Fzd17ii!AKS1ZW`Z#K<^C2G=^KWkD|7E#nGpE+7I)vg;ku%BU zU(Uf_w2$?ddzhmQMrXlk8YZ>FODpWcZKthIea^a;yVMff(2x+B5gU^j-PqVzkXM|Q zm6cbN;veMD*_1KdUfEFA7?)fYoL3f6UKW)S^TXe>qoyW1DR~4nL4x$Lp}~%tHU|se zh`uZ?eT=L4^N%#Ax~cZ`b5rP zEx!qpjkib_$eV6b3DnQ%tn&)mFvGwQQi1XVfCk_l3yi&0T8qiSY|6D z^-=sm@EwOjE0a*Yu$;h{%!sb$#oA)5lURyaLcVQ}tEgki;W^*DYwHz%vPp}n@Uy{Ue{^?N{LcQX~@X>gVWF%1o6 znT097S-uCgk4b-)1D|+>tc1Ds7xAerl2et~b7$kWgB!0ueC**Ln~~pCS=E!3ke!>B 
zTU6Z;(LeB@z2!U|BLI6qgugLd%iEkP9RE!rQdhzI;tr!-%5P7YI~nPhm8edm50UAs zAZs~fEP;@l3`Hn#4GC@>>Rp}dCTRcgbcE}%q}X-MEi!WyQ3!b;Ttuhw;Ai;ow zR*;CYB)S9%@7s(cIQ}ju+2pBj}F9!`&E2FqA}SfAD+gT zqcbV&$(xXzMd6rtSSpVj_UxUB@w?W?j5%B%r zzqEk=aeI9-2j{p!lc%ZkPacYHk}^IqY!<3;;I+cH||*ZS|uacYY{ zuRrE<A>g4CM2Z=I;L4(`gQ}{l3n-d=-RtGFMYfa-56w> zVfZ28?Iq7Esz2ZCcQ_<$A;x9N#iP#yo`S6gn~&%ndSE2SG_c>8xl#7mYtqa zl2z1D(g1NV*gL$mxPtx7;AZiN@EV#Lmk{fd;(0>vpv-4cVS`1I_QHY&d{UpK_v-9f zyMOsA1-T37E?b#9M1@7C$H!J@B@suvNwecPq%AM)aYe?7`kzuUni76jfrb6xPo;5_FkE<)D3c&Vm|E3azpf~3Q>gk8pBL{V?UWOO_SBWE zn4T1&LmmbcBrzoML{OLyTI#{fI5!Q)2dM&7JQ(Z*mHD8tM0{pQ7}EI}P=<(9B1?_W zg1QF2$#G#KNtg(^&~z|`=IZR;P*kp1Ua=cHwGuP74mY`Mq8qekgB(BLZzx+GdyYQ2 zVW?YvbY|N)esfX7<mXhXP_1Vw33j0Qz;lTwq}Am|=2+IalOz0aXv z#wep`DlsdsQ0voY^wb=ah`7in#|a6k880+dSMJ(=>511a=(l?Nv5Z=+$R8=zZ^vRD~_p%NgOFw_3(S4lV%8K%9I?E%| z{D~y?L2OD&B$k4ip^+Jgr8Nh~TVYaQvz~6`6BC;|KKD+?K3D0ybg}5e>&mp?PDBBT zJRhQ^Kn51F2uuAAB4i;+Tpu;5mQP}y@+0QjNEG{LfU}69@-aD2SlTuYy&SVma z3<4+3kO?U$h#Mqf`;m(O5f~lr7M0=`o*(nZ^YhwI z>-p5VxJ;1FxopD6?a1kzYl;LVqJ+h`~}+YgT7XK|y*&_^Th!*BWgT)Kn1ups+(z zQSRDer+ocSQBT*YuTZj5`iW0gEBGv9BPVSyEo-)H*(W*WAm!_(*I!${YV2=lY;I_8 zYKsVpU{PUvdwX_47M@8!W5+AAk{{l?d0h4R-RQ_GeVuCt+c!Y02cY>RwO*oA1n zX2_hQ(uy-g#c%hjtuJ%GulO%=el7cK= zI^@PeVG0zWL4c)$F%&NuN>QLJ6(UXgrbrUaJ@Wly3QQ=7$4QV0@({n4F|=vA5(F6R z%Jx59nWk8jy|%u6=NNuPWB&yb^AU@DcC1ISvHrj;VI73LoH`O|$mAc*0!iw?L1~-Y zF|E|kZ1zQfbE(;hML9Y7Y32A?h?#>J46Lq$>bkz`c3-{m)cmbh%CVEF0NAcxp0#&p z-o1O7$*I^G46LYcH?T6&(0TLu%bSessHEiJ`uf_U;{Jz^zrR$=y>u;W=gy>UTSLyB z`KI;JIX=RtyCZdu(vw~s-#gSt=WsKl@3yJE*`siL3XWspEQLv?6Bs3#a3EsDx|kAg9N9$QEqz8SGO?UT+iQsEcNmi!*iZnmznNW3i_y7(Njf7 zynW>PA|cZ}Dmr9x7{jI@(|zd5oU-=XRw|y%z7YEcpQS=PvLO*5bsTZIG4Y)ILwZGa zYD;xid43e6&*0jc8gf#}a0(6Q8c3sG;+!|=?-4hU6tdyaJ}V&P#IwkB5tkV^pA`@` z1uKlfAuFj%dMmfR5E9qo7P1!PGv`_Auz0hB>@kmR`|Y;vwchtMQS+eD4Sj!`=B{Q) zr9<>AME(#g2F`uYbW@LftdpzqGVabXgWcCO&iMUwYOG1CD2NRYa@F|oX4lR`>hFyo z-+mh#n8qB3rKtrug;8Hq^bR@hkhNYS=dl*3gQeQy(k~b7GLc!W$-6>ZGN@9JzG80bU|;WH-@xqQY4*q1Ro#>t7CF@1#=?{1BBQ=& z>->y}$w?_r` z?NK}SBKY}Tx2tag9~-0^`1WULmPRkueYqvbVq28P`j{(7t2%2oy%rX<-ni-8gDtTR z(t{N|)M;J_g_v*^3o6VEZyw3#jn@LU2Y~T9@Loz(d+Tb8yIZ5b%6FIY&kg}1NeIHF z5XzCE$iTSp_%t980c7wZ+QlSs%%I>%4H(Q9CUi(bhA0gR%t8S)(&vpIQF}qkO2knt z@o}0A4oad(vZGV$@N{_^lwzNWwO*}_|5@Q%? zDq|^?%?t73eJv|zMwb%_!k8I;!l(?fb6H2^vII>LtsTd#j_JpIM8kpj{LseMa!4aL z)l7JLO_^JfH9z7{pDNn4?c1Xl(W)Q)Uwrgb+^2o~q|>=`{#P#h6ckd4R5*=B@?wJ{ z6JnDSzZk2z`C8b!8U%y|=$ZQ5dFXTTN`?@3pa>{d5G>yy=3w+PaHge+IoncMl~dJF zLuDXyp|yYDvyEXlWpE6+)p2Mhg~cI*mD&BzAL+Ms&my|bxJeu&jnL8Ix#@@zhMFV% z@R?9P-ysKR>p%}bXj-ilcS`T^dQC@z>bhD)t@)Q=6Wds0(dY~U#|wazWQ_HU)E3tv z?o3XGE*;WooPIq2&sL`e{JU77eg7!1e(%1&df;FYlJ~bRipX)!ps+Y6{~yx6KPVlZ zWBAj4c@zKW$I5vNj;jX$iyAJ_zW-jZ{_g4fYl-)7C-WZ-n0|SenHIU`| zIQ`n0WGaEhu}1zz`;a(k38a32@Fau^cqltVRqPqPJv!G159!om36o3;4$*$bP?x2LRm|9v* zXm;Q`^`}+_A1$=iyqs+#Lqm2e9)9@X{i9oN9If5iA5Lmff?c%kW1n+-J=XECxsd_j zwc-{rcfgBqS6S6-CEJ<)$S-^f;uGymE3x)HF_wO)EkuiDO8jX={dOl}eje zsT<$XFzaXKqxF+(ej|6dRo37g*=z)9Uub z1eD8%6tXD5LTyrMo<&Bzv8~vrbYV&WPI}XcH+Req^Mf|P+OL<_uOxR)P zQop;KvfNhIrpe9?%gpzsATfAwL|F<$fx=u$W(iOsU_iixTr{c>c1m!zSBTQjOBv;- z(#7denhHh9ked#%0uKf|~H*aR+Q1H^#;{8Vk6&0t2g$hN4Bj3L3yQl1Z{hpS)Ps+=; zmd0j3e0*|J)0(Plhfs5H4hOAl+{hiOS^I}p$3T3}g{t{BFLsfPWSiXx~2x^CcI^K&q>zS#X?%R9Vt~ zzs|P3P@IF)$O)`}tK{OBSC13zozLH={YA5vBiauCDCb<5^8eLfA#yA81>HyyHv^Gu z_%&GbId;ey=x#4 zgQ5i5IvV3KWW__uU{7sBO%M@Yiigp>j%s>GKgN_FMf5M$zFMh1D1t^U=%a zK}wu{LG(8Uc7O=yrXY3}lA1}~Mv107K5v#+7ujwhx7T{}UXxuXEY5EKuzuYOaLNRH zEfZ^G9?R?7t?mxaSc$CB6P5TPve-ymLQhmuM?gl8e~GRD?|ZH@K4%`dK6+>KCL%U? 
zYI+odc>9pnCG4re-XMn{Y66mGAgyO&>_<@8gNrv_+`Rwn-a`jFM+0N4Gv{xdI)2GJ zG$^Ju-89$ow*MtLEkQXeK5;8VlaJ4wi^m86eI)KI+umCcSt z-|}%qK6UGI_lY}tyEhpVI$8bto+}kfTr-u z*q0&&Ua~_i8(C0cZtfIuaBsQSso3WVIxDuB?>ORp|6-c^&B?ZPa83-_av0K7)(JHA zWL@dTV0B4{UBG-XuvxMs>h8+2Z);{dgyyDsNlbnSxmXb3Xl(i=0qGKC@?s)gfC#xr z5Kxe@h{EKdQ4twu2GSIcmyRg^39)|*lDJq?vJfYW9+zqEyH|B~ijgP+bhr%9*fK*nGc|X$qw9WA z!KaqeCr$BZ5-i^&*gGY;6%`~~=xKQPxw;2>Vu=(o30agYE8y8HV}QRMfId-yDKY*Q z36aQ+oAk7GJrh&1{Q`V{M#W`i7gSW%<>i$X7uR}u{qXYiFDn^#&fZDCWQ zpkR9KYU%nl;bI_067(+x4SU7i{Pi+Xr2|cgS%WQzS~8oTwKn!Q7S@fH)MB#>;qVyj z>4g)Muy3#;DyFlxJ}N3EH7j>sGkFG$pQAA*v80UjayU+dUeV~YS0)4iUc@8r(k83l-Un1O_Xt7~f-ni|;;uy`EnZ?WKCxu`5U2{I}nR41c2m7Q=M z2D=e5UqWhJYiV>zS~6yAiqrauLZdO*%yItCC(}8+k__p177oG@NFIa(XxJXtXrl35 zY5N^;TVsup*|qYLU@9D*p!HUDmXvhld!)H&`n@tqe*3k|z&6L&G|uF;!)q1ObGkm8 zW9qlHQ#Vh-?W4@ym1D`xy%d+69neM^fY z=TF~xbnlac_8G^U(qC4Keh~-iKv)-u8E^@FT(rt|<-v%fqLxd55f`u!;4>1F)RMXD zd;f;%&EqPk{j);`;Yd__+#!v_+um;5@MhzAyK@^}Y=|9-sm0YeM>tL5rtoMSn|=0I zx5fInXC_6OxwxGCtgrCJR%@Jkr)zq5YU!(*u7?592ChLL)xP+D{a#a0WMywjMm#!j zj6CC)>KT~fGDfI_a1u%6Cm>Y&M|iw*ctF)yyP=D@p_xHNN`6&LMuM077n|qHwdH^% zAEGSd2!M(7>N1m!gq9_c&P9P@M7Vuw=JsHtL1UbHq^^qJt2c?(&nh#{4zw-DkMl#U z0Fxw-#i^3v>BMO6XWJI3ig8&;f$uw3ms+ptD-a>haj}^koA9ye9T^HBK0g)@6~|8| z2$7jWR4B}V0xZbQM4EQ+Lgpffc;o!l@(knI=;Wet3N zoieOR5!R#t6^c+H&kS+v;EX}z(EXR%I#=)I+BpQ8nb`Vz>ozo99vIn*l9*OILCXjCk2}#Wd;4bdgW$fRz8k^tb`Oi%*}y^PLGQ% zB>;d1V0eY(oZ`kt)o1C(Mt(*{1|y^G>?L}5sJEmzKP4rluC5_AHs0FQ)XB!dP}kbp zI%s+ZYG@Y9E%n;9Bk%l~inYt)mVm-7g4xQ)eZs8c^L;XX4Zam8=MDCcjEv7lhsMAu z0_>ftdHinh!Ha?0cShd7KnFRu$M`}fc4iK3=j1&##gSH`B6DXB&q_?lYe_9__~J1B z;N{r*Rp_OnsO8e#J9bQ%+CempF~gqJkj5mCSXcs*JWCmZ_z8|rewt&em6g|w_Kp48 z2&hyBoxBk0`G;bG_WhIa7uq);fj83=1>ZQqKu94a5*3thd<1}xD9D?jlcez_zhUbDKwEi1{P zupqjvt8x_EL58C^*gXR~uyC@vzr@H+v$Dyzy6rIrUYlUa4Kal%P^o$Dcud7}(^$38 z*~DzvvD583crGreC&{DF&ubwfY$|yvHn+ArdoX@7YrsvwN$44%lJ9O>TmQ&kqzPhsR`Wl(f>lYa=1^U9GHcP-8 zprE(%@sGzVo~+%ef9$N^qc!#igmu@*o9t3h+rItTp{sB2tAEkaH#IeJw@|ivcE$dx z{Dti7&|b`qf(f?F(BQbDxMH6oWedsm5f7M`n6Y6I&RpaipIr1LASC zuF-MFXg}v9Kj+x*-$UH%N=oB=d^5s*EPdW@v|S560}#LqzHtFVDfx2iO_Y8?0+~(w z1gGJSe&XedmI??%t8y#R9nMv0ArM}jm{7n@E+)?|rC=1Nr(X1o7}S&=GrhCTXwPb& z3PF?roLB_W zJdh#8f^0@8NQ8m}C`@IDvd97~ydZ-lPK9D5$U8>?lPJE4Az}2`5(uRz%vD6@6&&ka zK;pjJs>+X^l%*t$wYRWusxAWjF-ChoNw|f<-@u~oboV_9&;1rz(^Z8@3of|-TscRhK7)hrfOL`sMS_Dd`j+N~Qk zH!fzW+^T!`ApMi70by*oEVgK*W@x0Nqq;KJ&c)ir)@(9819}Gy?>W)RB|IT5I=z2& zi|R3083{=W1{-oZy96FKUBl!fL0pNIw8_fNtf zE;c$6`l2F>$h<3#`dbnmA%8R`l5@`ZH=J>-zh`XyR%~XI$XPm^<2*pLnyHXUW&Oqd zw}5{>{srxWe|z@3!sB1b{2<-}2761SQ|To3Xl-vPEiHyfFLx$t1~Y#SiO6JMlfCzq zmeyyb$1zC7M2rW8WC)=H0c{c=eh<}`%fe1V7E@ zhyf!WpwF|{d!I_!_2ee=ymIxvK8>N?HSQ(s_?o*QS|FQQLP*ts6-}jOb5D-)p z0}H#3eQeLMySuv+gAQpEL{UOelu)|6ySuxa&F)$?zqK~#Ip?|GcYNc1-f@5L=QEtH zv4!n+$J*DruHQA|Kb;FA7hKwK)&8EL!z0^IW)`n>cOKbh`sO7A!wJb{oLi9j`I}== zX$nblsHp|z(>R#lQ}xKhc)jk{tIjvyM7;js^)4|n)XCAp;KL1>%TlUNqv?tbQv|VD zF#OwX)vTb!7%@`>bGu+uS@-Fr!q2630Y$|H1ph@;)qrNOyRRvVd%!Xh6BD~ zk9@!FGdMZw@FWG(4N887bDA6H;y_`aEC#YDeNfD&Fo1o8ND=6^ZweSF@n9UEKZRX4 zi|V1;T6P5&(DkYefOvqnV!{SQ$3g?FgOgK}>cgG~b;mXaeU5mn zV_saya; zbnZ-xt?TU%@3P?ZGb@TxlEYK!d7cneS7ya$hqu;bbE85E4j!&nRENYoO>?8)J>gsEC;Eq!)b{b*1AQ?zlOj6EONX#PUT7ecmI4oz zB=oVMvH-GDASDheis}3al#U}&AC`K{)OwSWl-}FYDVg*M6cy2{z<^fsFZpK+mD%ebELy2Ed$!3KH4Ayf2$8i`P&ZOP@NVZV(^Cfz z|6%=9*T&K0mA~=UJHOwsIeE|KvdLTh9ea1~UbWBG=LKGeVd!7@14G;^GHaY6&4)_WVf>Xy#ivx;&4g4lKNsBMJ;ye_Pd>SjMW;o z_RXrR0awnxy70=^*3R4Z;;VC8K5diIlSRy=kh_SC;S5A;)yf#ROFgXzs;iF{mEK58 z_h@T<7ajiaqeC#@4>{dinEg}RIy9L)jrx`&T>Mye*_n49Cm$Qm{&UIb{mZ6)G+f;a_bGBv>GHBU)oFX{0`6AXm{;h(D86f0 
zVREe5e_4O^7%>dR>2eJzA?+y~ilS10(@j2bKm>%ekoFNuL%%#Q#)5b@kG-Y5{z6Lj zJ%67LdhVp>F?J71ZD<>DC=f$Pb8U2UMR;C{qm#X(o@sq}Qhrpt%`?mNti(P}M^SOE z@e>0ZM~BG76lVvwXJ*gcUECvMa-V<5+;iS<*O9c9>q1VRjN7*LX=YY^a8N)=L4IO% zJjNmi=V}s)iwbJX%h@fRc4p6XwR9bv-dn!a-*@)XvQ;mpPO|%B-IskkzZu-mvocI( z_JF5{Pit#0^$u|{FD)%9vn&Nj29)LY-ni8{YBCo=KxRlef+Q}V_io#nk7gCC7bi?n zDjI_{s3O%He(!sv)z-nHz2iRV2{nC91h4_?0Z|t)TYt&n;97qM*Aq}`dj+3EghX9 zQBld+xlA!ClnkIQF637fhlcyGx;n%RA>HbiMT?RMFrJ8BS_si#K@kt9d%3ZND6J}7 zU7l@jx3RAOB)|e5Duu)3D$%@?uaOPf!;GBvCFH(K&vvLPi*BmvXsc&5SLan%MAp~G zwzL-&mp23iCbu?qH`F92#NJHH-~YfC*{+YMpF@5#KrGW&ZF@d+rm^b0mkTZ>-#H$9 zW{&w(P0O(|#AzKXE+Pf$fGHC&yA9UAPMLA=?Wm!q`fuI}x$z!aBft|!i&I%{!e{fl*~hRQ=M5!rW$ zoFhUZ*VGppTCultIT`fgEQ0cs3Qu2O&p@w0PxthQU~e}&S1;@PA1*D@n?3sBNOe86 zGI{e$~)#vXBJ^HO&g#x03V z-fU8Aw7WGzL)0%LfxIC?Nz7L)pq@k;L=-zpiwG%<&H|JNE`oIo1f|?Zlj9 z$jM^l#4v-0*H@Vor-;j#27 zW75V*_bMWovNVhv!&v`EuEnb=&)AYzzU~HZt?C%{9DW-q>f&=+LDUbOO%Na6bpBAz zeSDxZhw4#Cs_Xn163QLOPZa~JW3V=z4i-xW z9JSVnQ4+&@sH3r|B{i`iFSZ~h%=@+dHa2<-U>UmyA;cs5nDbw`kN1qb931(gy}P8K zCMC5Xp}4rXswAzv$T2HJFFV^bJrKwrQvtvCOlMGXES5xyGc_F zX^m*a3>j{S{H{MLsrb2z!dDJ|<6pxKFTY75zlt zan~;n0INHDr(l5eV=@FNkW6{#kpcD)5DWp)5J0IzhQwt>xN<*lTur0AqmAsL`8FSp z<~S`CdQBC0NLBfda*029zU-|gyHo~9iu{I>pp0RP7)KiVWr3xL3x){@_!=mGAo9Y{P*D`phW33;a82Q@5h8m!m;i>s)nV-MCw|J}l(eG_!a)fta{5yw$fRF?c zj@`U_E;A>Zx+3Z;nr^q=$?{e!K8gA^i05V4$6%fUm9QL)g@hAtt`$?KDg`-jLd|Cmtkgd&BW=QhtEP#9P|q5q7VV}wfDt_N784Bq_=Z8ojmc& z+xN5DFYNodjKE2{OZIpDWDbyJ0096W06qc)15`+)F$Q6e__wi-+SxS#WRehwfe%4M z0#PE#>6bWoX!m|OKnwxC1Ox(P0W?Ye%P;%s?SF%P6yH-TmIvUzfX4vj12jsMmPLP8 z@FoXqekIsP=O55IuXL1FOt9 zw%UG8|LoCkBUZNHAu$P_ogHlw;W0K}P(fl*0JZ@70M9Vey`VHaIVDq~FNhXoe|qP3 zc;iilnRgKtqha!f$W|9bD{jILoju=ZJ=co_3#Uj+i!TasnI#dNciU1P< zCa^)Hnz6Bixh^MpM^^mdqN3vr-qc>9G()5cpa7&SDWDD+8ubWS0;hm)fc%Dt#Zv$l zG_}q3a8mtq{O4EO+wJGUr!n9@gyE+al5p^Tx$R7Djug#{bYbss99Kkn(ga%;_`^|= zB2OeE5XyYnMfw@yY5 zj#ef{PwefSso~t#UhnE=8yy=^US3IEB>q0ZOh!LRa`JN%J6c)=d?+r5B`a-a%nTbb zDq@7X?}+hs+xLI|>{A*P7RhF}H&j*Gy?7TAlpdc};OOgM`rcgM`gL+nZ3~Ol$7}<% z8+z->&(1~XPBh#&S9tS$x|K;yRMBV0&RKJU6y&p%RC=W3+y2~&AKaT5?A^j)5duMf zNl8&uRLqZySJ=`BWu@KPTJc zM$&G2Vj$Y|E4S}wv|rfw)7c=kD%o3S1o z55vGp1Sh$$Qv~O5umN%9U=krw17IhD3j)|BfRg}E0iq<;{lDy|w|~Vxf}qAwB>@&V zTo9tS#9|{LoB+ZB3I^ei-U`s;?0Z%pO5F~W2%ub4BG`fhA?C9?KDpZX1vq9GhIaFd zh0wr<`d%mv%y+o>a`#4)g;zWdnTI`WgK8$;%jL3ZcY>HMA7t@5ic1?4Vk_b!DhpC7 z+N-j9x^p=+1Y#JQ1(=|RS^YWM+A8$u!-%DOJY`ooBZq_62Xg#Fv!Zje%c##I5>a~? 
z9}K;XVQW~dNn&gzUwpZ)(Yc>d1q90Nu6KQRt)jtB_w%Y{<}%1bWRxwUlQ;9mH;uL0 zb8o*l{u-F(eC(H}0Uy;+h`fZZpg-^%~ZDfLp z#t6$P$m5~1`iP1#f?PyK>yMo9bn*zxG1A6}lmVh-hzx%$HAPo-l=d*`=L*O(DHS7R zkMrEu;YUNt9$)&rWc)*9+zUk67}0!yn1>(!5~hn2wTbaT6?H`%U{*o7R>*@{`m<%O z4^=T7p>8p5?(4amK5X9ecGtx>XEWQAMYOgKFTW($#`bw;V%!G@n>Qb=tb)wuKUqFh zUmekxLrmnyJ)OM%)#_VsE_nvHUOaf({wej*9(iPWd@lKL(dp=tTQhfCZg`htAHj-` zf!q^e_tc**lF`>tHPW1Cwd&l*8zB{OFW)=<`RC4a*RMHwe=>7?kzAS?66*We<=O5X zllEL1yUajV{Sh+jiPD_Y$_7{Vm!}o@GqF=WEcyCMM8J{50qFw;7HE*Dx}b8JpnMLa zd2CCERF@Fx=8Xp!F67E#AcN7B8N&sjA_PU0kO2__0tDlZ2Yg;l{ZdcYaT_K-d$hFX z-2&8Q65*oW?z1E?<;J~Q*BOEW1bPutI0VI&aTq4TWW*%(geoFX;)9f!l)=eiVi=8q z8WSe;6= z{(!6?TM`-`hz^SeYft`Cf%#jTuOIK2`1=F?b_V{XhV=hCF&Xl2Km6@g|MA;jzYyf# z>)rm}-{{}Yz(0)md-vu4gU>toJQAmzA9|7f;L0ev)1o2Y^-nu$>B;=dy?M$D3mVbHktxrRC3Y%P4Q0pcD=Fy@Lrf_MlTtDncF=v@%st3)!x=XM zR*%p|hL|BTrpR1VWS{w@%(@SSr2(laLDV^EC!4;OdmME+wD9R~^7mDg4M(Y%st$c1 zJ6?C}YU>pXZ_YOQ_AtFEzN)9p*7dcujg`}T+oGb3kQm>`PLI#{Tt@CF$vq!4&S~~| zlWCW}U9t2rePsFYvB?wXpa89}udW3@S^jk6WSwb~A5NHWF#G(st9KKvzd_xZ3itV; zdPi$MuJ^gW{^jYAhWM={>I?3>8X=f=B++0JMJ2y zu8mB5DD(T}arch?sdM_uwHHn|^D5`(XU*g=DX^0gf*Q5z2>XUWt1|S-a(dJVK-h$= zh+;n8FdkPHrPW&$zGI&fipvuMNuD3B#N&-FC?5Uc4YG0S=A#-u)=P7q&(5`)9OJnD zd941iTE9vBG6YyMBtd=CY62L^A(eR~t>Gf3d-YQMr%172zwmc}bv#&C!`PHrp<7h< zHZaEHi+fK;&9m3W=3id>7pJGzRYj+#=H+HkQ&d$R@5ZJ07})6 z-w5w@!^NtcjW6`G8O`POjiF)DJ~1)+&#mp>+daBV=I1tQ;YztV>M#EdWwyAQ+Q@Y(!Hv&#{rMhX!PK`>>7w#5q)t1CdrhtBq@i)VIf zKe+MQ&NDZc@%~--Q$r8CyIv1=TfR3Bs;dxn^+0_SH!dpsiMB~keI*e6Kdr}u{OK*?miz9dBTQRJpt2U`5jg4{W#32+TCCw7)a^7F|6WTBA`?-vu zmuFHU-2yEn?#UBf;t(_VpeUI+7Xlap9!qP-N}Mh@byDQMs(psEkGTG zZer-<6C46cxxDKrwwT%yi$H}B)A;C70mDbwlhfPc%j1}fU^O3xb3s}N$|TIfiObYJ z5XU6F83yV|B$|H$5Q^v);M9R*10o=FSI1Vs@&S@*FojAWd+0>50W}(Q;BC^=2}y53 zu|X}F9~CMCtV^Q`E~zy^>0kcVG7cXEqW}2z^B;X_|LSN${3xBK&n@{Ku84u?Cx(C5 zMEsWq$NvS!iSHmT`qAR{JA~2gl1aL-hDp-J4e;OU!a}EeigC6O>p?+4O5PkHc^#DK zqhETU!TX>NYqAJ6A_SX2JRL*Fh|mcbP%Gwj055=&LGhr$_l6B_MM&u7QC~XkcVbDB z02>=C6C!Mr!>+WHo#6HECCR-goNR>iABr~bwwtK_2$3jdks9E#)0OrLD~B+Roa>ruO!3PBWJu-JE?Z;MR0g zbp<0t$`~2{Qgx^2_A5@e?wV;^nmzmc_I7Q_3QRBw7mr2pQDPX;BbpJPx8||g(%V`$ ze_ylV=Ub=@W_J`+= zxjZwk%Q&0)?KgMrIbN4lpRYr1jvJ}HV5a4+6taZ+*1S;ggNWY%0%*?M98#*L~L=eATu z28E@h-*fX=Yw={UkEcd;qjXolGJz@rCIxI65X%F|V<0CchKf)b0bPoLhylT9xn>yx zq%evCIf|%I86#z=`3Q$`jDGc^6pe`2Yd+pPufOa5xp|*0HtJh!+3aolXMeexpi3EO z^*-eu7}r8-c8aEOxMPKaVIrU=M`bZg5W$RIo?=+!!ScebULS7wd@*o$zNB;4-t(Pr zK|mIxfI;$V+B@CD5>8&%a{1!s;v8uIvi!ilym>1-5vfW z790CCFW1XEq$$7j`75)lx)(F+GfbYECkB*I+oFn$?83ZgF|8aVpswnetd^kEvhrSm z)knAdhAJ+vBR{YA*u|Ei%7rRO+bBdJhlo{W8#gUWjt;>6{c@kav^6p@EG(-^E$Dmw zHg4y#Q}&09zRo+B{HNaQjZchEJDOWYJatI$3oZ-|O=L5gCB;!aC7o67k>NQNZ3$5g z1N}8IF`#_Bp}L|pFOA3S=Jm58!()t%EGsLU2PRiZ+Zzlx+_vz*8jqI^9G6$PY zZHxiN3dv=i9oK719O?=)yBqo>-Rp4@6{9G1P2&R)Mezbpgo(s#+ED_zBm|S-2M`I6 zCrRX@u#}c{vV= z32}vasm$IMF`vWi@8gTu7_GI&5CDrW5&4rKOA9~*fITMA6>;=M{Q!JGgTD?qHxAHN zn&KoHNDDtUX~qwwFcPoi`~JHG~z@qdSXgKzwA*!K_L+dxTmkEH(|cM$uC z{|@$1)QCaI#wUmy^H2&dj;2QAAwaKh;dJ?f`GfSVv%2IM$ubrzvR7+l5n-o>@G zcM5Y|5~HJYQ>(h#IGC7Zv-)~ES_Lc`-bH}c^|wgcx21+^ z{^<72TizdT^LjU9!jWRWD!?c%F5Au?-N-nYUTk9f?(GA;7tY`8{3D*l#yrnVz1YuK z3@{FZkv;s0HQn0-;&z$Y?mTg0wZ6?M6ZcE|?N(?$8Hs4o{nHS`A&ALv`3J*)w_m!< zevZ0)zyx4mB=oh0@pK44qUbEKX?CpTJr-D8#kNC2(EMaed#he1av8lGRvz}{w zy((k6P2K+byhPW9%&KVwa|DmEkS{pZ(zVsj;lAn9*81+AJ_rgc^z(_jdE;7sNnA#K z@Us}*S(nupT^+mm!JNIf=kGf`bMKDHw@<9pKD9pn+q>AL7z@YuU!sFQ)z@szE8l>F zrjReqXp_cy2q_Wg6T)y5RmD+dg5F9F$3am<4i)f+2>KDUe<;QsAtFbLKu$#WUK@de zDx*!UJ!@W3z{Zc(+q6!M(M0C#T`#jAM4rqZ$1+LRHtj1)F%WY4i34e61Pm)CFe-YFo|uZzJF5ukPM+0!jo{TA;{keyyUVUypQ 
zdpa8oGLkXBnC_&^)=wWj%JO4@Z0+RLMC3-5_q4}l=2sUrQ*)w-JLr&WYio;)jLgi; z6bPs*9HOG4?Ck6!A|h&QYeACZArgsDlmpNW1cRP|IMCONQL{i-V^Lbb`**s>^P`+v zyNWStf*9zIC=rPLg?&Gx{ldPV%La;bX`OciVq^!`qvdrdAI@4U@Q1s=a=epVRDW)W00}0n?6rhVjdPtmZ zF-c+RpiAtBVl?%Khy8FVNwAfm5Jdzmj8+>H!FP3OQt~Jft?t1f|LRcuA2D*k=LkQ@ zZvez;KO7Ey7=6nof4sLrZ=XN{Tg0G8?_Wvn@8RqpEsx>vUO*V|_DNc>P={cEJ_nVP z{|y_EV-b>)H#q=~lBO(#q%^ySKA8i^Mo=4r&qDkdw@)2}ba5nr5`YH4RuOK|&j|19 zY|czBjE+c*i%SR#4~&Tp%grjx&o4~Njn1n|E3I$v4WVcymdU6>Io{&Fo4{QNxEd;w zX5zycup*VW>E^dN<8De1yFYA{fySoyo3DMo7FU&=TUb(_U7s71pAZrj>f`z4ljHmM z?``cq+P?l`_x6jOy{n_Mlgmd}clW?=0WrSOagI^$54`W~aojXkS9yZ*NDaM78(wZR z3wc`3C{KtD0b!hozgH+y!%zhPWt2b|tZ{7ioyu}&V@_^H=iI)VCok_=zWAy6?w9YjdPZFfOnz+p{-CM(;oxA;o-PLU9rrQ2 z8d(i*qu(sPH(%r7Xmtx(u6d-b$`B*jA&*q#ACH&6uP(1OQdwVF)nLR_(^(p~XYDaN ze$MF9GlwT%qU?;qZ%#KEEB!UsGb7QpAslIyknhMpP-ZA}*nIu=WuV@-8 zdqHK8TIegy^l#%z6Xd#T#?UcY(Ihc(3Wv3yy-yojcXY6F@=M~Q68cISFYi3^+|=T^ z{fn<@0lz=kJ@WC|As41=9@IQ|cIEzUbFZD+uzAP44_|G&SzYm|iM8!@Q56;Y?R{_6 z3uhNrYV`Li5dv9)Q02oI4vZA=hjaTCyLn1&?8*HC_0~3px^kJuN;yW4iU1!Yq#gjW z0=oCEECzBYi~w|suxna&>~wppA_Ty&H3$% z1kaepIz|zIGuCq}uW|F2mI>P3$dYOVDMOTdkl(oronUeyEEb|_ah~U^V@}zbE?mFj zjm@jzk8Tcn1}*71F}?{8?iv{x>t*G|R@J1GlqPqzcJn$hv;$InGasDSH+=lw&b9Ed zRm-2d`&7rYAxIk{%|jGKL&h<#UV@5TNKI`|%S%kn@#e7WQ-X5tE%O)imc!L?%g?fsgDOiaZ65#{*r&!oTX|DOHAzMsqJ+7+mHGF5tKY zkckuyevdU`6mr!xmF1UaQ!{T!R8(MicwAywMP)dX`4!*=z(o>UFJLdlxch-H0WuWu zWZEZ(+lk53D4n|Z5a1JFr6P87RcSMN5lAHqkGKE6yt_tcz%LlF_W z(c#|iZrs}CNZH?w|t_2#WL>$ly#^T5R5qV~P1U5$qc z&U7H;2m%qn@D6f*Naezdc9Umb7_mTm@$3gHHt6r!d~5F&?fZ`&8QQ=6@cyMkh__#8 zkZ*8^pLc+d$5%fezo_`Qyu7@syn_79?5y$qtjdwoJD(dcO4DRm2VB@21Fg@!&N`v_U%GUBio63Cg82NN4$Dg!naG_Q7x z{q6kIF&CY--f_8Z`^nPa!Q<ZYgeco*f#yr$yF04Alr5=a&~vC>FRFc zHg%DmZQ`ysfgg5hAK!0#bce^f-`-D{`f7}lw%kY^m5HWP)r_X7SWFmdGZZmHWX$A- zYmZ){x9=tNh%SagwL5zkX1|x@m*gZ4h-D*~w0z*KZ6r>X$N0-p@ zR79<_nN^c(Q`PHo>B>)!);&71cE^e<*N^QuxBarwHC^+^&tF+Y#|A|tC4UGh zcCI6*1;uEzHVwzP@;E-02UCPFR?JuGZP5_=)m@p( zdXi_b^Hhj_S?V*AFj9z4?Ckv`H)rP2?dKlvTW`EX@tV{q3stEn(unoY(Z-|qKmH>n zAw<|Lpf)}4pyx2NXlqB!s*3Ju;q1|-{liz+YHnnXJlQkzGyKVgwZi@lWswd<)ydb| z&)--!CuJua+k#IR#3Vs=y`R$u7e@yV_m3S-B~2AMso`NI@fjH&;Vyc1>n5$+`G?NB zeW6p+v8BK|JdFBA&-eEz`Y z-_z5Ro}QkVm`ME`78cgl)`p^Je}6x955<3tjrAzO10IjA*J46bV~x)Ts{-w_{?imv zrw)r)G}`pQg5Ut#Zbk$CBL(GGQO8eczp(FTGD!j%fexhJ)7f%8faU=f2&9)J(LSAy zr3yJXpTNa{5&{9N5R*xg1erU1>?7M@hlM=H@Mo-su9~C<4;}SPyU)Fn8dl zlYM2v?hID1b3f}9U+7MfS#+DM!F)i9IVuFa*#7Ro_BzjsY}f4A=Xr^GbrsjeLW&Xh z0qhrv&r|S+a1J9B?W&oc%yNhD&?f*m57 z-pIjZH3Gx9xR^r_d<+-Tm17dv;R42`8$EEvz^1bR3G92*5}KvNz!1>^(Eq5n?|YDa zAp7ty@WLcQ*0j<64bi{i=zwII)hXX=331!X+laFEKVMDK$1Gu^_LoASX8^B_%5-J+CAy zw>ULBKOw&`t-iXVyS;_Q?(XVtZLDuSdN!SX*8T zFniHX9-ChNLYh4ppQB}z)X}ogi+zU7N8?pXpy7Of7 zKaS@nOsMsn8t`)Z<)hQq{=VhR{_~HoZaJ{Y>64vM+)E9*l9K+L4iR!T_CxbCltlf~ zLg7d*Xoz4u##QCD4(n@D?rxN=Ek=ryk%C~PIzh3geVmBSGFIfmXb>txzZA)q!cj#L zjN!txZqB^s_E{x0>zg@ST3~ij&ms<6o!6oS-J_s)2D5o-XWg3CiiKTGBltWS9GA!V zs-m6|-6f;rUDmw0vhe22iFbxCcbbLVN9argQ$$Hye)El`UHvT-^Cbf8>MB=jOPL8B zQ^@WyZ7tK18`eE9y7q~4G8#MG1ncTr7AFR-vDMw7bxhytE+aoB_l`mS#$#0rmiF%5 z(V%xH<(ZkFswE>Ty{4!TD3;=P1I8vA8(-crjPpplaN2MBnhHd@1W|4pHiEAtC6-4p zL`EnxqIcHZ+TWI@>^@VxdUxF1#i1upRC%~T*qe@w>j}w!nrEy!m$ZfAUb*;|s(5 zEE(cv2EqM>eLt`L!oHu$2I>aTAJIKpsHGQ(27p=+Q~};KSR_FV#27_Fy6qKR_DCnj zN$3d^umCLq_$CsaBw?{cOKS>Gu&BOnYi{b@)QGp)(J@&uF&Rm28EN(zX&*8(1M{;A z3UljnGb;-7ODamTYs=FctFoy5Yi&VBS%!B_p;3F&0TyQ(2UTFBiagOm0@VRL3d9UL zR>&W49hC!o#PAaw@d6G^=fHd>y1BLQTxq3QdWK6vLPT6lW?5-_QzMhf6yrZ)odk+w zLP>5MCh5vb&7IU_BNotZL4l<5gGYxX>8K=6QjE;TXzKcp%18geKH?up_gA?wF_7mc zIk3Sy{wZrLDR8D!7$t{5atu^GWBB*bEq(ljeZ*fqG5*1k#1C)izhU3&c5xCLd@Ycq z5FzP0 
z$fJ`T@v@B6tI4q&7%i8hLN>a7J9+o*p0(C~*&9kiZC; zcGlV+eE-+1g}1)UP2+wcp{}d3=HYcKV|~{Hr>-qqvuWoa``tde@4CKY>7Aw1?$1}c zK3e6T;`%qMuejbd|N0@aq}?~a@U-Lct#4+|*F`RRPjqkm!@c>>i+)q5n$vARkmr(H zZDiE_p}U@|hn6mBV6Q9cU2Wt$Y`G<})N#tNCqq>}EEyB4Gqo*aD$sH&Q!p6809AmY z+}?4$O`3g8NO`t=Z;OIZC`U1$0F){4lDK>q3i&J8ZNIf=Y|Hb1?&}fZ=X>tL(Z#1W zt#UV*nrx?1={3ABUX7i#n`qtc>MT2R-i~u;-g&sL*tFu}t&;(PpGyn#>}@@loO`6< z>piBeN)_Wt3q=Yn7{Mk+vAUI+wMv5eQP54j^pLh{q&*v9lv22+PEjce@@&Q!cCk`t zJkpz^Am|v;PpUKt$Je!Mws%hE3#W@ZO-Iv0 z24gh!K?qWT*RI-|i)1>E2zxxmcH<(edGnu5)qFa3sG-adGa1BMT2WVd^{u6^BR%5* zE;RQ33Fv5GBD_{vqIW8=j{@e%h73u@>l+;3ALet<_tVM!n=dTU%(T~QdSy_z>tKz> z>{j`qf>{$Y7EY?u*MiDUb}cI*GNHGJ8pF)ax}5hG`j!v0ecYmN-i}sKk3!Vi5&0%W zjx_>76%mFs(mYD7Wy<6djcM5nH+GJhRjo3jQEq7Jtw&s^H=MT@Qp{F8O@8pj=f*Q< zBbVHqy2{>`?1pkTudw9U&V3v0FCDAgznxgHpmY7EQeC|YZ@0v}l;q0NlFrUfkw_%b zW1>wg7Pq#xhJ}S?WMl*f2MZ)7mKZew6_+=B^9s$5OecB#%KqYSZ*CQBo>!xa)Q>>o zryvd|fAjxl%p*IPIGr;n`uC@wU-th8`-Oc!lMP~@BtL7A0H~iZln(rU@0}&$Q_ChM zB2h7|I?qM%evybJ65@mWL0_gV5)ED>0k(}HnAXXi*3i4Qw*7h^H%OA*C8D*U&_1!S ziN&mG>!_-)udS}BD=8~3uPUyr%q=TPFU(EJOOH>94UZ4EE6KUu)3sfQP2mwz0y-~h zx(MIj*{Q>3Brw`K` zMp$@cbZksfL2-9yA13KsDHQWC>V{6~qeVh##dx}cb)e$4T>_MabYUs2GR^ucEABsK z#6SFP#NP^}(eGu&5@sa_!A|rSs0~JYMFXgZ(Q_tU0)k0|Pq808l>Y{H3-X-S+AQak(~*vAvHBvIW?v6#(cvDsa{y`2R;jE^kdEiP{^hACqt zwfc`^WAmE2Hl<~r_wmuTe`9I)`n`>DPZSxr$|UZR`l$4_n_`a9YNdPCz}z$C5v zNVJk(D+wI}Jt7wDAVjD8x*x?x-?M$CZE0!#!R38yY;+L-QujyGIJa)#??A7h-rly8(-@SHt z_u4Qm$vin}cX#h^eeB@?V@3G1){Ye~?UC~CG5s(}02)o)*&mC4f9$iqoE;$sc6w&1 zx%mr2%bQzwX&UJs)G^+<^2*2qpH@w?)kF+rkZM=(!A#KAh*A(2DqlN#=C50 zZL>yf(=9O~bb53$utz)}+BT$%C(ml&MYW6Xg z8NZniA7iRM(R2UQg7+HymZAMD)pmvkmp>M`qxveB6lNT%t=!q#u%Mx8rjRp^D&u->dV%+>?!p~CE$gk` z&0c@#kKIR(U%c?pSo_eay}P#mVPSZ9}Ec+0N(_ZL!;0Qs#G-P2%^D0P1;2`Ia2TOoxe~w~%`yWei8E z7^?jF<&PcS+ehlCPklX?R&BJJI^o8`yhnaTD~` zN@ko)Zp*W0Ca;eih*`EgUPG;B-;&DB)2b#8Z&g49!;t=ANcpH?=C5# z!62`&A}BuWbH2BK@@w~->%OcT^>wAP>y8P}v^GS?+T&tsuAo!4`y^$8zwD>8U)c9E z*#P#52e1#mL%@KRvfz77D?ZRSmBKfH09Zok>nGc~goS*VFDCk;KIir2;dY3 z7kb4H%37@|+oJmg-NZoAEIAlbq(xQeenlJ%IFe!oE&&{p{GNLU&4j><*?Wn>o-Kwk zIH)m%V{C6B8rOj{d2vHB7*VS~hl zH>fI&lNe9HZfkEUEG$lnPm2yujS5eys;X-3sAmbf#B@AP5`iRtsLEy0l_w{(SHxf)_e~%)8Thoq?IW-XxAe?=och@WadeVaA_Qr03ZAV_OVl)S(H)R3PV|{D~r%EQSMJVpjn)_b@sFe0*-mxSv&vnbY*S2(0Jr)dCezt=bl|*`Qll;Z={9R!>i{`Y(KvD%)_g; z&MyrN^zPk!a`}XTw^Kwzc2UG9qZqGEID0vus+f2_w6n~vpWoDU9v zTvl3yiH<-~j6OIj~C?mV{h;1O*zGec|hLx&FBx_9^DW4$f5HnRRMn!p+^ z5KuI!NYYt))7gS?owbVmlHt%r-J^g}u*Ihi8C0U@A`mF{mQIedl(XH3*qu~Pa+^}u zFsig_OmV{0&Wy=qqdd1ox|ctPfR$|(=Q}g*7Ja%DZ+*4W{m;sPF^zc&947soEP+DC zHyO7Jh_23vHIHd%pJ2n_Z_r?B3z9__)_5-B7JRoVAuW6^!L@)i6*N zf+8En^}%)&9trx>XkRj`6*?jVT+<#s%=~>_>5ySeMfr|Na{MJ@*yDzwBjwSd2zMyL zo;E{p?y@i_iqqN?l972@Pv`oh`_)a=pTD|#_p9>Zc}z* z)f+P}Tl2!Hlfn_X5@Z;2$V5PpYI&K^_lETY+M2Dct$BHQ@$vD|(b3J#%>aW%9xW{` zHa0fy?(R$`b0E}j3j{E0`s&>2L44UcL72^7{m*oZbos ztpiFiM~}ow7Kwt32%H2E)GbKPF36G3sQ3>69a0&cyKoh9r_WjCkPl_ln;g{MS^#fL@4hQ-E&$3}#OXOlC3 zUV341W?6ZDb#--pT}yL4x21uXpH-L;6B!=im6R4ySX$E9$PserTbAbn+!lcI2-qP< z4#CJHLLLB+3WZo4ilROBYd>@#L7;T|vna(McEjlQhuv^0hAyLI$1tjk9!nDw76B@HD4|L1#WV_}NU8~{_b4d^1?HoeRHQ&6Rfcpir8b#>+6by5goNKH z6||>d9}ZxmyK-5Xen(+k0W24xDEjA%?}_P z{Cy+*{lf!-0^<_H!ovN70z$(A;-Z6-;v*xO46ll^PdM@%k)M`GPKsrh#Ih$+xdi|j z)a{UO8cbbgdzT;~H8D9ov6D~v0F))nC^_*s0KyRE&c?n}+5_b<8IzNs&Zba&Ev`ec_^#NCVUhu;rW+2sH& z(TBEZPd@fwhnuan)yucK*UlI{KOMvPTrUpwPk!^+UPI%#dRSOeTx9OUYmbg>T@e>} zyR-Qqf~_O5ax-^rV(zX_UgJC~`n7<0^Fm=Vb{6qmP?LU{JQMrg6*%ze@$GL-!exEW^s|xaH4>DT}oF zFLsfNI<~B_{6=DX$Q(0 z`=>F6u}H`a#!1oHA}~2EcV~d(Q7?_34aY-Iq2c7w4EjOuCqXgSN9Nhy6gOTncQJVH zy7$1jrE9kBIPv@M51xIrv|qVp$HZUMCR{q(&&z6xtaGSHHXH{d08o*Efw@(K($gk% 
zfEW;k0)vEUF@sBJU?toS;4sK+#w9>;f(iF zs~nefrZ44)*4K+}RX5#@^WXO3_>3D%k0pFsTO2$W77azg5FCtW%T+xM2ftney-|S- z$*Ua%-G^p9n?L{L;ZsnaGA7Nl* zr=?MFaC^cc?(v*b$sr%Tt~FQMd~S2$jUb5)0_kK5|rHRZ+~!h?1IAaQr}p z>eLzts)t6w(6E*z%c|c!tyVBWDNGr;&MPl3kBEpM@gE8JI2_W9f`*0$BO{~Y;$lEo zevo68A{bO85|M@l2Mkk8m9a+xQ?M43)3ICz#P$3-k4)DqZoeOK+P%(j#nNZL+;mpY zmX~t?!UrOeya`3B>6EErx}28N+`-PwFAoV1Xzyw!r*nyjB6Xu7rHCu9m4a59okT2# zg?u5Wi``jRm>(4A-%?#C<&h}I5doSq{3Z%s0N{-SCONfZKtzW!N$3J&1i3)$q&8fx zKPj5;pgY) z^3Bi1I>^`QTY0W=d&^}hss@9x5_FOT-VU&j0Hl+r=s`T78|NViLK?L{nW( zv0Lv9nBGU|^-INi7*Wthqqu@FnFMpGRLL`VO`=b!^rcMZj1jqXp$7SV(wK4xoQ2{Y zD8a&jfTs7OP6zc82m!_h-6bpp z-N>7jN=ZacLh?@1xR;ieW@ct4CnuA(K0G|!-_OU-%iZ6{J;2v3D9|@NEIKYWH6=AO zJtMocyri_etgfb=(}_^5jxvQeu?k?W0GI*dvq=02L0o9O)riABPxRn8{k%nVmZp+%2NfWK_Ksk0&o}ySL&JmQfJ-hdYFK%d4F&0G%$$^@ zvJ9t|QY|U#SAfn2R0i5~5ptrk>3%BXWqhPnT2xFQ$=n0RWb4D7Pe-Q(dNnNx2oGW)bmuuU~(rI(+*~^!D$%80l-g7j&ZR0;%NJ- zx+bKF7vmlE)ZO=FW!;_V%sBx$!~CjM0vnbb(}vDJUijYVwvXMLoCM>FLW6dJ4zu=j zK=L-Xh?|vy!Dx-yAyJM)3zA$zy^x$7{L@S z{t`S+({^iGeOPi)O0chQ$j2`M=8x~ZnXpV78u5kY1IBFlGHm2MN;r6e<&ZI6OUESZPiRUR2U-W>Ez^Kt4Yy{8 zsojAqnrru*Ubg$#!c%A0J$!5sp0v~A+YEodH62YWB|^0}wpx0%F(7*lF{|}i0j1+yWQAfk1<2@ zcICGv?FIeX*duT-xs|ux`~4D~bMnF&M&&h^K(Dln*!GSWKF@mX?;#&`<`0k&}}{nv1HcDpOO_{Cv{v06tfsB)%7lfJ}t5o03z);wl+c zfa{wP8=0C|TU-M8Vk*r@Ag7l9h%#wJPPd*#@*ftWf4Bd%_BZVNZ)(I30YP~;orhB6 zf+pWUX@v|a3HvfC7!U2#XX;HGZZLS@Gidn30h>%WT30&O0WM!L6H~_$0~rUPLb`B* z`VDCdQqwYPs)UrQh5$StWQW?>{WqB`Jn)i#~DKaoXObisrW(bA93;33DStU(C^^Y5A zk4S{4NMZgn!t1>oi3k-gLvl9-qM|-fcS9@7(XbDumfR%v%aMaGeA!MJz5oUD0oVzE z4gk@xTr3xVml)};rJ!u4oR$+I&o7gU5V(adSxCev^$Ryz10#yUTL>Sg^BECT3S%T# zq$pR29)B+l`v|m4D&dm|ud=c-H#e6w@gaYbDA~))%iG)A*Vi{7Ab^B@ z@o_P^S&X9m%&PLDrpDT?F1CO#l}J&U979kXLA%oiknX2}QuzS1|1z0^)YKxOo^nRg zyQL?WMust3TLsj^!x03R(u9IydXv)V7Ds!5KPmxL@vB}`)tD3^U=S0Vq^+mt?C;12 zT`=IG1P93YlD0xlg)6V-8OGTI;CTQZA`*;3!C4vjh@jyFTn!{B71mYcJrZ&y%{7Xk z5(;c9dWi(jX&E^&sX?WkZp_ZJ5w$b(WXlRA>)OFBDR9QHrsnFl)ac|89|uqOM|E8H zS3YOXyq|08v#6|QBOulaxhibQ=;F@tUd)*<9j6((?Yr|@&DwiQdE1%%hMC1p!#jo3 z&5CJ4=o7>s?Ag|%L zrs;{R&%8q7?&;{9xq0*KnTzVL4b7{nwgviK>+00zh_wUKTmn-h5?Ki=@ArT!6W`5H z)gC$S<=7<_8&8L6*w%)*q(@oWx*k4vd#U>U)nAS0nOJV|4PWczta9nx$fXOQ)$@is zf1D|57=>X9HkQHUBqWdzYpz>bnLa)GGxT92WWE7%KC`gQW0jzJaz)kbf|4USIrpqg z)}Gik;l^)EgKXxNg+l=vknIZS?Fh(d;?&4%;5%Swz-AonY^{PJTB>0J$}m9H{gU9%1Qjz z#BHjsEA|NpjEPU_*m2tt27`K@UwTeMfJUD$mb(UJ9%8ann_i9|jK`fJPZtt0p z!r*ebq#Yw+8K9TAA`v-EKx1QLDBZHMvh?)y?Ck8=Y&H$zPyma8uoH;8Kz&)fhm#aw z0=jymsa}{AmEj)}kiUQ%Mtov6n~NwzlS(RO9*(sG zltV`zDfET_t_%pJ^u@^)Y{h`4fT!YvB%l%Ea5pauogs>>ztg50F!hEdclQXEE< z7e>&oP14YaaGau`>cXVo?+6$6y81(4))lre-f>b1ELe)T!eC9DpYJqKo$Xm2RK-ag8mXYBoK}g z2@auh8$eu~7Crg2pu4Bm>7YbdxOMaV;$D+8%Y{zWSGf&#t> z=5%)P>g(7QC9MV7Md>M#39)_w{uHqD@bK{T^d#Y3P*6|^-4f`=WHPI(t6N)JNoYu} z^(ne$vfD8^T?UKytQNkr!E}3$l?kJ`;tCt-tw{-aQ8}L_%*(2XkIrhY=PEE9-CYO` zPem9N$fiEf;19*RvqY&xQJk(=MroF71scSGSV%F~l+@Hc(bd#9e$~{G(;X2`rF@O3 zKEN&jY&(D#h{ekr+fHyrpJ@!*iAiwEF{vnB$M`tPruv#ddvac7bn0V4QQhZn=5`52 zngMH0m_e5u`aJQT6kqpDLLG#Yla!Vi6dC3B?Y-e4bHCH;w5Kk4+~>LTkbwBUKs*`X zqX8Jh77i&D%#3PXan*A0hWpTEBgiFjI15aYfxfVmG@&DLumA>oD}+a#l6IN|+=wj+ z`xfB4dh6OrGawVQ_5J~~`P>}<>}Ii+1xHObb5u(#uuU(|$gQrcC~5$u9iYDAtJO)h z3(&~hgJX;Sf}FL!Ii9e# zIa8W_2^FoWuHD1o4f1wXHPKhQc4Wk!rHeysCe>#Tmy3R)c>yVMAOL0nbY*ceq`eo? 
zodjw91U;QJH|WWhlE~@)?(@k*xq4Uz1nj+W1=_ZDNrcOYOh+i;DP%DRGWrQJR)Jnk zfG*BGAM`S}!>T-C4;(lectId?a-8EdyOxi;t2`!GGq*R_zKivJ@bsKZgqe=fRrRxL zw11r+f9*uj{v|@olg*}c(BM(H3j`d-p!Un%V;-E&v)v4u=E&-9pzY*a(k|-E$q6~B zzkAMu>Ax5)n{#jS&hyhA9GUrL#n`0z6Zz97@%ljE=i#*jpz3+AucS zKQ1mI$T{kpsk^C}iG`=}M^Bv(&QF8G&E4#-IhoIobDbUed0c`0nrz#HwjU0MHbpoi zbq^}CE@$PQ3`sRB5><5pzL3Io93XE;d5SeoR6;~Uls|>%? zg(rddTq*z|plaN(oPzwQsHmc%BGR}~VO+YA_E4eK(bUuw8yjn4V)Eg`hp@1)#>#RE z^Re4NXdr51+5Fa^*w_R}V3CBj26V!1V+YrnnPeL7TUt=ANL>bW*n~XzzgTbo-TtH6 z->~n$wBfzoV>Gf-&aW~WAIX3`vnuVGi^iNc^M~q>==TU3co$m!c7bmE%ixOGsLYIz zfW(aO?DC|724-PKkUr z$cSTe1azz!hfyksDWjnoEgiw5ay`1KV*80)c=uRSGv=?`|bwAGs{ql zZ>;CYB?|qLC;$uv@Bx-&mITZf8X^pYR1AOG>_^COH)4r;CNu8${ksTSx16QM;a(N?G>~np?#$EI!<_ zzq`wP+3Z(8&3HOs_vaLx=H03$q|-;_SUXnjZ8kD^+$d9RmqXtrOi8Sr zAQX=S_$Yu429yrj^H^(idj6Nfx-@fF{kK-znAJxc zIV%vbT1IR{!1aRC*TIn=lhR#az-~giIF!5=xk*dJjQcKUr)o|b@ND$xj|-t!Goa_Q z7kXY_m*6-%J$g`O@xZ!8Y$_75Gf>+45Zn>f0l4&C;LGUH_S4c zIqvzO{`Y>KV|r?-gU-m?=VzZhcKGf+Z7Z`QAKyZnk0xb?Lxo;rnrz30?t@-I&A~-FIOGY{8jJEIkg3l{-Y`NxtisvZ~QIayfe4> z*wJuB+_@j-t!}Qrhl5LrjBiolCHc(u&K8c8G8RHw?V7}3ktqpIo{j;bfySm^3W~Fu z+FIo(uyYFa3<#4ene7$ySp-MGCmU(^qNAfp0X#k-v|3h;-})1UG`^sq0#-)zQJ*C3@z7WHSgtsLjtfnpS@nkcuHoG z1vT_3!VEbrHGzs@k(iLkDQ}@zDf~tQJz6adrR81`Iqm7`7V<%9Suh@LLXe(`nKUJ^ z6h}y0htT!0-N;Y54@%Wi^aQwR4nh_kP4*;!jug8kkl&GF`hXG_&ZlA74~;}@VzHRC zeWZ0OD=Q-r5{Z+@6)PX{7qNN^#Meg5YAAjh7f^wCS zw&))VuBC{gG zaw0;Q$?-X{5kcW0Z!La=#6ZlanelLC&&%X^S!or6!C+=&lFy&oN7AjHM@8-BG+38oJ^F!I_ctl_ zMI0l($9%t6E|UmRh|qhH7+=B@7B@CIrKg>O#iudRMqIoKz`Fp6gMl#sUKjtnXN0NU^br%0em^`whT0j0U8h)_vF@-!|T_)QQyrB(Qz|43zyn4 zy|%d*%K3w^3{UXDSQ?=GkxVuSB@6uY+7ds+6 zCxr!042xLq8NB|rSumrPkx_h0>#k9R(ULc-XBjNmYqjHv*NvRoXlG{&c00GJm2DI3 zX6k2W?`r4u&4E{4OFnH@TbHAom!_WKnfD)8_&P4H&;CW;@w2pJfQ&nal12d#uVW4X zQ)*&oTU=gacmle-by=MEn3|#yGVvG!9|;IZDjCIZTTvJ_=JQ$T=BU0;$N%(Z+f3&t zv&~e>GeE3bj^oV8gi~7C2FKb$mcs>;xyAFU$}Uv3XgCF(eDdJ9{ms4_ zs*oY2h63q8!?b?>_4&3fi>KNw-kZMSV$?5(ZRYL$c2qY&-yl;43Q}=Y#4sqAFH@;_Q`7qf3Yide@ znT2_AMH;QyEtSDyYQUnD*%Z@_#C+t^Hzp>gy}g}W1S-6JWE0S>u&|IcgYm9zeitqj z?b<7!z6cB(FBvgbvUO+Xt$U!lQK=0?S{eyLFCB5DOIDO#6d4jhVoK7q_QXlZgZ~}p z{SR+{!@mEnhAQ2AyQa;Fo**-MP}~GE4SwH+Iwdg zVoIhMx^@BYUS?8!d*p=Qj)GPMh$x8qozRYi6foXHI@l9ZM1S!45d?;!upE)eVMWN8 z3K91N9w|(LN(ro;BB=%i07;ii8FDEJ_{iUg$Uprdo~tlJf5_w`tsaNNA+1$wYimVC z1&IYon;04zN}5Fy*5u~qDq^DwYI{W`EJ4%gp<s;p?JX{_SN>Uda#3NxcB%CkalI+Vs{Vp#s=aIh`k|P#i7< zz)>c-0!tU6unLBZBycraBru2owN&Ja5KwhMbf8>{f!cO{G$Yy3-zl>wqq?C|B8IyM z1&ZMuxwM^}^cWd2FT6Cin6K-nGX#1&9D1dq@@n{kr;um)iuAhi%&gh1#qYnpzh`Ii zC_T^arTy-`nvfZzZz&kh2C4)Y3gmqO7y)DukJG=7Js@!{&rD;6AC_Q2@vCpV8vdK!1E4BQ*4&RbaOIr|*HeCPh%hcyjFAAI%J zzg{))#VAN`@WeM`Pde<@|8~Evvm`VuB0ip3UEOSAVQpmo)$E%!`3z(o9KeQkUYqQF z`}t&k!ES($60xC<4ix~VQ*_YCKw8VWFSlV#i8qWbt|iuX6DY~gP}Yy zMg)eYWh`_sS@dAvir>dx&{jM5c+1IqE7jjEzi&Qovti#!kDxhEp(9_WSw-IUNw(T` z_3|q#kD!>;W2eqWr6h-0TSOl`=s)4-*x3_O#}8~D-M?)*B-EVO9&`Lt;O^H2uU?i1 zs8?m3sxRH#P&&K0aCc{&%7t4SZ{1yTXTQqL=|f))8)iHldIG7wU9!{U(Rj7Pr+)eJ zXie1dmGMhwhRz%lym@Vergre-XNHS`trx-nrFoBlL8HipjjO_XdA- z-JknqZ`Rp~o{J&&yOnu12Q9<&k2cgT1>h$(7{~?lF|ebp(No@DA!i8zXl|1fre~!E z1;;tK+CRIUY<`ZLc%2iwj~##1-)L>1#kJf7-}^Uje);^hsJKFz%Yoy6P|+X_g_3BRX0 zvD=UJm)almlJGxVZLi~_{}z|;caPfl)BWM>Kio?%6w@n%N^xr9PY3wZ|9f9N_yHCv z4Z(1$xw>Mgo+F* z-E)dLp1Mdj04)S)CMfDP0gWd(^atf+ujIY!cK%R4J_76YV)SS}Qr$@YlhH*D%8;?L zbeSeR#=aw91nYU$w3m=BE9jlOhSP-Y5379tD8 z#0&wVh5-^KH~}P&dK!o#{SK92Bwpvyn2EX(7$H!MioKGTt>8^i0@Ln>bdxKPzgIv& z@~2oR`H?CRqkMZ*g$cFMGI0VJYey(ZO$AVKghCEvhx271DJ?%TCb7AtO)(r`m<~~r zj|QeGsZoTS3A%f^7tsvdJ@ROBX_OGbK!Old4G%`wcqZM=m1B zRN7TET!t=e?YLf9XJ1)g*x1bDx9~Bx4A3>^6W;;5Xy>ff7*J1WyyH-EdI@rlIgu#=_`n|=aNBC^Bgv57Y 
zneJDfUeVXr{_s}+!nG?tv7uZb4P(T5*#^5Bn&FKxWpU>Pl^cLS4d5$K`R{bYCcYuIsZ$JJ2<+A@$SkXKSsZy^h? zIo~h0A z%JM}8St90}ITDJJkPTlyR(@DvTLZap- zGj_`)>i{yprF2_P_<}I2HNw(~^2UCmj)9!6am6J&QZnBK#oaNnyIEenu&iO64D^#y zT5W5EV4aKC>~niIU7o$;;^-~cCvLw#<IC}NL6p>z7HFHO9Qvv zfB4)rlzgW@dAcmywQ1MJhr9vMAnp!IzuFFny z(0}pv_suz9cFKG=NL*Fg?FLI@4&=I?%MG&(baoE*ijIq7`g{2`)>TS{0@AF(s9cda zgNtZcPjXaJ&YNiSysa4i)mYliR6ii$*jU6)Wf@Y+xE_%#|R+^s19iuH6VY( zn&$ULNO>tP7ZN~B4>;1cQvk4q$|j;lqDUep!Czu*OiDt0bA3(sWY9~;vWMB8_|v3G zS_VQnJ1exy71JNAh$qv#p;RaUSC$46KPCnJkemEHhVWzJvf{|Sr{=#71pXgt&_C|J zf40$mhy7{$m-r9wW~}Y~6YV7}FQU-^uNxNp6;0Lekw4J=s&x~ZeD{bb92f#6^~PUA zeu|)~NSTgCbZaLNJ^ewKBM>k~2yk44VZxqdJ6btStmG@9H5I7zCqfzcqP|XG-^qS^ zKx%I{7y-I_Nva?X`Tih!`)mjSeR&)}NWKpV>W7T3qELk1C--nF6aGGtd8$e5` zOC@ieGLEQ}lBgv=u<4Y$e1K&FVIknx11^`_)>2rS7N72&n&y(8<{uxM6cL)46jxeN zUem>9@Fb2fcqT&jN#NxePy=8Uimis_8wg|vBHF=iIT{^wRO`{!o7d00d-uh`$=lA} zxvZ$TtGO14SpcsEM3@9K5`f=@;F6g5EF*#mkV8wQiF@7#m{Ki6d#@3Q?v+dr%V2M! z!JYzS<<5#qWK7AMLz_?X>tt_>5FnCKERQlNF2R(t!UBpN21gYsnGKDt@d+vE8QE-( zB7ly|<z#D=6YBXXFL8F5jO;qf8KMOn4{EE$eEmW2M#MHEcSjt z)G1rXwO+Biza+0+`vw}OK5)c~se`pg|NMO1FV7Y~ba=F2?Fs{Z9jY!HWTs`j(R{I$ z{2Ur;wg1Gn-!7f9va^5mRNKbgy#wa8qAh_5-i-})W(Eco@jk39i#+db?6l2*eWpEa zdv)BM);xP3hgWZ2Kl1VOjgC+7_I1k5h>dmkWQ2O=Nbe^|Z}6ZOV>RNQ$F;^&LahW^Q&DN_V0r59ksCS}d}5s3 zf*mtTvf^0{w<}Xu)EADCppcX*&h0NDv5R?59g+wt;<;CZlY%L{(5D_uL8~_5x1|9HHZqMpQMJq}1X z!z|t0!XGOHePk4@oW^Qc;_`L+yd=;t(|*<*&`+-~^d5rr z2TXhW>mg6GGx0fE<(>YLt|U&yy4|ZcZ&5pUe&@O)bC+u@n)%zX8|UU+IXdO!BejE{ zmn_kNv{M)P#aJX1RRfZK5 z7JvBq)+Nx)&g4q5)nVYi6_^hJ4nyGpwIciNejjdfE2^SBqDhz=x4vpa?i zZXVJ{ut_a$-`2d(hWUX(jOggh#E8mbhFDNXP+UuL32p18w4OK#F(s_7@|?`rP_OLt zP@%Yn0PTdL2#lcPO9}#0S$7Q#g{fL$O3IvEO-oT3%}h)je_aUV zH18ONGL>^O`hCLhR!E}EBh9WrP0%>a8z4s&%TE-hK1vDY)Qo$YlwgkDtl6Kc)qC_tlEJu@rDO#wDdl>(Ch4p@6bM8F6|@zC8b;Yyr|<(dL7Q!Z z4w-vjsOX9*ZcY!5_Y8{(;<9=4O-ZRGq(UDIqcI;K5lkVF)c{d5K)3+Ug~3NLxFP|k z5O5D54gh7Kpp)zoMCos%ll){f;KHSHz$1|dOeu6JP%J`wgv8W_BXXHY1oLR*%A>V+ z@gD-j3TGb$VkmAr>J7t{QSWZ@;9k;aBqBsmskBQXXroHtsFGER8d-pm0FZ@)0vYBd z;=Km)cK|p8JRV4@5kZGQ#*z>$v8+laX%Mja?aji*8h&AZQ9!WyXY-?Bai@~A_oU^n zPD-DgQ!uWJGaJL_64+E!IF8r4xT^4UVfN>Sx*|D^hb6FFDrHl4jN*SL`>+`YX=30k z08U~&U53?Qpp~Wr5TL z;~hYr4diwJT*AN(6daJ_ck615vlw2j_4Ue?fC!g~Ijx+!tg3>L)Oerp$iUEuq}ZsM zf`Z!I^2V~3wk~O%knfQfYUF2fJ{NfaM~v zEGl#OD}#ZKP8TXlvcrOcrSjs6#y1|(qg-R4@U(%6>HW*A`?9+F12DF}3IgD$ultnu zuaAX?xPGvF6rAw&>9Z4WEG}vV?wk4ICso6V(6hm+7Ly6Cye%%urnTbev zB0-glx8beFdxBz&9z5Uu`qPzP53PFkQtQcc-R2H1+4DJ-jD(!f7Iv+^zLxHz+o=Jj z6>%@p-1nD-98GpU;BI;mfJQ05tfHLZ?CPTT?oDW{M__<`w3}~3c4kX!^kcgnqn<#J zHZ=G-w9;t&OP7OJ?y0>pIOP*=#ca&6Eh83z|TCof-PC+oXkZd|fGGFfxP*taA5X$>E(J!^~6k(kC5(t6-BnOugM+gLn# zb?1@Zjo)q^3(k#f0+QExev5ow%`OR;g7W)_K|c`~$^mLJ@sg6H30As&UmSrxZk@o; zm{(=_YqS53;4epP-cCBS``~k}E4DT(9^PJX`>5KjQx43MrzvH-oB1;&@DLpB4-4l2 zbW2C;I)nh&W%{k5SfZ=tEzp*30) zCu^woc{&LCG7xfKJTXXPcC^RFI@X5bhFOk=humMxI51<+t#vDIEmyrg9eOr$gyEF= z>SGt*ojB>$+<_W>)!srrtqY1fRB(7I%9;)M`}3Kn!lE_ZBb|!?u7%`m=!{SkmZ(%^ zj4Df9S(5o6y>Aw*Z@Yz+q;?v%%KeAmx71oy) z*VLEg)mCKImbu6IrDhd0rGtVHaQe5}t$W+lmX@tQ)V%s>{j*Z=phbQyF3H6&sSt&fcOZf1rJkYB=Dm*SUf|-?9QC?b#)s)$zyE0coRS9-UB|;8Cv`~z0cr+mV0SKpfHvj{m zK^Xo{4EAuq3?7&$A(jE;8h~v8sG%{iLb*$%;J~9RM(IuY4)AE9i!cB@V4)k$X3_#E zZWo2$4(3VPseT}&bQvmv$Ost0U_y?N_w~Ksq)NpLWj!hOL-0}Q38wHSC4oU-0Npb+ zIB@TjUI5@%7+xUaEIgXNqRNP{NIN@IQwtqaD-AQ7+x~$% z@o{gG6JErGpGi(!TU9-eBOEP|KsYf(ESbvXp5_XyS^T0lHk&2jNf3pHn@`Ih_xPZt zw0cShy|EP~!=Q&F0sP_B#bHDukqZ7WW)z8xf2c%O#Ca)k5~TZ$rCvxy)va9N3RMuS zkjrcdqNq_uP#7sIDGv(^D=#na$dxtKLF-) z+E!#G9gOiaeQ@hRVL3A-I+&4Co>fwOSbx=Gy#ak5KttX@gJ1Ps^l8oor{C@x-(J0b zM`ltfd3j=aG 
zqWAS(GoO49E8ZF%4G7SdfztEZ7Ytm?sJDTli$uc%yxe_Fy?o65>>WHUjhPwW9$D=j z`3M^KvG0r*(EMAFrtRjduU2T7A3OGL&&rpZmg()i=JQT3%|8_8T?%r&!tmG^@n)0f zdA0ZROtSY^1JiF|2A`}_oJwA8VWZf6qHhSOQ*eC5P zZky007)#DfFuRYac0qlC0khaUziFRKz|zEw2~C}2+PMqLYECnAwQTJVUq7O9ewwPz z^x=8~r|Aqj_G-x~?bY*cOj~Zb6?!)g3fK-Y^rnH@`2eW2wv8(e>2H5=w9fn`2Gdn< z_n)jYe1IM_!~hz237YnD2&DZJ^b$JZ|5Gxutdv7OYw{8%NWdHdoX*a_lvd#6Ta~%a z&18RT<@_e@0x_5&lpc^{TWx$NZ+o_Iz4z?-?sFD=dTnGEoqDXHZ80KQ2#B?!wzcJ1 z`{Mkc`McS%MR-<6n`NQvvCr#n8ICTDUWJ*Bsof71Y=WY93;>x%%#<(tukJnc>gic) zi>Cpe5oO5&Q8nP!0DcP)@KdAW(h^e0@s*U7?B#42ZE!+leoU${yKyg6unKBA{7d_- zi*hro+;^WEB4TQpnYqmLR3_6SB)}uu{$+? zT`k}#N(8rcMWtqhB_vu{na2nDl!y3$>@?KNsr0Q*s*yf9WsyrQMhDj+jTw@a$CAlN zOIy`YmR6FHUY!$_86TIC*wohA+}zsB@diXta`Co~|ZERUvoz{#bqfJd%7fDDt1 z5D`qNDWWo1N-=#&D0BzY_Kvb#QrZ?O0u!`(#ClZVD8B|ZrAep=6N&S1I&MMQ9H7t$ zrhWgoA{g;~Hl#9MD3mFuHgb0Bp8l}EjtBmqXnz^@{h=wK-oJu<|L`FAkNB)7!S64_ zGW5GMtS3U+gZc1Y4<7AKGx|%%?*Vqo;=~@t z72?W~Bc~mJ6lCuWgS!1N6lMTI0nFtJDE2@4cW94~P!3Qr#rFYVAxMjA^LI(AE2+iE zNt)96_=aK+#eyriUTVks=gl~)4l0O1Q`_r>rD47(wd>LIc)8u?M#UGuuGK5$KOl9At9nlv#|`v zvE>al>M;=}=9Vt{`X1J%Ar2P)p3Z@>F;Qie=`EevWi>I$44(`}G&3!sEGMD77lo%INLA>@*l z>|z+@qM!q#P$nP6C|92nW)VMn`?`xV6an5Ie(C@5eO4~lsF#tFSH`JCP^B%Eh?Q<^ z@~;9Zzq+O_EIhihvRdKiQ!rgC4)ZSvy8h8{S0>gdnEQHfSa*~j>ltfGuOPXA>7dC1 zo9P!@-qpd%NYC^44FOoyZapZYJAW5{b#-BudiJ>=o5Ucj(5Bh ztQLXgt8zBKz{Ze`|j_|%=ZhW2fFZK?h#dY`Q4pF6hL^me~@+aaSR zkn3(JS!;BS-y(M9>Z1J9p%K%L9Nc^R*xDz@<_5f53)c+cbNhA*R>87W*(twA`Maev zT)#Lk9XxscjM>wMjnKLIBD0ye+Kx?V=8XZw5JET~h!1g_uGo9tGjqFLP^y-nKeV=P zNPY9d^17Rq&HBE<2QHob<>K;18uQ2B8ocoKs5f8FJ9`@3`E>K5%l+-%X9qbR`nk(z z6et_QZyU>PgrbZfvu#6c=JnHs20VjQ^q^7ipn;l@>H}z;_D=(i#*naYgE17vR;$G( za=;KSl|wiuC2LAd!eOrf{k12LOuu($op;z2FXp;3*_71$D=fh#UE>LAr?>C6{_U9k zV|D$YK=snPSvVL5;C_HV2*3-0^!Fgovp&uyKF$$I$z^^C0lz<4Z|=0dx$rp1KM3M> zgM2NJuJ2&5%h&sPMt0a&vyV=e=FV?lyFR+i^8E_31JML$aY003qHlF>il>t;gOLhE zxRwzyXpcvTcR+JL^L$4r6Fokjo-V?BV%7pG?V+dEk~#r7 z_qWjs3ihb$|-6<^56mvwa$`>#K{SPxy+!}W=j!zNQh07z*7NO4Zv9dF5C zfdL3HR4$mpZ`&OgaV0UzJ}V)!G^@J0thlzSxTdnHy0S$g5@S@Z2zl~O03rd<1z zpsO}m%rTS05BTECEcVlmc8i9Z=;HjeoUHh4W?V#&o149jjis~kCkG9UR}VDKxOwfX zZrBXKOaS_#D8%DVY;HJTSMN~YR?^Orbu|NC8|Y||737x0#fL}7`(@_DmRA(8yCj@e zj5I|wWexctEKuSU0=D7c7zQ38xCahOX^s*gMCDz==K9v`n!3#LqMZE9{EWoBxahRd zu!w+QA4W!WR&HEzX=Y7rX?sVLK)}Oj0XPIEdcV3wE{$#g^?uK@^-U)w{M-x|6 z{E)_x-wmCqOamz&Q!ErHsU;jHSLy)~(Oi*C@l8cJGwAjm{_7E=9CkF;qkVpq2awL? zgE2bCl-`8ohEO4Ua%2$XkbrSQ$?-zDq-AoRkXACGsbOf;D3J21>q?_y{EG?-1zb82 z90M{5LdZL)1rL>SAQ#HJ+Clw2|MRQNW)0MZAYG{6i{Z;Hw_bF+>tFe$1$iPtcL=z{ z0T?M2K_XdyK02wgZKbWtDo2;Q2{8eVww4L8_EmMJt>T;Itd*U}+9uH?O7|8Fmthb= zjOl2Z0kFru&NqDBOphMl?C4^Zo?Z~0n$`xI{hGbi9I;xt(eeu6G$~kAD4fS+9lrBs$z%P?w%;5W+1Y++PUcCkA4Od` z>3(3h`GEr_Cj;sNoEb*iHX9y)o$|qd(K+2dyKX>fbr17f&g9qcuWh;H7qP3b)g->f zR44iLVXu{2JeF;;oud=F%_HMtsl@4_i_r?rl{1P3H#)K}hZk=(yQ0A7? 
z?Ch5~#O6}C$-MytwjVdirN-3pAOTAbHTD-WsySqCi#C0;0%*^^`Ccx=A_qm_q)TJfUL3Or zxZ&_~#ob3MiZc5)6!%msx{;c$qUJdQ&i=IIO^=OVIa%)5_xtAkLyn!9`N8tKxV%ir z;Xk*1Z}{WUol^JlIbX-|;#VrFN5(pKcRY+d9Ew@%HhE^>TIoW2pNARN4#EBgc-c4G4D52=jsXNbq*a+q+tNdvAr&I=Rb* z7>kYW=364&A2e0OvweGnzzuACyiU4zd|@x7p?!zu&I59L49HrzsK&~&J}#8h5L~&E zURENt1OqV*r_$r${7L?AeTtE83Iq`#Jvhk1{2tCQO$^gZvi0Lazs5&qmzI71{(bMh zZ4VwB`-cW{xje?oi9j5a!;MVN^703ckg>GIoE)#Hq|o^2k^<-IqVu7CYwX_Y2mG{7 zjgP9UODwAni_3O#jmXXB6c!5UMNIF8O)`;hoVQDueSBVEBd3%p2xIz4p$Z1yX&B5W zV1Clat^uf~|E(fHp=R7rCDy9lz}hVpOVDO;8|}?YYZ*f%qkUrr_Nlan5(k3z>KV_F zWXj!GPNAw*wt}Lqorbm@UR) z|Cgf;_No8!Mt_2wUy3CD7rA`um-6D)^KXsJ;VrO_nK!(aumlrb*5WNWd;61QKpT9v zk69HP8>?fi77M{x*oU&*Q%q|V|Ac+iFJB7@tt>-n>)SQ7YHd`JR2%`{H1L^(FcQj{ z^cmZ@ky#C`spqzDgaxr$+2JO(cA=Q1oL903Y{Jn(pc-i_Oq)?P+Va+HbVp9VubQkO z8DvSnunEKISJmca{%mS`fx#ad=z>9em8z$VH-T3(Q6-y%Do0|PfdurSt)@oZL&5Lc zTsbo)X#G#8GasKDnBTp6^V;bv`lt0TT)B8&|I)>ycW)iAvN-nLeosWu;^Kk{Vo5I& ze#4DNnWmgG2y}S5kU!IadBa(5y9y}f$=|sy?+L}XJlk$ zB&X-5#22PS7sQ6;MSd+yHEOEZCzH+K2nLo`kITqj!J7i-8C&r=1mR_xvytz>kIX(S}BsLCy? z$So+$OiNFSj*bip3i9&}^6?G`4T(xjNG&WVt*fi!@pxhhUx`X_wNfsZ{@Ppwphd=| z?N4onS3A^H+Z@2y6VMh?R02uww@peBJsIRuY4cBS&$yHnj6tY>E|*_(yZ=L2ks%Ne zY<2Zt`hSqD6pS|7hqd)^)0P>vk_w>pq+9u^Z2caM3P~mfuVG{4@{+QQLBCgLDa4TyC74e+d{WjaDz8Kftyfbp2(O1*C*L%VJ#g?9DL*uq91?xfB z2gok02=f4(?8lW)t!g^@<@+uNdlNq|$A+3ju{aciFHMpgJf(g@*7Q2zBn2@Hr+N_3 zyP=^suW7Br`xW-@u6ldg+1b7?D$c_hlBU8f%X`nGBX?aMoG{l*DD`r1JpSZ?uG#h3 z&y03B*jO1oyS(YnaPNw>D9k2dR3l6-)@;B1VfKC7i;e-V`erxvULM%-X5qAFeY@X4 z#yuWB-f-NxFQ<&Y?=$^6A}DuC0e42da_Q%wfiGMT8LZ_~a{=~o#V20|EM4+IXP8C1 z-WKgfSq{7!eZ-^0DobJ!4l`Y}R*F<@I zcJ+3N3Q5n4$}6Y$LV9`SH8ol~SRXjHXZ6yR_bxv#NQ(D%-Y6AsSHr?)34M-304B&3 zGo5{g9l6*qIomZQEZFzs*Mmz|_}acH%MLZr-~9QV0mw5!oh#v|=j6s8KC=JT^$U^j zt)h2r&RaMwYe*->x^dZ?x@KJ%B6Zot3%{J=eLFS$Q&yP2JTDd!pQh;z<}T|X8h}9G zKB}$*i@Nno(_K*d@@ZDEFVT`MWr#XBh}9$rE8*S+{>l|-*WL}C`ZObBDu9EZ^Gfo`MzI0Cf&IT$t zAR$Zg%k*vE&3=PFUfC0j5a`77Sire)==a824;$$|Z^G*-yg7q-2stz<|He@U}q|I3n* zwxR*54W?>KP1UWRnh%f$5YF^1C9-I2qN|iNKoA>d`w8H~_F%1P36V0_x|x1Qig}K$f_lV}-Ug;PH$ErZ zDI?jXtkhD7F2F&Dq7fcB98|i1F2vDMDAotlbR(cAZC`;Hu91wFHO^}++me@dI3`3d zz~_d)&#RCiw}|l2n8<+Oz!x!5H&c@i=VvV`FB!sZ>Y|c&r#E?wLI@bqEL@S4bRazF zWl46Pg3qXnU;<3VS1P%XV5UnqlZ+1~$Y;rw+^&`Qj-&S^DCp=ym>rIyOn zVi_)#VVF!w$mNW>ghGy^3S1#m)z>r^=hqeG)#hfGB_*bXM+Jt2yXNPHczZg$ePRFQ zqjz{d$$Ec`)xXfgyc(3i2`$k?H-|J?du14(#JVHj?MV1;)v~oRJ zP;e83CqVr>$bJKKAPBkv^ijj;@_OC;va@nDwjehyB{n=cDmX7QG$Gb8CgOv+`O?tv z6(ZR>9L90@2n9p)3kKz+9VyRoq_0X@UTt(lFke)kS5%s#9Sjy{l> zFrQPsEG=ft4~H=^QP1BxTCBUg&@SqL65GgY9wvp=Me0LOer`4Rp!d$_oAH;Y3$M+b zV%&SECGwjI(%-z@XrsOpP7T;}v#)#l#9GBffEnC|u409*cO+6S>Y{*=;>H#X1Pir}jj><3Yt`HZlPxWc&RjZu?8e1eN49Q#ad)rN^W|@iHrqel^YO8J zZgfg!u*>5!U(PK5c67em*(L4<>rU<%Kjp+s<_Iuu%v1;X`TQ3sa zB4U#=ZbtboP7WT)<#iTIyGc3Y2+k5k#rFElEq*RHYs=S&o5ssT10`x5C3MB1I}Ux6 z=x9D?WK!q~PlxlSx0cQRechUchSyKKyfL%?W;pRuf8<_Q|ONv!1};_ zgP#u^^0aG*+sJPZI;{0xdBA?{ii=YwuO7a5=hD@ujx623$MU6RN@i$OfwQsI{zt}J zrFEh7HW(S?9PH%n?-t+^?kynYOq5&&elf8>Gh-~h>~!~Sb_t0?u^J#AOC|I9sv#r{ z2I$t*+)+xdO|L$3&3xs;)e|g?^(_p}A6vf?1ftsVW(Ql3vVxij|DY&8=i;0smw>SB z(z-`C@4Vl$BVg*7nh67&dm}a7kcL@E(N+X=U7!DBYktr@Sy>1TYeXSmtKV!wO~%5W zcH}Tbh#+SUC&m3u~C>>n8C7bp;;3`SDPa9pUsnwd4nfm|QBu%~wYimFM|Qr77U zXFV%f6$Ic}i+#VR^ZfQvL|?YQo9JdQtvY-}s{!`J1=ajT@5F6RDX# zsC50}%6qqJ!a`kQA`ShFg$GYYLPf71}B&G#bm;!hWlH8Q^ z7(cfpU;F$>*ElbikgxVRey&tzH28e4J$tfw(HiyIJ#f+Wp*brQ8p(YzhA*&iewnY1s?D?=&P5%$} z{jXpj`M-vJWGgI_wl?X~;uvu&{zLyWSoW7ewqiTsFO2xM{IQ$`UV{HvAm-N(_!rox zjaJHmDgAl4pTVeIIBcqy z%*-s)P0zlPmh!&2F@@XwL!{cFgwZOhuNv_>fRcxE2xHpW+H|TqF@HVzUF3w zxJZj{AFbX!`SAXUz3&f~pN{sf4mRIxKH7b7`1r~Gz5TlnUtUH;xg{o96;+(z2)7D& 
z8=9&Y@v9b~{Fwm50lKKDc2!LS{DYUgdUg2uW2=vEz5M(`!Xv}t6OuwhgS|bz#zefR zuF&T)H#p{TjCs)yu%>1v8}xQeFPiS6A!7>1FK##wks2#v4 zR58oPAF1GUlxs!;wZkiX`oz=7ls5wqO9U}PMi>n}^69{*=&wRX7D$kc#_amIX29j& zdf(~T>5fjP`wK>US`LVdn8XteZf=?^s+d*yb6@6jw>tB$iARoq_+!bVS$)n97;<^= ziErz7JQ{7Ec67;Iq~~>{!+oTiRfj7M zMjnnv?)IPWeCV2&g=au;v|9{XC5nnm*EKL0@A!FWefemRPZE{R;-oBW4BO3%-j(2f zr9O9BO5~W5qA_*SF$yM<)*b^ysp?8#I*pa%{5-Ebd%0`*%7u%k?>xN1#O%n)$14Zi z?}EH)k34OM7#nvRlOlZlJ-&K>cMS;eOK(h((Q6-r+_+NjsMvbI z7oXm-^UisP&`6Y_rF{l+7e-AZVYo!z5vL~NpywRC_35X}r!F4#^ZvYI>9niY&qZZq z?$tN`FevWd z3+;uVoe@EAq;NtnzR_hxNrppe*c!i&i<>frgS@9kG@6sQqcP=Vm~Adqtz_)1SX*@4 z5^tc-g3nh3dL|g%jJeW1TDs}G)6h(l#8bno4 zl1t`f68X8Hlmm`39I7_e3wFyDGO1iPe#C147Z1Un6Q$$k6c3x6GIC7AiNg@)0m??4 z$s@CQn3hN(^J*Duo0ZUNUjI8`->+Tj|FuQwzYdkM)wXFfo-IPV|NiT1donY~Hn~0x zDORYOw7M-Q#v?(d!bMCNfC-Q>nW?|EnWFx2{8{S&&_eU4v5|mc0KFQ+)LP{*mW;&w zS55N%FJYgS!$cBof-D;CXJPZMY>JA3Y4n55YCK4ZUsDp>aB`bka|&0!28wC%&WiN0 zq&S9zT1lfE0`{Whc!fp>CHfNNa!|hpb>5#971~p6^=t^NRaChp)F&aJhN99i*>_OC zAT<{Wk~Y;xNzFD4TS&q*H4KuGBgCpra;2q0lqTjh2zcc}Nu^37RBEJb9*w0*u;_uQ zOT|K%hF8=91v(2Qrl?>B1}gwI0xS_p#^A&vLZc6g1Vv*`MczA9{!l7iE0K*;C`YJ> zVR9HCCFT=M%#y}!0u7a|frmJ(MyUa6=tdI#WuhU?4HFv}&H1AMrvR=37=U6Gz*108 z$K(@8SSUgFW#`}b4>I%eH^|7pS=n$%imoEyca5f}8gED9eO27KoRS5FX=58Ibu-gu zes&o3`q`?FFE78bFtsqee)r0@o0tBudc586=9*_u46a@~|LED1$5uur=IbMqwl^qs z~y8Emxg4zCuvotNX=i@(LYiw{+yo#$C~?j)56P@>?M(brZ` z1EPBHadkd??B*Vn`J z8rihSxDHVvh>SZ3)P0r8aZyRSe{4oBTONun4ZU%1{S&KAE|L1a*%lJ0EtAzb`Gwm1 zct7`jHt+hRQHH&`Uq^c1>^shQ+WO}^j=WtJDLR%8kkjFIXftzuRt&j)hy>? zr=R*S7;b=cH|>Z#LV7(#`d>n3oEv-U;Vu94%q&4cx;o)n(9yZiM@)F!Z-n`X0mgHY zYbz#3ejbzVJFYljQI-E#=gTL;u3fR4|I}mY37c8d?hn?vKM1+mt=)}|ldVT=_%dzs zliux35adBSvcBFG%SRudymfo{ z*;CKjs^@}nvsaD0dwz-M^)xk#`ctNi zm!DM^>>4RosUO>ZI{oy`inkwU=ERK658j^Rd#=cHbFAgDtas*hLA!!&wq`}l%}Cyc z!VC#Pe;4i46cdV9%7@pM&2n|gFK-I@_<8G^KMr5pdEduqqUqF*ulh2XT*D5F4<}9VE#T!gR>SUH?Nx<-+y+`$qSdw9$YrOwq(()Gg~*lFf_2V zGTo)OJ2)}m%AM0MU*7cg{^%9z;S}avMih%cn4DSa>6cO}!@~-*-h6p^$Na8)T!w_1 zxp8XD4AAX>>na2z0M=*a?zntos-K^8N=m_&9XmIlI2b0XH3;@z`TU)EbeeBwZj-Qy zz`1_@KdMF2yxh{@N6&*cY|I%tkkgrlecgo}k;c(T&Z^$|R@-^$_DpV5hDZCletd8u z+U5Yn>BE=B8l%B-g9+IuXUIH00;dsVij25;eq|Y@lCmUOrfm~NZ*B5c!A`b*PRVvO zqFtqIUagClvmiS^Hz1^-AWzBiYHOS7LVUuRmn0U}96G6jn6D0 zCr;5&puz=welw3F;#G=JA=MyM2vMc5p%N;JQnqZXoHnPbvyQyykeV+2Wb3z<8XAC# zi-9&X#O^1m7S0#mss`J_q5AKHecB(twsrbH4o2=@p=fIZ0094omMnicYFh|PqajqN zN`_%5E8O+x!P}16COP|Wk54g1lu47Z$|lgFf5G$*K$R`dfmBR_wha_$WOCtum_*jF z(?$Z6iaiaj8B#KZv=mFWBP%fMvs|-HL5)`sqh!iCKwMUdZAJB&O*M7ZH6_I*`FR;- zd70i-H3wy?sZ6>Peq(Pl<=mT<*^b2-Ifa>Z6~$HDhFXP`i=$$d43!HtErshU8jetE zwTvxtst2F}iT<1v5ta}W5FO){ zmK7P75*D2t5u28rke;8IlAD~El$qjQRcaxV>fst)8iV4{T?sv;#BdD8C{*24SZ9Fo z68WlZ-?9md*t75U7s1{_g9M_e;Z)Hp|6_XnU$2yC7boiA4$BGn$uB%y(| zilOPy9&S-VZA?U3Oh{s3dbFJ9%S2MCBP6+2g>J#ob3oZM$xuk3s33qJY(hMdZ4D

sZ-TBMKvvWV!lfveSj?;5P_wkefLwf%XuB^@%EjD~Buxw#qih5z~erL~38?Q5?d zpNk366Y=%}{0@x5Zf6u7uM*!VN_`g>;GYEj*n)7z^J+tK$?QBm>n@u{h)85tQO zc8JAdi9|xbos1oFxl}CS(W{K%9pX46)6%Bh$KKN>M}z+YaLHEED#0hzd=ewH`hJW6 zjJ7geL$*}vQ}m(^A($?08wj9r&r(#Q#}hx#%&`j#|CyGWEte7sxu&A5Iy%BXKHkCh z+fM)ZWf<&JW3wrAHlf_!ShIoKxCvkjjoSdafo2RyhXXcH-qcal+@IS#SBhV&=RM5I zGW=?P>E^}j`!-!!GH3si`7375XrGXBsH|bC0PD+FbP%gL;nXlbZ$VbVsj#1h&rSDV zyR`hG!Hn$svkfq{3=jc~lEHKzAB0rRWm}ZZh)LdbTRUxBf28N;3KY#ks!RwK z5)d>Hq7NRPFFn7z7yDdszqsMUs!1jj=RRJs?c?!V-jA<-JAL}q#B9|mF6>BYnql^$ z+gBIeBF?r-N z%N!;hir%>4=jtJb6T4iQ-v8~9USH03c0AVO%g#O)vsS%dbmH~z#!d_Oz8F8`N@v7$ zAo6w!@@2sgzfF@pSB-R-*8R;89{XXoE zdod^eCh~S_m+L(b9qIFQRj(g2`$Vjp;&f>GL%ju83{Om0vA`}cfFp;;ciwGZbxiN( z{Wl&iUpyQR99+L~x$eoMd(Pf5c>K-wq0OsPcg~+PJY(?k-rFto} zNjj2KixbZ4sXI2RR;l2| z#X@c>D44vKoWNT|?O4t&&i3ihvTg~j7!Uj*1kE`SDNw?P zT3LgEVg3AB)jg4#4v1uIx0qpFoR-h5FN&`Q)wL++1%VE_qKai8SX&UY-0smCt6LxJ zECRjkl4E=lGvk6{!>ej4p;-XwImA=z(#6Z0C(Pt^87b;KOg3yR%$WgmewPi^q0wz~ zCxkPye@NfVpg(mN|Z}{ zR9JdgXmo&wbE)gMfTfFy$Bl1l*AYd!2;29AHM<(`+y-$y3d(wx<;U!jZQ9PQ_~Wku zssFBlXl1mR8oJi4nEoBbRR5-;gW1DTjS^>iIxz)$Dja3YM*m5`rIpM_wJ7}kavWCl znQFCXs1hV%mbm>dQNvbB(7ze2;YRlOVhXqvL#<=l(a>|8eq4jMyoF54K20kW{f}|O zzmAr`ClhefrlDE)fYQbz82Ob}*jIq5U#VcB3VNz&=TkM9+cY;Rdb5k|J%@K6zkdDX z;o%k=kyB9=B@~?om`9r&Ood>K7{pvs@`PLDQ(ur>l9id6?3)l{7ajQ_GR!VK*fTsJ z;=8S%qpja(TUXmpHlJ)?IDC8J?D0Gxp*I~pMHFQ$L zZ%XJSQukF-gMg{P=!Qezl8Ol-z9(EhAFis{OOY#>dPa>7NJauW5n!B5uA3Bp$o|vy zhvt^{Ut9zH>@w1g#L{g9Ow~}dSshQ~TE1Yc44qKh*uS=6h*C94D(?Z%pTye(MSoX= zOY5gJ3XHRgt_p~w<$|$d^&q9DvqsYa$Q}efSS4Mc5?uwQF~5F=Tslpu8Kr{BD7l$0 zKE&heYlv$?`69k@oP?YbojT0Pb&ZkH{ts`zdw9BV#4p6KgA4rx(4K(d64|uMyfw9X zM;i){HI$vKEjwFXcE7&bhf`lxSt80z*Fa%R}rI654u2uz50Awbk zCd2&B%+)r4$H=uXZ{0ezDMsEvCQ}Ib5-yjQmsgOVUszC3R9IM8TwGXEQdC-6Tvk>> z|4Diw>wzzmfh$H~!h4Z;=C^Jf_@y#?}UdU2vW?P}Ii3<(;;VHoyndbbTz{&jhhtugA> zf-Ul&Zd6MZ3tRmGY_q?nKf98!9vV(?=oQ_PB%(bFyh07n1&ZaEdKjNM)u9;7mf^>0 zxn}O3MxVbvv$M7E_5P5b`P1Lc@Xh089$)72>(*1E`JkKy&67Z|6y+|(@s$9}mC#$H zLe!{^S~x&iKcF%LsVHt&UfrvjHzlunL1@ez(+6WWte$Cl=Y`E1|HPOgv-@`v;-3pe zcStyh6LZCiegK0|bZmXqy86;P5AQ6$roZz1pb1 zrsWN-s@FSqdgQem=Z){bjR-FjN|;7CO5jO=S4vf0wliIPbKCa!2X}qmxA)t@<1Xhe zyWhX`+30Js``0MjYnHojSS-5tV)#4f#ZNx%H~p&rvEa3S+OvaYzH@VYho!mAt@W9k zY%=A;rWLnW-F<$`$16NCJSD#|{bhvlgnNAveWd$+r02a3{jchby*F``#hm`nXSaVk z7BScP&7^(r>&QZ*zDHio+F?Fv^p(LK9?e0V_an&<2Q@kj%P{WmwXMtB;TfcNccvg^bWc zR-Wu-wtMF7U32DNp4RQw0K^a(d>+}mvd7g8lMi2Bz32Ltf{b9aEW5zp-B54G!NYU+ zom{Se^@?Mle>&9J*1GJlSwG}vcf<(k`L6$fmlLk!TV4-(^)&S7b(_yW#Ih5z#;L{0 zYf6hvg*@l#jNnw?Mi%d`_wmG9WPElz2wvU=|v;P)%6=zJ!EkB z=w2V@4fA|+cjwXdk8LcR{oT*%U2=C1@=r;EiYHRY`c+#q+JMI|ovv{xf;_Izv z4u&u7W*s~_&)jmVZ`kU>!kx+9JH1}rjqyt7mAhKr{Jd;p#0bQDWcSy5kH;ny(_UaH z(-cAuCDBC@Pcx%ToE(me`4OPzK7VJ^ebL6nU%hA4)OD*b8;*09VYT=8#ZQt0PMBYP z_R;F~YrEv|RI;ua%FFUD9(SHLz<2pD>!n?voE!b<;lh&2A6~(>-(3CPeF?Pl^bbu- z$g9k3tcerk+Lr{~u8e&El1OIx;6fOZgYhD(H|eZvJ>dduqHjl_Q~V0fPk=+lp-ECF()J0&pX%L zylCexvF^em1W|VBCF$HvId&MV)J2!fEFRXoyr&Lt)P(3heaqLas64(SZ_WH7ogRu_ zy|@U%>D8IDVjP1PP|oD8OH827hfc|Bp%D7BI&$;t6OX>8Z*dUo2vd$!Tt& z7&TrMZDLeP=Jp?6W-X&mKrbGO?Ip!J3UU&Ztnw}GKIId{3k+tcp&bDTN$M!1zc<$& zsmk-L$S*1^%1TO4jE=~Ri*jmcJgUNG05L?RY>(1bM?FQ-c(**uHpbUFDe_%bnt5jC zo#bSLm}ukJh|dWTVX=X!31NB3aj8j3QSnK>@yTAvX@R-7qt+EG_O4qz~-yHdhI zlz2QYA6Hy3{iWrCjcaDDSha1A?wT>9`fc6xTWI)jm3k7Y>L-_V1DK#t4pYNWv8sny z*%O04G8IBH(dDjnwMb$b^6@ib`C>v`+M(Ejp+cbH<{%O}Xz-3e3|C6#73MAp57{0Z zxQ5SLtincO&>e+d0`;h(no()Fg))=Yj&ixcKmpi0I&uh@hZ= z&@lJND6haEUzVR2PQ$p!$mmFBF!x18B{?}c$;rv|@4>-=^z-RkNJx;gv-6K1Kj?dR zcXuBjAAffE`ub*PXQ!p5B_t#yCMITOWmQ#GNhA`2P>aMIOetcjN{Ml6nd_rT~ScRL)3OD;ZMX-`0bceLtpu}5Xk4eQTwx;( 
zOAGQf1hhBogN#NaxF`f_=0MgAbRt}kKG?49y04z=vC`O4OZ6Qh^$7XyzdO$7q|KY< z*Y2s!e=er}eEP~O$k7wio*a;RzIU4ORq?Y25wn}}Kkg`hxisDBiJZ>|)il?o$+r5| z7ZB?Mh|_+=>nKwGX;I$Sg+4~|4hJM`_bZths^8wLHCr7yf9_;tK&Zpp_ujWJ#6LOh zcWp<&qkT4a4T8+ic--0G_hgg1se%8Gv%T5=-nn)=)@JPAGrW6!_Z+E4X{1mTX<4h+xNUjQL47#CYDg2oOCTIk1g41!V5l2G zissH9e*1!vnMm!b>*=WR@^xx!Z74}huJQgHW( zcxYsU&af7OR};s`iucM(_pMmidTmdKN~k;AhJZ%S#bw%pojXi`!s;JG8%L(Hw@BJSd0>fC(dpj2o<|zIDlFj5;|L zZQRhOKYvtZI!6(qi69^=fIl&;f?DbNZBW-La3+HsURY!Tmy3qJ@GOXpEa0Luz}cm- z!NoqV^wtJIt4f6Fg-I93rUMU;Ry&}o1$1;+#$?Krj)jgIaJ4R>o{w$Ov*}(eYz6N_cwO z=}V{Y+`8)H;80yuT-VhxFo8p#mr6sO5{&?(-4oWI2eujvNUMRI7DHaSW3E!ku`RLo z-vXn;TROVwMBz9SI%Tp;hR&aC-4J+oA>5%em+$~M22jXoEwZqhnj);MM7f$L6x2c6 z=KdzXKiK!j{@DL5#{QkUpu*(#?tc?T{;hu8FU;!p3X6tGLUm^aEeLlcQI($d_Km7bTInb6x63*bnhtz`3Mc%aAv zMFwcFVFi}Dt+H-cP0j1rDDw$Szu?ivhY4}9(Mezk8sLepj7fg>Mn-UFB&ZO3~Hs_TT7gki((O6tz_Z)%7>BW*7 zY8x~1@{-chVurCTTp$${6h?(4(6F4E!g@y=`_z;K%-9f)OcXp7l>Pj|49oMf0q)olmOS)>; zBqb=ebm`!z8a$8(kOC@##_iZkIy%mWN8NXJvvhS0j1SMp4ls(!s{&%fetCLl7T3fi z7nhaQ<)w$0W|@t3T?G0rfK?y>Tv*3|TVrgGwvn)LVjLF#sT;p|j3Yq<5jI1X-LKmo zr(5xBYn9#EXcL1p`#Vw2mJjdTG_`tn<^Jx{+}BkCA{;w4TA)VHkfz2cw>av!8qGYr zTW95(x!Q~5^_CzH&WjfNEF6heBg9P~iA4Hh6v97i`dhAf{ZQfkYenau+TPza6Mrqv za8T#9%mdZ}AZtUzG8lGMlZnil(&e;!r)bzRlqmlPehBR2a) z%?^vb*(dsGv!vUx1uwR%8|_;-Q(b)9j_sD8?cd&c`R4RRhx>Pv>?|W}KM&PcWW@x2 zdU5^y(RH6c-461xO7L<0cK@;2?Z<7!m6q?#Pn|e@+xSIS-&l-$1QazkU%gqrb}l^I zNPnPAG?U@`&f@yar3GzbWtqTud3kP5Lq#*tsBv%aIvMI%@0K;)EBon?mgB+Y5qp-5 zU0h8+IcMU4JoUyhs^x>&GuvX%tnPlWy=E!Wqk&N6Mffsupr}5erd_ak^MKv=?#O_K zjF^nnu;|3#FF!wrg@lGW`$stiQ98$+&3`(6{sO~2!%>0FZ;W%+tn6K+gOxzo0-};a z7zi>x4e3*t&0VONxNGi@t+IZ%mId5jZFx}r^G12EU2~#$EXdMTZ$uErbc8P}0<+ZU z2*N@n#}Ju5X;rv$vunjX>WVqTDxxFG;zJ8GdyZ^nxP1fiKu1l1r>FfZi?@$0Oih0N zv~hL!_Vi2e4k+>uF7xm%cXewB@QXAz^?dPcG`A2M2hkcgDwR>1S@!ID_vQWNTGRUF z5RL-EmqZhrAHz*o{l)aD^!kt^l(pfIb4n$-Vcl4ei?2s-QF|DutI;8rE6d zbov$)RkN5>l(vNMCGaR1Y=1^(BpKf>I647GcJ|gM6=sccc~};Ipm)U0*5$(KOJBeJ zsBUjh&dE*;kEn@HB<1EoZ6%DgL23xxy+fG02vp|Mg)Peg83r1nlV-y#B4_pJ#Bl(( zgt!+BQmf!~sIRUnDJmAyEMqEF&1a&UUe@p07gT_dDwNRk8yUMurJrK6F9P!=fH|48 z1B+iig5B2DdptY$Vr0a#nyOq7hC+wOXu^VNMp#^6dQOI5b6rqW&*ag^aQ!wmHr<_l zf}2&9m8Yi0wsq8|XC?Ox43epkS5WKg7m$<^CPeK)7LS{nSz1xk-Zw-@&n}Bk%53lK z2=MncH+hd8ECC1`8#@TFghUbpSPQi6bn>b0p39$pEOZH4UD~;Hlr@WwHpYsvY18pz zi-7M6(8Pz)&YsqStg?ohZjP{Avaq(=(bxNm(Y?UHz`nL>fHvTU0RJN}&k@P{#@Xj6 z@R15BL?|AH$WGRq7SaMd$nilMII|dohwD?%zQ3~Y;fak`_8B}pe>uTFydpEFJS!>0 z40?e$jT`>TQ9yp)rO>t*EXD@5l3 zGSdWEWGQtfXvO9qTFUQ4#4JRRZwT@knQMt$3084R)%T0nHMK=vy+S@*kh3+MYr9t4 zZJm1bQFZ@)NUQnm;`^d~X7k(M>Ze@Pa@s3rxnINKq^^^Js`Ks{9}diYcVpL<9m^GG z&fK(W-^uM4Z|uKma{cx9mtTLr`~qWa#=yAwH?!3TwisSME!YrbN2gjncgg7~HMc9FwoRT|TWsaj0L#}pS z1z(*)_8tygHYalX9P2%*mdDh;8!Gx8mm!$zgY`1_IE(MLXCUlt*jJJc!Ugg9SJ3M7?ji1fIm!l0U3g^EngER`xH$>JHZG0RmV zk1mZg+8wxmb*jd^UPWC{p2wReLq?`CL_|4=7)YyujMkWx3T@Z6;Uhc7&hKqHxPIc! 
z`S$B)>P;T=3R9CkZJq7Ee*N{yJ0_@RppQCPo48b9$v2<3#^!gAIXpd*@%mKBKK*7LdAz(hUPNS2M!tE$lJKoNbH4tJ@(lEkjOwEG zjR3J57!!gGZr9*wN-hM2a_`+Ko~K#APQUNg6-bB!3JJIzDw~NG7XLMcocnv)5KBNv zbYNfyiW8wAvB&g9$6>>2okd;q7Q?0cL`xTF>7Ud`=%+G?LeT0T9!f7vRI8 z2*@;?B*HwfWy0G7*Vs~$UmO}59^&PZ5+9SCQ`p=!A{`Gb9b?2rBb-lk4t&hNpylk#}%si56& zv6RVP4Lz@%IQ2)_8B1dsgO(5sW^Kt3%-_GC`3B8WW7=RQBcihDQz6jsmVC;fMUZJSXRB zpw65lthN&lpqtViWTLqdG*80B0W?-tyv1VQ0sH`X&0rWZcp5am1cjxAC9ZF2KcAcX zDm^7IE2FHYW|YH&hK}yg`0&Q=CKMpK5Ehw(8OKhLu)iGahlWN51_mM`gXzq1!CNpe z(C6+N@WsM4JH4*AYq+{Pp9}3A0e%U-73hrkxR{KLjM9>-u+V5vcaQFl=GwA?f^26p z;9n@~yM5x};u{v5!)kUnGq>VFp8-02SOdJ<0C`Mi4;O~G zfa~dQ4-Jk;O3O^k&x_AWDXb}R5AcqP3C~W9A8055UIWk~fn^I|%LnH^u#4@turGPg z06w+sR}IuHI9S1kr99>yU|3Ox{H;EnwEX(W-{1ei!>30L_iR}+_uRfkk)H2j{JvT| z*wPqybl7(pL@yvXDK>o8PI$TS{mBK-&uqA~bMBL)vtI8<%6`skw2`3(t1!R(l7IE9T;o0&9pD!+`Nj~0H zwH(NcfinXL(lDw(X;vW*8l(hj+TTNhE+b91k>aCB`v>h(*LAKTtBmZ`A2?`QrEGl~ zwQ{?Y+VqbI@_o7h`^3$WHTKAlirH<{9TUtQ?So4T()7F^&2`&1&vmg%>;~1Lhq_%x z5?%MEcij^Khov-|r5&ca;TM#x_G*9FGQ)At0^9xi_BZ#uyte1~`Q2K&GneVDJh}JW z6Qj3bAxY*RZB8FP_xbK?pC699&Ke$keB;`C(?L8#a3a=4rTsX25fc-dYWc#;yu3Df_4uF$U&?KI%OnfcB;KjT*$SIypU*bpE6Uc$}>0O z=INy@o#(Sb-SvQ++Zm+PLKX8v34-Q>t12gaOHb;{l5{)ZwAJT&wgft?S!ODSAnhiWWYhqNe&7;r>eVAS%n>slI{pJJt;L+iwt9tbo*UVW!{$K39 z1ymeumn~dTLI@flXb2$*ad%IM5qIM5PTY_P34sI+!QI{6-QC@3+TG>+T@87^eDBQM zHFxemcYSvjXVpreX}YV5u2cIwXP>>FJb@Z<1xsK6n*zdrjP{LsGzqyQ^SweIn&w6S z71GWa>>G=*_*aU5n&cg!_l+?8e$(>)T(J|4a`;Bf&;+A^Ph2CSykLns9ZNJa>;H$y z=YQwU3(Bu#iT?fVf#@(H$HE~`C~*uQ1Zp^v6bIwSo{-eUXCk+ z84B==1veO?O=7%7Wg~ph5yCz)%@`m>TrhVu<4X$r%s)E6xA>e^n8-$nL-dxeuDrrB z8l73!P#+o=MI_O3^YeyrgG{ypbwGxF2R$QW9BV)EtlJMZ7qFRna6;TdpYIa9Q#8LL;z+hnwq{34h^AjP{oJj>;wjj-qYGs znwFav9o|wB3xX1W%r23qMe3oo?-Z85p7uCwdqn$f5_}=T6Cu0-fjw~3IvV^_v(0LH z{4%NnJ~}_R@MyJzw^2#uJ8RpGuS}QPTP{lZuE>qmWqTq#pYfcC+1cjPJa5Z~K3f!e zXI}h$y>ioe1+U}_-b}>$s0gB^dfkztAL=Ep`t6bGxVTB2Fht~w^qa`JZ$)apPlBGU zfM4DF2}xUulr5f=Hdo$j-ng*K(+CCI*~3eKrv=O@z?lMEO=`V-U7~uK|D2NVGh58m z;gcrV=?{L?XmeW{>!knjA(HF1D%NSCi~pj1pC-tfBU5Y?C%m6B-+JoR$VvI6#Y}-7 zfCh`J)K#O_>}mLGJ79%0epe5=y~5()`e5Um z&v$Q$wEmD6?Q`nXmf7=U_2(+6X-?jEe(&+eXZ3e2z5L{2Wnl_~*!lR=or4dIww^y5 z6q73cxYh;)`<_1$b!o5tZe53yOF}uQs716;b6QfO_ucZ3Z%fxNlvh`PGYR%4Aq~SdeA-m1Pc*|0}rb4(fcuWxb z&7`E>rZxvL$ki$7KEUq;QpaFidd@q$UPpiE=mHpG!tjW}G=;@t3sH_2l}%5{&GL^5 z^N9@)PYjC<_*rUa)^gskN_{eB5+aaBXj5be(^UIqXSD0ht=P3bY0JWxIV$Dasw5fZ z*6~s?3se#w@5_mL-x~EDUYWqj2k^#b;Nk<`8&i1uhP8bgNllG4S&AK6d`;h5K6;fOo?ek$k`^B0{>#zK;**7~jiZaZwWa;{H{U%hTrZ7_1v`{~oq$d;RU()z6@87Cmzh59z$Z|Fi5ux>Fh*0iq-6f>+dfM49)o>jF&6)?hv;t$et#mrKXsD% zlBOq$g#Ph8dQ0H&QTiEGfJ&v1MZXsdM`7Q8yDgA7%hEyE0{k!m?-@t1mMK(bh?J-> zM*!yl90j6E789*t$#|}pi`&bGcLtm4LDbvX^d&QWJB@LSOy4DhOKgU5LDnrkHLIgG zyQ-j&jHQc4lnQgWl>(!-(?qDspu#K+)Z=h)oTRa4WFmX#}pSwwPQTzp_| zS^}_8#jK&G-f$lek6-Ueq!c#K7vKZ%U({5d3-LYCUYpWXUTgm1(~~Ga~0Y)Pk)oEKExBC~FT4YcM}(J$sfh zqW2tGdJl0gI*$P_5h_?+0-Q1sG^8h((73L2aO&eb74*OV5xd*Of2C8#-cO;+ezrkCKTYd%T~2jciL;#BW2Rd85Gi~k%?O_l(NnllQq`{0 zBfm%`yGwUv&BUe8Xmy>Kau2C`t5*1IQn@kGX^ixpMLHKD4Kon@_9-pLks4EkQK7>p z%#ctRrGPCBR25#e{9wFlU+BC}uO;o@^{LL;rvdLmvXzRLO&QdO224D(+45mw^!E<}m8U6I*ieR@Cb%*mnz)=kx zX182JAXJ;-Grq-H#q^+Jri&#_eB%!e^R)W%^y;xA+ceJ~*0^^?@6m&0CZ7+#G&{O( zv+|z3bASA}U)z-QE6m0<(c9#w&8_>7;=MdWUp*;(e#LK_j^B1guRY4v2AZ~4_S`#u z*u?asQ&@OTTP=w_;NyBT%WrF;r|xk2=DM&IP1&b%V@`ToZceZ|%m_1y*|Riz-KG@X zg}DoL3Js*|ULcSp&kCQ&4;c?JvMjghEnWwKT9}_1*wkCkKpl3_&oiNuQ4kQD6B5uq zTt)>V>e9oK&{+Xm+FD~`BdTi4_#!5qi%*L6vV3sb|IChp4fEQyq+E^YX7BH`1@3$v$v0=;+0WyHC5l zJt52wV|1iUdOUtSLPC&X>G3tQ)xEc^y0mJs`HMFx$%Uoa&4sDe`RPRkd4+i;MHO{5 z?d_c%wapy`b%SNCrKv@!DOoW&*`bNC=?PKT{5(cLsDYcEGF0<9JT9|1 
zF&=POlnTga6H!5S)JQ{ICihjf)@7BBW=phF%$GD*&3cX?A{3;f#l+v{EG6JvfRkj@W61- zyo&sc#Kw!9go3-UoTLPcv z-&Au2z#Nag($#uhz;>@Fw4vdvfzK3;$i~87En)^2$XwJ8gvABEq}eOtN<5D;GSZ0T zHh@}|Ac!e=3vgr@HiVQzl+kBVIy2Irf@VRTK)D3OT z`@;RtIaxnBb7a4#vwL}QQAT=_o10yAT`~_!^UFSG7QQ!ntiJ3XqIwTeekQMOqCMq4 zvizgglfYBq#Xo9WlFLi7qr=m(vggX0YoAzr)ot09r zvj*E`GK5a;@JIZ=BHzCvzudJgt+ei6K}@e8U16)ceyA}lGz1S+gvX}|HXy^&iJ~x@?>ky+ti; zuVUaTBzm`I$g#CQjvwZ>^@!_@k5)gd-oNww{HQSOMZC?v%+EW#uWKfMTV3kAqugg* zPvkCE{DnI6Jz<7xlMHtzY*~PLwT|PXhV@3+kqAF>JlP4M`_3UGzkvSYlB`5AV!=!N z1Zg9+9T$2ki_<<(bZpo8OTlOAZkZj zO<{ggpnJKSb>87^rCQTKR}rQlJgISc2;#S9`On)oj2}Gl4vlYWzz+Ab#h2em{$Y#Q z=z5tLtP)5-LQ6X=nMel_j*O)?G(l>9+x1t~3pZj^R0#+|9VbVhwTg1#0fZ$0fgny@ zd698Iq=7)l;W6lm+V`1_x()bB(_d)iYg~K{(nd6a) znffs+x7c325fc&K)zMwk(A zzs1a0Mjofh9Ir}{Rl=(&g5eg}ypTI@I%&ovs)`gGHH7jSVDl+}iZ65%+A+A+lJcDD z!t^rlU!l7;XH7yVQm81mM88mvy1)Q@1A&TVi~p$T3jF&6)_@q0c0-vNIN3G5T&JmGK5_g8P1E>CK;RD|J7Ads|2>0pucghf4}WxkxKSX~69|-e0xd3YS6t*?ryrL-ym?es zP*PKcPJRpy673y+vACqf)DWZd2e%zxx#s>X&4<$J?-f)&C{BEYth8M6DDZ))yV*CJ zuVsbh*g-mnjj98pa<1asF1~Rw#rsC35C^JjYy5(u3(DFA;-8X%NfSzFNWH^Q99(a@ z=Zo6>H^{ul$o5-EVA?4%WH123V!=QcC&>FgfF%pJ8pOK_I)gAjxMRMd8zS=tQGAQc zxrqd2E{$@KDX^Z}=B7oAT~p+#oAN`p%z6sVR~f>TAygKEr{Y}4w>v9Pg7m;k6@p}` z@ko6F5*f|MbAKKxn%FLmIV4!AFaBo*ja1Iko%V3|9P zN1xc!j8K}VaPy@gOSI@Bd{gy#z5L-py?ck(&z*`q zKC$?x>8@wDHJ?A#JZXgN+l7SMsnZHqL#385Zz8!yr8RSAuKl8rhdQ=LkeJ6(wRUoZ zpgAn>HO()S+g~X3f01PwdH42{myL2uSOlm^{rzk_Wbzr%>A43*K0>Y@1G7dm>;xS-hU|)xC9AV zIw^e1Y}Z{|l0R9*1O#NKruOCJnx8uO^vJ5$2NnihFbFulB*$b!z3bX8f4%-;@VLkB1=Z(jqn#olT$&g*fg3-a?62PEzd6n2j>U^RF<}wn&pB%6Mdjhy zl1Y3P6&P4XZ(CY)curO}iAd}2=+8*b>TK#n$q#f&M_ox*b%yV!yB0@Rck-r`Idj&h3yzaN`k*^n_ZK>8>l5|9bS;$;H#s)yK;(5=UfFcmfVcghqr6 z)X#=VMs@GRA6fy6!KN@!2^Bs%usTeKpW*zY*L$VqI8w5damuZ7b1L^3HCQ>wfVq%8kUOWj?R)M{H*n74%Y+TuO z{bu%)7at!z{`$=_CO9l5E-ooQEv+U$r>V4?I> zjOw^Bz=;oC4EYF5RBBmVMlx6=d~RYgaD}M%qj0nc7nhU_@hK5sji@yc2XOeb@Sxh_ zOb`&DyAd4SJ67**m79&3F}Huw0?O8v^d+FAD)U3HmruA2p~{LMv{3b zm?txtSk$Ghuer_SMeF?8Y-Kr~^mx`}DTd0z;q`|guSk3*#k+umQS{ILOK??u{qs;} zkLbTX$U6r6#$qh~g+lm;*X`fpFZ}n{HtOdsX?OA^awOtMMmPA!_NRI7e|PXDSX1J; zE6$06Y7k`r!~$fZLNh`>l>ph9jkUGy9bN5K)}P3di$&rw`>jBc+yFXxXpQ57=Wu_Zjf4Sx3=@J!@ znv@vd+!8~fxNvy)*(?JlVliR8)3{geAXzZ?by*x>IK^dPW4FHa)L)URB}VWX%tXT4pLb##(D`uQoVu z@cHxSn9!J%g!qh%jNHPS%)FMYjQ-fL(xQy?+~n~5oOCgWy#Ms=ohR>tB2p@m7)8(nj44KLf zLr7uBUT1)dySk+A3Fcmagv4on0S@;9`;CAcP)?up_7O zqUQ2b=Rl7dFs9~&L%azSv?p=O+_uAGCdUqb} z4@vkO7;v>F^4L(+j#`JADMyf69R#N!&(YBDo;BZho~qZ$4WG{LFg~@#!T4yP>BUc{ zw!GdrH~Qob=Vi)4o0Wps%XrK|!q#cIZe9}c@J3g82BoLA*88XT)#GpWE`Pgef%lfp z9xLa@UeK$w(4~cIb5myZ2F$JU+4TYxVphnTeI_*1ny;SbwVgwmk>$nwnWT z+1tB2W|il4_O}894|E)KHb7Pqq$WUJIk1RgAV>o`O~gQHxx*xgFCgD~NnW|3M^=?S ze>3^ewbbhmDng@4gSf`()`G}Px6k(Z`Gt9vrCr3K>aOPKtYmZ%k;Wn8s1>!1naQcp z)eccX(K<8g=FV*2uoS+$gY-n`Y6UhUFD=F4iE**7&p>@eb4yuvMOtz}Tv|qSZCL@a ztG~ItF)lSfHm{5_0-o3`sH^I+Fb5wGC@zKWE^)ESmh?tyD)K9e3q&H($YSBh7}>~j zra;I+TdQm~@OVrLE+HYTt+oW%R8dw|pUJ206PFqc_U9ek4j+t(YgbZLX0T=CITI(t zv}v3rD~PWwpdBNAkzNi-G-{H`LWQXMI(mM5>s_~ztfY=bCiYF0Z`NPkcl{9zVt`E- z3As!VaDT5{O5n9nVu6Rw?v25|u^5a086o_=us3SO`)^cNgnw)!&l2Kh!-##6f`>`c@o?pkR??iXtC`L%4oiT@|*mkt#0!-Mzgsb5cvIlHXgNYwJrF z|7Ln@8m_BO#EcWcbs|fL4YCC8G=L*i`g621E3S+Ml6Dqvg@Em%F;3}Tk!j2NJO#tjR1)>#+QEVDS&I$jcY1v zj?~v%4-S-wpNZ-3X>15+XnaVf82~H+kYg~Ao^GVMby{hKdQF2inW73Xo59iU?pu+T zblk%1kn2y==f-#J9W5y|a$Co6OB?3Q`70YXZtd>IW9iJ~y6oMr_NZKumAN@#zWIVB zFPH3jy2HWW?x&4gluvYcfLB~hKzepoK}mhz0E$hC{R2W8L43by>*}s-ZvEx!?-deS z(@@XhP=*IuhKFjg36jPWKU_b`!bbw$UZ(dUn)nZgZUPhkI@x9y(e+czth^``bk+HfpV* zQj~%T!saQ^D$C86CWIi|KqY9m!rK(Il)$Unx 
zi8BzN<)|#PgOSvWwTRnJg`iD}Axp+38z@H}o0ayUth!IK442=X`*~ndUYz{7bwR@J`+2!$XPJA$2 zZoE?0X7dWC&5NCP%<|nmGj_MG=L+Qj!__~}?R5Bbtvb)U-plmcE8t@ zId$`lZ)$o;WsQ%shxd1fIB(zXrfNoibyKuu#;2Pxj~+XoKJReb$mG&~SMxU+5z)!9 z**V$OJ$+;v6`h&r$cqg%yXACgyUFHRKXxz4ettFM?LBf~DzL~B9b?oymnyE4n7_Rr zBwq-`kBERKMD>RGbaZWzN@K8yG*W#FH8l-F{SvpW=)HV|`Sz*9@?{1J>Pu%XQkkib zC@Stcxb4}iTh_K8QX~8c=?S9nKGrhMndfe=sBjgZ5DrshFMr;@u5bBtfL1O^a$YIwq7 z+;FQHnH49ed^5Lj@bS;8s~^IXPzqpLCey^U_u#(fEn6seo`Zil47Q=0 z{K;5Mi6ar3TG4fXNqIja+&rS_#~p)xV=)&0@5f)xxtxDeT@n5>1pF=LBKkeR`wx9S z;csDQ!9Rpu{&?~KA_e$A8KO~(@R7a20i*-C0el7cDMo%d+2QG_MJzFtA@eQ2+txKT zzW?~VqB)rlxa_Q=K+lk_ru>|=2Q=1x258Yl6L~P5#$V3hTn9*yh~zOwQypqk%#t8K ziN&z+4?9gu~a&$fd^qC zcu8Wf=fDCXENJMQ$rgM8s1zR!pUn{R%K@DE{2e@=f`Bs)K!=J~&&!ZYj9kj18Uvi< z@pdw4MgTS-OfSlFijI5p#r9}s$(NWk%dlAA6Gmrt?b-eE<;#wau8!XJ^Y@Q!y0BvH zlU4Gkl~4QJe4F&~z~db^9-bR)B#ZAm92^GIS5eoLolzK*mY0~48c)Rx4YagLc#XoQ z{gdVGwOOp>xwHwTlNBDs+$mZRB+fRMvEszxif%?D8F3H>>h`Q!?!= zi#CHzK!7tF*sJOEO{J|X?4z~6$8EBXKI0Jl$j|NNKvk}-xrMW>$EQ!$yLPTRcR<T&{YIjY3PuHnrYn9Nz4Wra;G9=KnbYIKvIWNS*F)Gp4;rsPb&4_r?vaZ z_D4_X4L~4T1v)oDhcfVV`LLJ`OYxN|th|}S5gIM-GyU#Qbbh4z<(dB7cMCR~sm^;l zZv8vtthuzK{l+Jk6%X!2Y~N|6e4g18u(&%?qc>s-Ghz}1Pl7OQ@SC4uJk4tplBI&Q zPF3ictlX?Lt!Ari&)a3WZ|8s6huG{?__o%^<)di=m9 zEV8<-Yvbk(2ls3Yw7tVDcs&@hwZ&I2>D{Eu+Pc`$tsOx-E z?)^sO-KH6zc5C_E(atrU4nZ1%Ks6{aDvi65Um93kmTPw9rq`?2B`Lm7a;ODwi8x8J+jV&4MiyZh3e zzoiEThXy88)?*nX9Y8J*7}$mszeuY`p6@QZn_N#1w(Y3M&WK2=FKK1bMrXfJ_<~+U z9JH^?LD#pqz!=qs8==h#N7n+YGAejI*!qOnIP15p@)TTNQeR&F?X&mvbl2(3TR&&< zvUz&S>zB+pwr$1NGeI!bjYT_)-9%Yp}oz?x9j9H2Cz$K8|Yf7_| zK*ZzxUI7)uK0X^=tQ_^76!JzEM@clg_>qo*p{R&xUfUqh_~ zCu&TDs!a1n`#ORVl`1|*f;h)u-&l;r|D^b<&gA@qjrSkJJ`nx$g5cjUpWu(*1^C@T z{{IE`3IE*%n_ysw^6jCg}FDCyyoqtpauY3DVYbzXjJF;40h>*f99K7o?Z%+(16V3zDzFL4*zT41zU#$V#x2S#x?ks4o)}`j#xiX`Tc3GndOspE+kDuJ%cW~?eqia7pJp3B*MtR3% zli0ghDD=*_cJJ9rYtz@5x?Z$P4RpQ;%Yr_-sH?j#za}@LHa9LQAviTRp|-O+G%1h= z06QxmddQ}fgPVq$&;1$7J$HFe~~#NPCEQ{mI} zMPvn_EW*@lwAU!%F-x(S-Bix=)GFhgS~q`Rs~`_|gH8Kiy|DW35OVSQ)vH$){&di* zZJ0}8Ytw}a01Cj+fPPsZDgbkafTPVOsM6XdGYb%i83#ei0y{m1(>njFNTRu1Pp}3h zdIr^ZDmy}nQ#u!0H>u)+Up93O{RZx zRu1yk%6Lrd_n%n3zSo!+V19OGUy>uK zGUJxfajkj!oA>Q8dvmw0Fad;gJia@%ILXTX>FWy%P4=sN-81X+wwaa(^&`zLwr7L^ zg-EAEa&|*(Y7LD!a=mkaixm(W2ix-od&-1B5&#d^ODoK5tf?QOa)Hhs5vBw5VllI(qKZYe}af62=?=iOAO$nFU26y$sS&jCZ-O$dMh2v(}_n%VxzaURa|xizKTlkWyuhQyP#`8`^#l+`YxHkBu@=(UmG32hT-kVF%F3 zQ~?9eBvQoq31E;xCejG`St&jq_R)#KA~7DsVk&a-7#x(d%VMG)jC4$Mi`$PvJ>8+H zQ>kLirywPin#x+Umg)7AoD?2==t$nOb)D+kLsO;?OU=cvJKFZ)U0s4lcDhe-X?$Z* zw!5wE0EN(t8w4t4(9WrL?h1^wGI2b@kdy10KCfl-u>pr)faz<=$>^x97N-=BKuDD= zhVmtSUg1c3^aBz&-(tj+Y3=n!IB%7XKsS-{bcEN23`3_Q8J(4gZ~1@BiWM3IEv1zi;9c zDb8dP0nvUD_;cYTTQrv~(ByGe^)wv^iaWJ8vbiky_0yxbuN{7A{3<`Qlsb&fh>b4E z$U$prKCY$Kt-IT}t!IBHW`h7Ov-l>J<)+0sL9`*<$eZvaY2q_cYFJt~iq%^PsaM3x{u&G_q>} zawQBxH-YdSiGH{Xvlz!*H$*v-mUXeX)Gab1*v#zfiE}3m_OE~a`ihg2@tY4vXDvm3 zhAr-38FeuYIteG--S!5!zpu|2vbXrTOu# zjTz}#-dWi`r0z0qZ(~PxdS+C3RC;V)dr2$4o(|O1lIo)R4)I-|POQufyo)WngelR% zR!-?Glwl6c<&tI%cPg?67m<1mxFW0U@=vyo@1Gez^>ua;Fa@n0!~@6oKYq11Gj}bS zpvz%PF=3(z`8ooY90(MEI*Hkekn*G`5i;N@3*H(cpV|5EC&yXL3jd^=^j@LG zOSd*eDfAnX;jtvqStr*Y!IU6CQ3KlIzT)LcPD`Edt$Mac``Ib=x3_dZy;>IGvaT*z zYal|g)kC4pO_dm|$BLXy^ONC5N%7MsGQtpW8xJ1ZOwY~X547*^pKx&|60eN(t10(s zs$o{C^_`eR{~!l(b0~4T$u8>gU+0(K-Prizg15<;!M@b2Y{yGiSKhq6_Qk8C$4_my z{{DG*u;<~8i$6`DxV^uY+%?Z*wYvYx z*+E;kyjd{+)$#4ofgkSNUV7`+yxZsI8ST(`X0*-f`kt=9M}o`?-2u}Y?2&}4NXSWL zm(!~vO>ViETrN%jj%l@{_d6BFK4lN3gOI|aQSHBdv3z&$=ey$(-;M;Cu8;qIY@jeE zJv`#WC%ctvcRaa!In4iCL4gm2QQh5F80KQ`dUcQ0UhQ`VlfRtO_;`ASr`hfDyjY$X zJ_yi`V}BblF(7%kX#glp2@Qh56_V*(JWnWLxH3RM;3nr}=hT+7MpT?QqckxViv9?E 
z)aM#6nQavV5fo`Lxg0_-rZNL%#eFn++cwZjLp#G94)C(mvdXfHU4A&NTd``(_6-|W z&)lGj{8*=)KToGoZhF6*5?NMJB%{cXRcX_n1LBVp12a5Qo1tCiL4Y9?Zi;wh4xPYf zaV5}d0JwPIvjnup+M@SwAN3Emk*K)bvSRc?Wl21B#i*9gz$AucuUOVULzO8tQ8;B1 zbD9F2G~xw&1Gabg#p{M;tA^#KF~*MrB~9Mi^?4_^6gxfbs0webOEEoj%Kf#efWxJ6 z7~*$^ilUzF`+BD+QYTMf%1U?1&uHJex9gjY_+8Q?;)N^`&?Mtj66W+@q&n_sj^iaDv6GePAA#EqnZr9}>ZYa6&;<2Xr+dVWKMtn~q^^tOuGDv+x zXdNqH7i0y*$G>Xt`OJcE0ImS3IDBqJL0)lo)o|}HNK!C`FO*PifJ4QgElw`#+%(ef z7T?Qal0WodZV6xy+Kr`)^P3uvwFT(5Vi<>7nG0AT!U0f)l5PnT1C7N+Wm%t_tBknp zolKf0fWC;a3m``1Hf(r<;qryIFYP`SP5TMX75WOHZ^_+m{!G3M6H?5S_Li zlE6MEk>I{iU?_s6BACq-sBwhbYO5{|c7?T6m%V)Q^vIDVw%<+_M!m&X+WT4U^t0FR zXkE*NwRG`*!m65} zxgdpNK2R3%rVBWVJdo!LrT}{?^vW{Q6xl(l;4>XO!78V5ncL>-#9-b5z?w%9-CEMk|aMGzX8@)7}H60P3MJ<&; zV|+JxdFR*)_s@r&zwIncd0LxmQypbp@WnXvmf@R&ijNM;TU_1d{_#OX@XwZ>dMty) zAHzbOMX$n-rW^St!0Vc|`Oo;*WY*Q2Brb z{;-fs5{zmUqp|>XrDZK8<&A|UwpL#;G#m|hR8*;s-_z1mn3&XG-(>dk)#lyXx9nVe zZnvt{CWT^6MWT`#U0s7TO_qv`??T46Eng1dfxyP0rWSoT$S>h01@ZG@p``(xsE|zG zkK_vRm5)PV6&-Lr>2}{@znI55*cQdbqMhfF(GxDTG}gq~eW|{7o_p+Yr>u0R)FjgC zHGLP(GgIP$La4fWD@8?PSaBLl3h6{`#?BF#8n4a1hAAq zTvk@TY-qq7CFhBuO+jr%bql&oC_sB1E^fi$F`*<*lK?8%kjsMx0SuEU+kns)BU^g-LYxO zCT4H1p4)5l;i~8NOZ^SuLv3YsRr%r_L+@F<0bmr1Sk?m8IX+`0m#HS?$pVU#)F{No zAaDhGZKR#Uy`wt<9k;b*?juBPgY2UvR*L=cy6pDZz}BU5rcpt!hp;`j%*f`)MGNzX zc2-aOdqT<7F97#Nj8(v2N*3;>!>c;HoqN=Wyo&t9l+4t$jHafRn#u-`Uw+Ou-YK!E zC8g;-Lj@E@Zc^&!y6PYvA)8QcIT&pO=|>@I0pB0t$05vuNkGy8P-nm-4om})Dz8@s zx|VmRFRMz~-q-W2s`8z$&BI?NrviU0Nr_gj>6Px{NfAJS%$f>}X~2`=!ZZp@2DS$D z&SuBW;QGixAOhY9C0e>WX??Qm%^wf1yx6wp<-R!t%(sk^gB6V@qvPhf+9T0!2&P0$ zRKKChX|eS|rTY&n1~G+pE~mFYQS*iWl1Uac=vnVT#P5eNh! zoS=!Mgt-0C+pJ-=Zf^F{1vxV(;clvlzNmq<3fL>t{O3fttFQTjOuvs@ zzponVu&X@(Qgy-6{-Rxtf!f$0y&_A^$nS?mfwO|KFd}G8t(sfalg$SgV(3D9Uiaqd3Q_< z>`pGy_kD1q`s-Dk>t14@5-)gy&_-J5wwhA##^U)t5vf~T+i-zX-dS7J+DPS$x@*wq9gK6#G85XrmAI*GrJ?V_GH|mS37pa*p_)% zxV$j>cE`}-+O9NrvyO_y=Gt8U5Z}D=G7^~~F_925V+*1)cBmj5|KoGn9h|f8Jr&4Y)CY^GGy{m!uey3LQ)>u{}qT&3SA5hv-lPkC3_E4oI2n4 z@e3m<8F2XFlKQ}3o>7TWTyj5T$5TJP>e#Wle$(3S*N@5Z;kgmP)v>Y7AFTS%Tx`=& z!N^Gqbr&(`uB30;0(Wmv4({*RusK(Ke$mb&{DNFD+Kx{T;58M&+xLPsn_-?lY}>@q zoe5`8Lw#*ma!Pws2X`dN@x@?|AwtDL(RDxc5mXd`j~0zWDxXi5SVT%tpF~VSoP8*R zy^+7{c#RcpA>I{smNnL&p(r2vdQdM90ZJq0(wTqj!2AjO_+zkdEXHCi{w3p|=7|5j ze@7Po1|-8A-vn@!FIprLXoEl*M2aAi0Z_qG5dwVyz*io%jL<(Emk>|kGFZS7XT8e0 z!FOgZ4XtEJZ-qjGJ~}GS#~5)`uWhf1PLC|9uO1;Xb2*YbmWaWZ%yqCiJV^;oO~|w? zsJJjhI)tMv6~ammY~sQdHrtcMs1yp=Bg%i`*N;lfPDswdVg)RiEg zgHIo-hlgCx$Z%tF2Dw0tNb@(kZglIx9Vf@12~klQF)`xfBlWRL3Tx=>A&#IGMU$*x z5#uuO*8=DO$b&$N$DKl^N&y&(NX}6X@6C*^eL8Dc9=3y3yo%`~FZ7%$3RUR$LAYg8 zfj@`KlH!3nj=hw^yT###`?wo>I6jCBxZK@+o=RQE<0^_@lL4B2tSx~#haP-76qAvX zo!8jcPOK?zX{sNNODwUlc6ar1jt+nC<#I4EaCds@$*RITY5oQ^0UHLr=YhXAI3VDK zKo|mfQ(#ydL~2Z!LIFe|m5&z9SM-sAm2C&68&rELa* zHHS)D+1{v$?V81=&!)h1rWn*>)u2Ly6QMzJk)pdGv>>F=4XI8xbpCYZ!?nvcCk<`R zF6*gz)ZTfD%sfS=?`dh)FN~M&j#C$AZooLMh&DoEuT5}2jQre&*sVu0E+ZTd9SE7t z_SPWT>2y2mqZSs_l z^5CxszN+A^P6}Ec6tHBWtLns$6BnOHUcZpbja=TAFqfRHMGuz|1ISR?U za9+xSAVpr7PEYtIV!`vCR2yzxE~~PbSDp_8)zFwx?{3u`?N8`xY-p_U4hhaKD8_YT zfX*ZK_Gcu-r6k6XF@wOB;HqIJ3Mgn~w2dp`q$MTh=all9zt@-83}BOiEB+A>L@KBtm+8N!yXoGmsJ)D-sB^%8G8jzqjS=s>@q6D(BAu zHC>Lh98E!~OG7hRe`eG7XN^`bNaxOx7jGn}%pIICV|aoFUVCN1lC@Ziuh7$to({=0 z3Ku23QK3A4`2KD7{)4?b`k19_`}Q8HxN)`kt3^X>GM7XXjF6t$!_Zj+Vg8Vn44ri# zpwsEhuG+Tbk5<(W9uFVehuyTAf8aD6zFeoILz=y)Pg=1JnZ#0GG_-L~)7Ou{9p

    mf&jFC)+GLBj$4znKst^n^4@HPP~Vl$`Fnc{?_fTt_7IHv##^V35b+EVz?NrfsF zw1vd@yLbdLnCK0QEgHEoiSJPC5qb?(NbbUQM5e?PSC$LVec?-N*689S%%YCGGq1D1 z^{x2@&%mR-Lr;b<7YBz=aNsUYWX={Qi*JCa7%T_sTJgzQHD$H^cp8r_qzeFBQW9|D z$YLwAhmUTY^K!Et!t@D+FodOi`2POHwcW42+>T81p>T)02RbV%#Wx%1Zh^9Vf`phu zL`nZ2#3>ArLD z+m_D$z79@zapyo~Z)@#vd2MfAV|_rpWkkqkHt|eD?Ls8jQ4e2-O*!&q38ooLr^!B7CVj`s@iKmrTiPx95Fk>Z-tNlNv9uo zvVA_3ecn@VsYgs(1f;F3K3zURmy6S8G-_}YXA8Y`c|VkiX48rvA|a=dpkqkVRjHnD zYE-XzC7)GNjb*a0O4q#C$oJ4pa+Aq-9M|tPo$sm3v(^zK&(DYZ4a6aak_lJ$q+h?<2eN) z7B|?gZuP$G|9rc@`MDIYuMTEU4sKXvcJ26^L#r(f=erna1#OZ~SS6jlczn(rBu;Of z^HN2R!|OcG8#-UuZ+Cpd*S&M>4$pkMS@zrURUeJ^J~({T`sK5_+(2f#7cuutZ_M%# zbEMdN2{!EkKf+?@@^!4?b;60qowv=Pto8Sp7BeHtj8guD6sipo6VOo=7X+Rt!WGS; zJqXr9AEM_&j3VC#fuO0Z_!T{ICv!aR45WoYT?cgIiJxAA<417#PXFmp;p~WQsv96< zYI@rYfz=bG3@)WJ(dFPeovOr3ng(#7vwd-cg9J+P| zs!LHkL`7jAM_j-0$Pg3Ux_u*AN1HHy((pK`A)VQ+#}5XdJ0ADTad@DAsHdl+wi44^ z2E#2-T?CC)%#Ox^{@$KGa%&SN*v;4d@q_eBM+mPT*57)7z5j;1_hiouUE<_vAU}hG zOl7RtP-lFv(CY`TJ|9|Y_?PbyROT=y$$+W~eIkOJuhaARZg)f|@P#bi2%`@5L+1la zVmvY;k|0Ex#HdXgZv_7G=sY$D{V5Y9<7HGF`1tm(*h11+FswAQTTPR`ZwLSLdl<(2 zO^rdo-mjR*x;MoK36@U?!wTLG~ zRqnX3h{72qBmb_86K~NYt&~{w$F|;$k4&#^8l@u z1A}ZJ6g3nVRMl`r(Ah37>`812Fv+bgHD>t1V_dE_mxr(!2!|yr6s-n0HOx3|{Zsd+ z{e?((`$SuhjtDPq(Ze?H8MHuYNK|aKML4O-!g4LZ>jOug#~lZt*^iwo;#=}4@xW18j4R5QG8k;mrA%u>eXe_v_YT? zf_bza4Qi4{gGN-*}-r!i|z*W-ALD76N7&3@HL$8EO@K z9m>Od6*|wT6y8(!*n`AvpICWh0{_EgaGpeQpNjLEAq-qs?78B|*Qq+j$W=2W!D|LR zTpdE?1$JTxHI3^cO>v)D`%%yDqEgU-(1rl(lp-C zNaGGcf(1z+ge1g>ySux)yH+KYa<5%g^VF_{{`YzB?RVS{=RMGCx|<3r%aQ^Dpt7>`-tCUt*NIQ>)IPil+0ii6 zO^u7{fAfKGs#YNHK5z#^60v1ms9U2wrP0yazh6dLZl;ovWWTjTOw)WQ*I2xylSF@+P;3J?>ohrJiiwq&UnOCLR}NJOOMz? zMTrFa=3A-Zd9*EWsyeB(L*HKfbv zq{tYjGBn8~f7x89tPrrIvow5xSb#~@2L6*f{|otzVc%Gc#aR6RA^zFDCyoEtdEtKy zm;EYWNY&(d4B+$vKNlFU$;54B`m_P6=@4U8Pv3<%Pmb2*2Z`BE80V-^Y)hgos;^&; zEBs-FAR^;~x@^9f&G}ocMkvG`I;0*XyuzZ~u<*eC{$|N4oJ)tkc5zBl!N*TuJ>3I* ze0)SgVrP3^U1=JK7$Z6_4EAt+Yh`v(LSa=pfifUDp4$4p(9qadukYWvd+7dyy~*+4 zC3m3uvhJ@QA%0=0+wA{1e*8{MR4Rec&&7r@T*RNAUXj*8B}i@q4K2MXNvT!E$$6-$ntu1!z+{Pv_- zAr7cCa1yu42AC@VOabHp)Odm!BJO>FeBgC5Db0ZIn@cQ6MHF{UbrRRm%c7DK0}#+X=WJe-865eVWHi9U!B zih9H4m?^WNcr~O?f_%f_e9e+%t-wI@3m*(k?u|o_5pqK43EJ`)`G3=A#+mecXxI4f zzJ1#I)vNn;ZI$m`E_yg^$on*nedhCt)@sCzG9yE42HNW81`Oe!HI~FZ%Qde)#MpaW#$u_OzCFAOh<(gFyY_sH@ zv8cH_q5bVNaNQvOv@+zS;Y){cey$45F$NH$1_1~>5%{ZtpYgD>Nw@R-ue)U)+oBIg zEbeSIe{*8hsYS-ut<7Jpu=H3qF>0=M#zfg_9n`OhIGW?zOq3(mP2FdSb}!SpdSLF4 z^XtCYTYa`Od9q47^uh7KclX1B0=FGL{MW|^ZKOP`)i?j)I^U%#X)`rD%uU$SXEYkB zSLn($8p?Ido!E1D2ZZ^ymSm-6MgDkyEz;>wckGb?uZ3g>J$N(~R_K(gqx2<)q>o3a z;Ro8h7WI9cC=9WN?Bh+IJ6k`UO54Atbh&l=jCn1}^24gCEX@hrMe7NF`~@uyG~Qr% zS(<-jV0~Q`#$i%fl-!o8fSiQzM9@cG<`ui-k4{@0M-SzXIC$=44Yx(rp3NObK51fuhdE72svx=y1$(C2a6lKZEh^UIlnu^0Z z>QqBL?BGu5D8mc8JbV*vSRle^!_+|cQro3nI_jXOhbe0jCRx-R*(1tM03M6SVy34h zHRok>;}eGOKkZ+$qkq#r##c8|QYH?LlBC8g9=f7BP9!1LO## zA}u29*BGyKSC7ZSw+^?gZGk+G; zRR1R}3ja5U%zt8^@IO-{|2JX``^I7{#^T@NpMo;~E%yC-MXJZomn!J}%9prE3bmo^ zQ$@+y(u%bJkJ$W24fPIjVPAUdBZgZJ1FQv+kyxk&LMx1E512QI8-Pz8@gN#mSpTg) z$QO)=8e(Ct$ip>dT3ZRJ z>1pMa*(7RHX+`qe_jg>JALr!!;0coR3UB=k+|OfY@P`N7KYH)je`w#y)8{VUZLA}V zW@5<^iCA3VM`k2;kcYa+eUWkA-adEhD&P0DzsSox)ZDa_1ml_^P8Rhs7rJQS(b%U8;NTG~p;%`i+pi@@RvuraF8h6&<}Q77Y^~Ah%@dz)ocMH!`X^gGmz`4| z?wI%S)|p`cpZm_7-gsvJXAg(GuQx*0StS^1^y*Jwjo0l$I6+zBmwbb};fx zPvqsC$6JP;oyc7~kY@M#EMH>tNQFDEM|rnaJV zh|d@lu}Ukeie0~#t+DMgG!f_-4#|z{&{gKzuZG8$;HL}hwQtwZVk#(!4Gl}Ql`A%^ zfVePVl1aT?5pgjw3Gu0^=?!(wBiFrvD;{~mX9*;ip+F!kE-x#ut!4=%lmvAxP2U1s zcb~Gm_TXqx;>QR4k~lq=87pp$rdYWvA>qSTSokck;qY 
zq*k$Xb>HJx&_IAesoWq=U^J$f)=*cU7ZpQFi7mf>IeYVR@|gpoM>oOGQ&?FR_}V}x?jNt6J?A&;AY7~6`MzP+=HJjJ3W=t@(Xr48#V(-rCpn!p%UciI` z4i_))N|TsKB>HD6{lCn7Fou0&F&1O-@A0qLCt)ekf9yeEq)Azb>m9Iocux#n2rMp- zCSfKam)TgEmmKpgJm^_Njjw>0&t?@QBz)wsT7jPd+>1b+CZxy#ZyX5Jh3v&ZbpTcl zkb6YKoDmmdu>jX+5r}@tT#cHNiz=%No;`c==7qbrM|f#bGLPE?(3+I=xxYU%I`X^Z z8wCuYk$Kr!MVYB(wPhXg5eZ+M9K!Xda$N!Noas72(MQc zUEq>af_&WFzkB@%{QSZ1bTji#H+Fb{yQ!mRA;9mzx#}Ne^UY)D-d)D_cFGA6TQTk_ z4u3KR+H8@&0M_Og?DX(B`ryGSH!tVlgr5O%o)2DJEG>?~2yN*B-bvx12}!YZU{Zj| z1ia5($i>U*Jv|xWkw;oPx3#vKfWQK{#tg0%2aW&)u&9Z{?E##&U?3&~F)Gi0UrE4@ zo`_SBa~jIlLAW|ZY7o7&Q@j>kah$Dn9O=H)lYfm&pQ0dHzZeh4nsxwIQ4`c2PxtRC~5T+$BRf_Zwa%4(o#LPPkzQI(}k`Xtn7u3K%SRcNl2Z)@(o zOW${$T)QP2Rzv+JXmBm!JXV4rZ3ssqf5h>{L6h!j5OE5<*@+%)LH9SK$2(LW?9jZk zMdQAm)~l@sA2#T^uG9!yshhc6KWDN^p}u^Bo@$AyPQXkf$EC(s*BD+|XZUKh-se@i z&Ne#l)*8RtyZrVZ+Yk@;YuEmAaC&*;?4c`D3=`)}ZB*CgD(Hf|UYp#wm|60H8#O!@ z$#~9J@m^qA|M*dTd{X!izsDy|^=8L+m-qxe+h6niGX2G~f%CU04?ld~zB}s8QxNtF zsb%@G?`x8r1+{VgZcnQ>Ea|l}q3e$msv%4c!FU9+ngi-4Y1z3s-QB&oW(ia9w+KAO7Yl{>rA}i|gMtF6EE-M7ldQwq zhy%a>dH#dzo7eAer6#&WU9@jDHRB*v0aB8XA4^t_p(w*rMPwx;oM^<{y#*RGdBAKN zpvI(E`p4(BbrK~PD%2DTethb^evxqXy7-4Lw6x+&NRal`arnc2Aq9)pHt9?d>KjqU z$q(z8@OB@C$RwcR=qbT9wip-0lYE$(-saFmFE^K4P4TWks)4=++!ce5OCJ-HOT%`a zDxbT$T}zj#hIm@2TN!n(o*@kP0E|kLY7pSfd?U+Np>XuTC2LuRh(I8;cebRaCZ{JS zEmVhsDnVl2ku-$L|jIQPF|A71qNwF4(vyb6T^ zsRoNc!de3?0uP5jItFYKrJu$iL`VDgbX9+N|D>g{O2}i^)>LI?CISqL1*yRP3s|c_ zUaeaAHWko}4M?Z@XR6gEnBgu+13SGb#*Ecrg6=uXI z#yCFz9N_CiA!h?*0K^uS-lDR6D{I^^KDoPt5EBy@8XV#CJ;dGFJ2vQNUuznOQvkjQ z1ZVl;bz-n$fz1skNA+1qPdrg338nYR+)R{zku$mqvSZTTtLDUr!3 zZoYvb;R!FEzw>eTN=S@({OReQdnbO>$JYU~0l0(G0(NV6qhH`NcaMX$b(aLJOTe}V zI3N)2Y3V-J-RC0`w1T(~Fd{Izf!)?y<=)%zgVv zf9rW<`%up2IlAbk5$?OBKVmcG%LIs88-2gbMVk}-~B{Wv08_Mxks>93a z;HE2ZMi7j6;fNor)A>~=^fY?29XV`KcxI>Quvz)}78M6O)fZb;-)z%(Z>RR!dYsEL zCEq0)0aKNtE!E2AS=QPt44OT`Wr^v7wMO^W>%U#A>%3N5@_4(>=*5nC@6PRu^ZFha z67=Eiv#__%KP_97IAuz=zA;@xho-MrZlvtHQ1SBy4cE;^ac9^ zJ`Q0r2$XQ_6OUKz)iCKfc?!B)FktqIsbUcZECH}YKxGfMw3no2ltm=*LXzO*Rl50H zqOu;(#G*xAtI}d3^Y#^R`$RjqkGgQtpn@V*Sz*B3kg|U>#QOuKr!XQoDkz|~rXo5z z5)+C?cm%h@=8o=1;a5Ly<$;yw=jNm*^Kltxj7DZrh>S~@E~P{TM8CV9e|&4Jh9(~= z3XulL=?dl4h4Km_CB#rft+M0b(gk*9T5@4ha8y!7ErG(43LoS3R3=b*p|c*^+F_Uu z(#4)|1ia`hieFIGf;Al~x*`oVjtuITfb6DC5EB6`1}0Va8(9_-nN$`C$_K0Nf4-TJ z?9ttvhYLjWx!8yeaW@YUE6G^BzAJJvyV5^(7(5D$)wIP>tSXAzT5 z>LE0iHx;FpWyEHuB&DXMr3`d;&$blcmh6`&y#v>O7pAa{0fEmtN#xK z{67_A*f$nqF&6(8|B8KoGasI#a5p2i2v@q|F)-Zemc+y*__-JmhM39eX?0B{&7HZy zk8J6C0nNl^7WlSYBFIS)Nc>e5<49f)K9tk@vI3muuV3 zrDi+4cy-Oe!NK{Xds1}LaDP2O83?{{n5O}D2>5fEY&{WZ0BCT;YD}I*Tl0coKPxZK zjbu`U)Oe#8=nQfzX{ec$7M@d+*AW+x&K_hFx^goz4>fgK11uP3%Tp$ef|E{(e0*F{G&~8*G+!3XsO3Z8=rgk zi!wYKTivn>f4_hK$-75A{Z%-sitB*ljr$-A9@f?zt*v;-9ts0~7I59-Pxpb~Wt_*Oed<(^seDDJuFRoAOO2#2IA=YnA*oY4NjYdZUVcnP0hc zV!~pz(AB8x)%5NkGlxQF40`Jk-^jyjbKzslwr4ZAnK!xJF(IzEZf=`Z<8@qIVb>Ui z+6oc5M>jJW{d7h54$HkfyYSQj^TP+HZCXF&#;KJr_i6Wguiyn5Lxc(Zu%O>C&apr( zQ#A;)OzP&y58TkDeb?dmDL|4bxEo`aG+7IkDN3?Dq{$nv*Q2b_Y@l9eu2#KNCdLMJ zyhN<9CG|JWv&_f$%$gRfu3WcnI=r<5&jt8?WB{W1qTw+07B_?VYwAw6<36p`{A8o% zv`W!wwbDmh6{n3V?=~vC?lyRDr}4>7&&|%nb%nm?V#DC&mf_Y`ev8aptxcZW8a=Vo zdu6Bd)=uZmCLM>3Mv;$>ef)Euo5Rig_@EcpPrp04@5M~Z@Y$2|E%nn3#z$Hz`pr`Q zu~hfV3Pb1B=Aq|ym-xBlCq&-7fB)j`hqvxN3-J#vNK38A$j*<=ZOE&u&MHceiSYIQ zv}yCAhxg7ANi9SnjSHflimJ2=f7MS~jA>0|OPEhWDhLTAPgHb>rdGnI4$$8vVGqVg zqnYHAqx12;PYEk=d3}|2ZGq7Zf8MQJyS>zELC?yK?b~*h9XwR>=uwHo)8uWtDwo;x z8kk9NP%vjMck54gcA zbW$tsc}QSN2q-fq+2P#pJ#KHZmd)rzW+*02!D8aby=^OnyQ=pd%#WGakr)6(rd zdl4#X0TZzV0_xB}Z*x^?W^7SvOn!Q5MPWr(OCO!alQ#K@#25#J{E;p_q4d*HKt#gp 
z8$7Y(6o}~{qyn1)92#!CA;P6zB%H@$N%#<`RBB2}N?crALqh|H!{PCG61@9ow*B8W z0>xsHH2x>xZw&j!Vl2ku-{N1f?^hp+WWmZ8i8)e+laL&T%@>S1cnI;_n48y-lV64b zH!j^ft2m^tr7W+gxS*(BS~6mCg>}V6j)McQMetAnx4GCGfOs5VMwwD_4;bb`H&fI@ z5%lzs+uHluVxtlt-h1)wtB<>v??Z>Xk+J?1%76%~>+QeOH?)NhwiN#SF7mAM)>YA| zhh06dR8<$#$Rqm+z0x)EbsWZX;QP~v z$pwii5kHcfi+jbA6+J~zln_^x=$9OIg-ARlgpC6{O%^C|L7vK*$Y5Xm>blw0`PQZ1 z54$A`qEv}QCLIeYBE&W}xi2XpztPE2%y= zLwv8gyWDd9=#dzakdu{@luN0v0CEKIoPhf$nRJ-YaiXv4B(dNWyZq0Fn7wV;SM!q` z+`rwue)IHScMn7-pDr)nRuEy>8LA9{TKJenycUFxrw7WD!Ze66roDa>hTdCduSShi zbsHzD_H9=jx}nJRFyMyikv!4h3xr+O>)4{6z0tV+$#2kE6?*f+%>|RXe9zDeZMrKo zcmiV@XbQlhr*Ed`hsoA6kfI_oGBaAR!usfLJD2lo8(lUEf@Z=GBk_By_I)bF=BUw3 zrp{QUd9gPAt|bJ|7X4Hf`N@L2%D`y^2PY02$Z?f*Y2$U8lSE-P<4T_{YyT*Pfi)_2-V|4)!bj_pbdq(;#7yaljmv&uevG zZ#4O`VY2hu>F+l#@VS06%;!T|VPZv3b75=C{r8_X+aC_}iYUpfDl6}nTGj%AJwU;_ zL!w-6Tt58h@wMRm#9+RYk0<%9m4PA|)Jq%6qM0kN1SZRj1fk{i)b?(K$8-}w8F2v;qpEVLl)St0g#3WHT* z=_u`}lLlV|xMVvmHaHODF+m_Aws&Xe6lUcXQ3k0nK!%h&aQA^5S77ymVL1c}2xJvI z$E%ervu$|ty8LHIL3mVSbVOHtn4lsXmja7PcGNb+AXhFh^oPa26t9!ZoE$m8NtR4_a|oS2vx6BAQiUCm~*M_!e@#KXbz zKW%@bx%L$pcX*BE3 zwnLdIw<_~W#Ej9Yl(zDuxA(V{rT!&iJ^?r|j7{z4Yf(Ul2FhJR6R$LjyUx~sy*j=3 z==SRu?0&vpKbUF@#b!`~fFK9FiNIdOr0lJzIujav*ZJMELwgR`th9}ej^tqs$=$N8 zzwT#vNH>utxl?0&7$OKl0^%tIE-t7+=aD%5Uvl2A_^?#R*#f=NGIg78A9mpS_oGp{ zUw&qN^C<|-W3_bxW2n2YzdRHhx*=e$;|XR0%mV&=TJ5BPSR-nJd1r_!y>w-i=TgV# z)}isYq6=?)44vxm6$SWcB)KRu0wzI#0R$k93xZHpR*>#+#3YXY0{CEGtS3uUHt$t4 zfHQO8(@f#diHsl>MhFsnjOQO8S3e2m>7ZgOi%jdq-z;>$S&l3E!=%znMnE$Kupomx zoq>QgxjJi>T@)c^2KSlP(57(>T4+#9o@%N?SfV#;H(OKF$Wh*()}rU zM=XdhuEv}`>2>|`&3&r_9&apqzfj~e75l0P5k>=UDt$kuM4eG~U!~-`!q|V6vG+>D zAFB<&uQUCy-t@(Kj@uf;S0`rOJGJ2L z@l_vA*u6fz%j3*jS`HfxG!9p ze(Mor=Rpr>76}_5Zz==JWY~Zvl#>@jsE)$FOfK#$qh~ zE&gSsW8_bhF5yB83`bA>SsLTNnF>W}B-wSwGKyU{e{y?by){`4q9hV@Yu=BGD zgF_;vN`5>ln}DyhMdgEo=LFa)fyfZRlm*5F`m*x6(|ILM^=&B~eGP+zAr`qszzX0p zPVsoSV2W6PKsX*i6Tm?75QsE6Vr2m+Qz(eZGy=HD<+)=#s^nfWG}zzL6cg;bt2}o% zaIayUy#O|KM78)D)FhgmO;w8M(RrL`HvgQ$Y3Fk`$7};a9f(yg2NRY+ng(Pbs8@i# znb5xtz>^}3e&Kg*+tn>|cg?apv$3S7WEeMT$M1auBymts7NHN{N|5}};XzhZSa@$| z2ZPMasmS}w@2^Ssr=i=*=&k1VWSeUti*u89R>mFi@I2xY=it$t;+z+m812)P^@K6B zi6=7Ug0fI72V6~PQ|BhCaYE!F6*c`tDQ>!PK`WoVGItA`o7=gthqpQ>cT$XpV$oL( zN`OA*g#=#_MCvm9bXvUhI^J9K?Vr|Qq|>ix&}XDgI%O>QIvM=U*xm}f2nii(z!CWY z1Jtjs(kQ1+Ubtvz^}LRy29;Y7#ak6<)2S4BE~W^e4}Dra)#hK{=lnG% zHOFOR-b1~*rz%w6S$R*iQlC$E|IP5r25r~X`d?QXd|6}iam|DmHWMDMv$(x^+VPb} zju-6`zFd!fd-~(C1ur%jecGV=ah>+t^@gw38#r#zbg)ysyG!NHF7=07w4SavxV=>8 z&PwC+OSGQswDLT$sx$?VpIjw89<-30X7e}G>h9RU*8YjnB zQN(1CP!@?*jQ$39>37&@aCRw{Dh2nXyB_pdXW^6ru0z#OI`Y=N62n|k|+PHI9;@$(k_ItzZ_mphf*06e2@yuy$b{mnbNKb6ISNF0GU8MC|c_5-BmlcJV6 z+L%0?rg~O(!bR0RA2!#Cs%an?pt2-4f}G4E@9zO*0=>SrDK0+p-jlNjj&88Gw_mZz zcG8YnbDrC(IWIE(G5J#b)XcmY#P8!EVtRYhu{UnV_qg3Y^YgX8-@UGqbAy9ZIH1S{ zSw1EUG(FstBW5z+TbuI}Nf@-@r&_v~iN{A}0&yEtI9~+Qg)FP;TosQGC_g}*8zo^r z1QGhfeukZq)63sm`cFp%)8r~FQKKztek#X{G6OGDaMPf=kKxNT{XHQq&ci znhvOsuS2L@2f-74$eRTXrofvA?Gsr=YUH*Fejei;ZdqN}yZq6KGdVsU`Qg_pqPLT? 
zR}V)UF@t32en=QUfgWz$<*C@|Wl-)kIpN`~)VJF+f1HmGzMLC-C++po+yv%2FM7L7HrdWBZkkrOzFOi`%ckSI=spjk?$~@hs2*`^ zdc@tuUoJ0jd3wP2!?|}S=f1Kxe!Ru-*-o>2JEvUQGUc58{L3fSuUKrdYPspL-3uOH zSoh-Syr-M=oNSf9Zc=~0LGPWdKHdwsPVwnBuj>|@iFB>O) zS+425#pJ22^7D;KZd=vd_ZTHP9UW?jmRuK18YwCw*7K_;zF9>jiH6(yddgqAy~t`P zk^G-d9^_?Z9_^dsaU{ed zArUb)NTg(zHvR~Wt8Z)okU($0;8&GU?)=#`(~PC5z#We$3Nq!oT0fUszuCLj_5SUY z*x-z!%+RFBw5-ginCSe%g1V;0K@NGCL&E(kh6bRxxbDu~!4t=djSWa3Dm+C6j_i1@q8i_DVwbiqdFhM>yG6~9E*tWYw>T_Au_AW9I@U8t#Cc#Afo~(A8@v zSey=lkU%F>5~I@z9g>`m-wj9Fa{lvvd?G+Hz-8{hgzPYs>WQ+yqC%%B{qB#N+`WmF zH6Y|;Vr~zi%P+)t+peuMt)^N{LZ^?Rf$q5gQvfVPV9gi&2~Z*y5gCG>)ZB>3aPQK5 zpPtsIbkaV7KmvIPgc<--$^FxcvkeO~EL$38lgJYVd@cU4Bqf@OMYaOCTifXznR%yf z;1UN8k(mYnIsm$uK$i|veq^9CpV0;fWOWZ2-9>tTp=nQbX5Kfpf4e-l%e$x5nNf6< z9zK~9pu`N+5tbZiFTGhe7_s}q9>weWa?iEoA0zeqXvVXNX97?9mIhZ7S_{gGQW6p$ z-nhHjcGIyV$5-2~T(M>L*2Amz9o?H*QS6%Jzt8!i=IwdQy;fZMX_cKlSCpjF?~EE^ zb_YZ~cog^ZPSE$qx9zfmtUJ54$vlMH#DOL7Y@lHcFJ>7(XbMCjk`KyzhsuM^KRco! z>M9JZ*j#nM%!gY|zdNC9A7g%`CIpUSdyHd;OsMgl+~Bh!?dYU|>r03JGHSn%2EygB zTzv@C2XEv4r>N&LBCb+uP(l5w3RG2jn5ZaJN1%xaYKX8_PWZqG5~o3=5~S%up>ca6 zDu_@G`ZRr@%+2e`eYc0#iW4vQmhbP3pE3|CNA^dMf~ZOIygakMV2wg2wXi!D=T^)A zxqsgGPglSExYp7h@$K<}NXK)x_bxhfeBSS$>=r*ToqP!``AcQN0p+tNr@r5$?_-I| zCdv^eY4;oJw@f!L+O{b5_f@_p=cGFBy0>G(`OOo~9$51B@ges|>m4slzjMOs-sy!m zPc6G}Xz3rPHr%~^^5utntG6teGDCUi#tAp~O@6Rh`_($7Pa8Fy>btJg__9jn z?MAgXo3)%b70ijw))Y5Q7Ng@?ye+ac@B~Db)4b_gC+OT)e9i%hCdq zl4DX*BE$21zd=<#JbMINHV>OkYSU2eFw)94*9+dZf?klorw%Z9tRW_&p`$COpg1x- zCMr0pG{3mLxv9Iog+}Vf`3z%#m?f3~mSly7;oTRKi>!-8k4s5vObBndda7gPT)w3d zT>;TFWvL6T;;k26u`mt#>yNtRsK&bTAvU#xIM_?3wha*D(lXL>v*Qw@5|blxBLkXT z-BY*hD4uNAqNX?`E6Y((g*6*s_EK27k*aP=8Luo-QR2!XhCHIFqXrYzzBBuwz7+VP zAs!~3e&q{!WC`1`@HrI0=zPRTQzD*Vuu(4>+!76wyh0P>AVrmA3^pzaI#O1^(P`A_ zSn_R53|PtKIT^JT^pTVzHHxfh$Wrwe39^Uv&*e&0*l~$^ z0bX0?Gl543E*+nD7I5%?{KwYdKlJ}bWVJ+p?=SvS8c_V}?_=0E7Gp6M{~n`+H`3uJ z70i<+JApuIfH7jACX%G)LN=Ud!eTC%k?2|gOL)Q)0BP*u_QtaI<|jt*2N1}9XoZ_$;?#Q?s*Qe06%A?0hrk_Py9EKTH2i%3f5E zw<vQAoQb$4yTx547m_4Qh0@^~21h5kkCvK_54n=1kqXS`RX2CE68jEX;- ze0!wuhCoNmvC8yAuBTDKJ!8TXWx4}`FfGnc?KbZ@ErC1g+zu4IUD57s z2^G_UG8qOb+Hi1U8f<9D-5k!*nN0?+nv2RcQ!fhHGFx@!5i!WjMLg$>-yc< z&E=!}!-u5~PR*Ael&#-Q-uIjM)LG8qBg`ZFJ2tKE*|-MNIq(Wnh$s-u9~I+LH3zq?m*-?~07E%|^1%4e+X*2- z&09Cu8yLXs>AiZ2u+J7eUo@Sue>2U*>+rs0uOG2-NhPiQ?Mz(I2(On)Wn7qev6l)d znK{eXbWPGHDIqFCplb|^R=`Rd@`@Gh)2Fp+XmU+VIuIggt8+K6tJ!M@1+l;%00B*i z7obHHz?i@0A@FuX;jaWEt&Ivqz!m~aDCUa=_|c7Qn~fB0rJI40>@z4W2jicLCmrDB z2!KkCPKat7?BR;|UHzRYd1;}^At~jFL3LTSPkNS2;C80;6o1Rkhhph@BA3E?1({6oy^sVpw`ql#7LMpFi6kd-flrbMOImNrm;+zEuZ{z~^spKm!QqpB_^l(|>6H}HhO zC-Vo3z6=%aYAsfyG>n51Eqbg)rO)P6_Z>lBXAp`vkV3SvNRxJ->9Nknf80S|V`U2J zmASkHHMwT4kI=J=%AYP9xSc@J7g4K|EIHJ?JZN#C^E{W=bHjZWcNHyy{^>x|2C^!& z89>&o28V@C3)J3Q8M)4$lyPtY+lj07A^;=CQva4O4bBFoc#MbHP6?7)>kHa z`#*T`V)OUMbJE`|t$(xGWv{`d1NxiK>Kk8`Q+cW&=O8bCUlAQeTP~Tsu+@m3h=z>D zgZ6j=LW9Z(Y^M^oT6AAtko=oLhv!;i_y%H-b>4IR{5Mn6KUsUeULWT3M`vANnCFA@ zdsavKxUe~N3Xk^k!@VEw_b%_7a(Cy12OCTutk!#PKl#}TjSmY|16LSCEYS^_trj#- zJ!Zac;Vk1y0}XtL?Df$oc%j&OA2|Qp-xK z>e>bem{?MN8i77oSKTt$)=Q|%&kXtS?)Fi?H#c&8zYJ96N|r#Kq{h~kqJ+24BA2bM zuv|>DvL2W)yTaUxcH$_!d(M1yA0oZ0U)%}&==kBi6Q!F`oLbUUSD#;$$paQ&f`rx;m9ezIo~_xbO%2&UNPPD;3KZ*36y>`wtH6*v@$M6q-r_8xo61Ae4MQ7AD=m z8v!FO4;a!>`+rUii^aeEeekd0GjV2?YPzuT21xM$lfe~Gr1Kv%X%`?cxxm5N3X0Q` z5;HQ=(v#wIvr=38+bH<#37N|tfUb7<9>}-9LY=ppv}_;i_v;WH0*qc@({Qxn@$rdW zG7Ul_y6yMX>FaZJ)IbT*5TYxiIwNhu@q@fzU&%Sex7q&ZLb8aaNe73;Kp18el$4jG zCUr#zQ{3M(@0{=7yS8!dyq5i&`WznMbIlw9zE>y)OeEyvV?C1JC^quoh&aNLjZ^Wc z)Z)mR8vB)I$FOfK#$qh~Jw}O4^lQ;568{20E{i;pM1%}Fh)MwN64(}8(4=DuY?wl4 
z9|XuG_0<*TrsNkTI(|IYPb;WxDM?C6#zhVAcfcEu#NyGE#1nDwzHTuG?~maPL3==R z%nEU{25`nfJAw{VsJwP_^`r~CcG;|2H*e7t>vc0W?X*65WBo(#}$GsOoPoY@#p1wcW{G=So>1b?f3rk8Ma9NTydQ42p!z)iDU?;i% zNp5mJxn=3y-Q|RwUUvBai@+IK#KS;$PMyTtAC-Xe#ij3+y)*6PuB z+hoq3_jA>5NGuwqSt@cu)hk#BVjONdLyE@dT-Rc^1xZOaf<~F>tSFKbkBx3 z6Jfxlrk9FgzoY95(XMI8dVzXS^vm$X>tQ|{XbE=WpNrUz(<`qT|F~-W+j*r~eF%Z6KjGrM7H7&OnI&RC74ikM0WGe^UVk?@=n+d-4*s>$(I-|1yOIWpFWTf9y8Lynwyk$eCvh_8|PhLGvU!{?e7OH-|n6G zcC((pt$yMhhI69_-imKIG>iu>$;PDX#+hCxDPaE_L42q0Q^myS013UqV z7M~RT?EAaIjykE8;Hb>r2s4ecl?b?8=%nhZ_xL`05gQj-+EtyGlQ85T0$u@>uPzW804YiE z(t)sTOTUdxvXObN^}41jH~L*&p{xK3Gohl0<^Kb+5?Vt%i1h`M#T=DSla6NKV!DLx zJ3Bhs+MnEe5EB^QTv9FQB>`2!k|Hr)ALU@Uz6uk%Iw39-JVGEa63UB!+k+3HiSa`5 zuZu_kg98xYM?Q3jGI=szLz9dUQ$w?1f<^DqldPZq_!J9<{m&*_>1HPz|70Q(mr3tw z?Qs8+xnW(SnFU$jWKdnV!^FIH{T4_}11baiwJOIz$Pw}wJQf@K<)b%JhxvOH`qwt< z2nCK|-&l;rSo}938eKI?oA0D!9HVmb0zO-WmlW_OYXPr@NPXNsxE5d)g=r~<9c<2R zfTo(F!lbz9s95iWET@EAr8B0xWBI~E@ch!8Keq;Cjjd) zu?dZ-%7N)3IKl!afS&*kU4yIn;(1isR4#oVm;Hzbq5W8DXtnpd?1zWGO?jI=rD13u z#-9O9W8h3;v1bpG4`!r4tS!#0Ov&!3XwAqi-t*w7<--Z+7Ls=ukM1M%0Li~pQFx-E zcwKX?%f@fj-)Xp#EJw&-mZj%qC!`hT7i1OZWfbQR46)0L8*^il3X@_*FjUl#AC^}n z?HS_mfko!9iBz_Pm-$!&srxLk*nBV)VZjRGpN3fU#(}J7mHBU8y`P;FIjc89ksg2` zRAF=r)Y74OMpNU0t8~j-QC^Y-Cd9SoHJ);-+AW*+V9@=-M?*)AIT|>ly30r9B+YB$ zSf0GG^|w>|_wLyI{_*X=56`mR-faEwFyz2a<0UIxZokS73T-bBjdVIu8@#VOV4Bn) zQ+Q$!h!JiW&l6(@kARY8kh4%0J~sa)mVFbedW&a|DBTnhj5}E(;8q#nIR9je#R`*bD+U{cfu{Wlo0^T^`&#_xsxyW@zOX zA<%Vr)#jHc4&T`{o#Sx?87e)^e#41o-IazqpsF`Wctr%i~t3Z|pU z2Le&*R8wS9P|8RFNtSfCrqq@PCPaIM21caDIl8-wc@j`f^s1VgN=ib}Ns1sS37QU( zia{)5PK<}6DPlIM#0j}l%v23&9Erw(yW1cnh_iQJ>!fKNy5s6b>v!)s(0u(a#3cj0 z3nTuO(b}Fs>_37el*~8?g#-x+k=)0lbc;c^HZr?|K3Z*kwPXiQfo}*iTue#{8S}=H zKMsPBqqO`3^Zmfq-^+h=+UG3d!)6b=jd;*(;DB}tW1bpE6WsW`Qp{~Js8lZ2eW7EWksTiw!p1&9o# zv^^r!4@V=9AQ5!(gqBs!2DKeCxyt2AIN2_}DIkIske!tGz}a`T67CjN?qRXs0HhFP znG)C%`W`@gTA4{s>5-P!sQ{ya8Y)x{Y^Ss!Zy{b56I=0PHSK15&I|0K2&*EXdzCz&$=C;q|LeC0Ruv zs*Ch`QdnRak@!BVCX5ela_FFdL!j0ki6~F9=GMBG4N0qn0lMI;CHXd160o}}_C-#P zrE|!cuQqcUqGva{tMmM|liy<@7Fee5SXRs`BK}bN^Ti)-O`LZf8}kU8_1AzUCk7t) zb@a-^gLUo=#GYUyUt+sHVs3UrY6C{IlE(@%Cl+~*a5mQYV*C4n)7vjxIlp$rI%f-~ zjF1EiL!(z0u3X-|+hF<9(CFx-{M5Ifu2|{I*iYnwU>}5FTxU!ih_Sq} zHe0Oh!4SW#82{}k`B!b(x1q#0z5EkHJU8}@xW72@;|g)?7Koh(Rv36`fbB4;oqp0w z-ADVevj*7j`}^-ZpmpKK@?X!)Ff%=Q^@PE*LkHu}-pKvsSk|Ul4QFQxESE_Fm)F?q z`(4E(?o&jL+K@bx`wasR44g1<9tdtf6_~9IHT&b#-fdUU-Hh~(Y07W%`s8|g`@!R@ zS3Wwh()QRi(=8fr491%6o&I(A)QG*a8n(=>8aKSG5608cZWubOW4N|tkd9bwc(=yj z>e-{?E-s2T-4XS|z-XJU(fU!w8+G4o85H+o2Rq&dWUb+D_F;j3D(-4)S)r5B1IufB zUK{8cFTy-`jkDM}E9%3&_$YS*gmQiZWXCq`H(*a1FX-1tpx&>0>ZG`76O(^C>T&z8 zl7d1S{@_KW(U)&fs?GLU z*uj-PlQm05YO;ons>0N3G)A=OPf40FE9KU0Zbl}wbO35jLYEAu;Z{p)XQ-!dU49XV z*G)g#X+=d*L9s}VkBuIA`W7(wMg{vw5Q|AUpWgCGp<4)@BBbTSK^RqBW zSamX)DiQHyWDB>cv?e_@EhI8DHaT7*6Uvm**xaPVq72ovFG=qs8PPt}i(r0_5Y-q7 z1SHXcOOXPNL<$UyV`fBz@SVR%|K5%^aI|kZf40nEd-j&CZ4V!J+dD&TBa(yuH&GwL z#4VN>>?jyfT#?1yy2h_&!hUoJQj<8M4f!!uUC>UL?LN24x-uE9u zU!LqgWCj1GA(G4xowyhF^+s>>#(xXOlAQW6#Ve*58wu4RM^k9@HeFs^T$+)Q(Au6r zD3Y?1o|R|600;ueZmG(1wsJ~}Nh{1RFgATsRGCiEIaMKN9LGT}0hRo{5=3IMzFV5# z(4A0F77`!f7vkcc;ui4c^lu-|t$KfY;``HDcXtfBzjgNWV@F&*+uy(Q_Q73aKcB>| zHr0uBGLp`BM2a0U<#vFJ09F9W0ClL^Clyy0dDqsSCg2#Z*wfs+vAg{eg*%dXl2n|Z zl4h@@kSPdBFECIdl3OMhY!JdlB^VK?&Q~54>u~4M!I0RObzPP*>G!(1vB>sQ#FEQV zl9l1z0F^?PIj4O+A)f&-lTA%%gr%9yJ8_7Wu^NC)*2EX5CniReMBj*dG4=I|k@xk+ z8BNlBGN%9aq1yNKwtd_AspNA7lnK$QC`Ic3EG}A#C`C?Q^1JshZ(aQ>+{aqMb^)+0 zD?cex8jHb^f)wWJ1oZtB;Z#eTvb?k`H{(ry*q(O(2IgHD~Av-Z@K z5%))7w=f+;?6Td!H1Bb39@;HF!`NYZ-GQ?|{i!j3Vc%P?ZU+U2{=8t#7b6ExtB{)) 
zpKV>g%iQ?sl`Dtb11&3CQmk!m*u2>r{Bl*N=VrEtp3uFY(q9ci)j2MhJQ$PuVr?JP zg17atn1spR&IhOI;5ita27vu+?(1n?HcRtfYw{g5dyJG0z^06b! z3>KumJ&@vcCG_o)7X}lIx9WX180+xMXwNeN{O!K^R`<{3=O>FPrk2&-AbsQ*jURDI$E#Kk3B zTiU#T)sdIjYW+HT)7;zzW6P(HN!J}+wtWv|W`U3;V49w7JZ?&6zF$mgcvfM%Sf(1b z=TpEIh;f{eUZ-%RtJK*5K3<#?XE}@3ln>PIo;cu(j_S}=8a5EO^-}WDQIrzG6J6THMBLh z7v<&qM};-;yXpTe&Mqr=O`>rq!4%bcC>}Ut;E3i@RXWX`O6ZgUjTE_vI!xme8NJPv z5=t2#HF-&nQqt7YSd^TWoATA%zFlbI@iV8~v$S~z-o9Sz5qY^XnYyx!crEHj~^wOS(OQwo`C zltif@5Y6#>ubB95eaFmmkj&OvRR|UJ!oJ?J_u%ZlCloRk@LH9X@qnS#;} zz#qV!BQ(h@J}p$Zq9unU1*`_Euh&B&&y(Olfun-(ONz6+bmkWuYYTdD2v6`%&W{w4 z5<=DE?zs!}YQmu)SIB+W()bI&2_bQc1D~Vwjiu0`R4Q7E8uBC4>I?I{{T<4LjeB4G zvDswzUgtyGzwKH7dCQrA3ofm$)lkhrOPvVSV3^j7f-gX~9q@=qZ$EbnyU)LLwCn;{ zkHfr9(JVO}k-|%%AgQjRDl5GvKDr?)xGgiKs=Yak;(M^O-%*8Mpwy*3(t>1H)0=xf z5N(Q%^mOlV?^j0mwg$MKPl~?Mm}4kSH{`hN5`CWB@n%TaMa=dvW_xh-$Kwm1T-bK< z&ej>XChFhnxAQ4x?J%e*K$Gwtfcs+dz`;_#S*)m2pFW%qiVY19joP>A(97%ZO&@$Q zSiSGk@gwi98GOCFCfW6Trsw6xh^No@>4Y0DY;jsI4O`shK1v=or`uy(yZZ>P|CknQ zz1lZ(9G4AqU##1BZ4rE#2R=gxS4`nE2vQetTt`vC6Cq@zjD~$7gTS^ge4WC%Jicv{ zPW5DsIvw@;kwY4^^cuHr4!(EH)9Kay`-W#v-@kt1No7JQNV>BF-@h_kcm2}bOGoBg z+&k79YS$KM7WB#>>iy#Oz(u_9*{%MwvYqC~xE(OQztP_4Lc;sU0s9TS=g-RDZs0h5 zn%n+uNv3yR{JGM@^8ER2v(If_dT{-s^>e12+qx;m(xTkS_Tzy~FIG;nSU%csu4d?r z;bHT2L-sB3d2=Mv^N#tY^)Gi$H(IazcI6<`)tKqJJ`uOK#eKLIW^3kZ`!y^gJRvDD z+}}4ZDlpgg^Q#lfKVDw@=-7N4!!uz{UnLTeYS4k;bD^TF>{m5@LiasGs3?+vpbJ&a#Xw3M>T5$IBYYzK zl8e(iX{mq5ELA4@9vph zTEZ0z#FG{M2Vr1)dgh&W`>IfCnQ&bjMR_{vItshps^UbDgbY1 zh#Aa`m!jwCmdRv=4b{QvaemPu390eT^)-};hkD&&0n|2tyDw$y*lhYP^Ycp()Z+*s zLI)x55Q&KIR>CTQJvoEwRG^S)STJpUdh@EJ2q`K5F0kCARfRg`B8uO}hzklCt|9sF z!#<#RLev3a+O;H&&Iu8)S|KY1KAN)rIFbDGGRYG|2n&Ly>bklj|Cpfogs9qzQl%8B z8={xf`*MYXN65v@m2y$DnB)M#rIQMJ?{NhGy6&%jsF(kVPB8vlSqkRAF}*J zEEj;rwgyi>_u`UDig9D3vr3$AczWnJG4CTz80Hl3OUXUP;@dmB-OWsoLQFnfRiER@ z6*SU<`pRrxmo0~Nv!(k~RmY*M>U}{8`z)+}efj85A1mM2S3a6tH~VADZ?CUCO!N*9 zaSF0FwY+!la(J|lL@Gc(O$>5Ij||a%kdjw5C>wz2pmQfst)wy?AhNOQxv=92fU$)2 zC_mwmqALU-R>*lHkX{s0hdIif3b>2QB7kDaxE-J0-%d+@Od~?6>_uDiJr~=3-Y)lm zlBTB@#-!x8iy4VRruBqwhzR9S5y6(zpiP#OoApA1e~?nP3MdOr_pRZSHFIhs0zCx{ zHTf}Noz)d$5pLz>^4QDhRoq+C^bd|z&RIleD>QdQ2MzmB?~78+Dk27!5|`x{WJSk# zfwDp={VG?kkiq~iauC+xDJF^(CkpdF=cXptmvf|CK-wM1?UPTW)m5{KQ1(e0(K6rY|+QI1Sw;OFW?$F>Z2tEg1izVhmSTBYbUe>C9xU1L3>gPWzr-7}Y-T)dMW98;2- zuw%*a6pO88w#y5x7ZuyBN%*j)zjkCuOGI(YYMix*gf-PR`gM*|YQD{`{kp#O z>rt1T(-Kas3Ee%p$#6b6OprPDX?MfKQCgkO>V(fQi9H5B7?OaoAI-Zwx^iLv#t~{9 zwSgkF0fN2*c!PAh*RIWcbJ^nE^?Nt&ojLYbiho|7UouG8P@Cv#de-9gALg%aAKAX4 zG%}Qz88tWJ943Ez)`TD?p)4aG@V@sy!W{n(>Gfb-Q>XB_nURCwrkmK)37xdYJYs@jkV)mUOZy-@IhuyAxnri z)HS9>hqmR%MmxReuH2HdRm2DlsBB(APd1#uKl(x24MJI;@M*37% z7oY+=fin^x3I&ZPXapdZLYDv%av{B-D=ebM$q}OCYp&ji*}W(A*ulo9*V)%j)#{Du z?5jqo_lJRMFmwP1!-Ru|wu~N^GG|)J-D?HG{^7wves+##k6%82_}Ikc-D}g=FJ3(j zaC1qG32*DHt6|rgdVWc+%IN5B@bz+Lv0F)r5Y6tg1i%vkn+0i!H5O*YPam{?F_XuI z(SQbUiISkVK~#6UEGj%9Ix0RfJ-w{HMj#O@(W8z7B>;Bcru*3Wj%^343=C>- zp5=!)g0M?f-IVnVo>52^Q2KqY1(oy=DFOjO8Wwd)6%rwVzKPkvsjPveOrMle;z&3K z7gBuG7*$A^aTLYB4f~iH9W(4ACny?bo_GpvUioq|u%bRWGG-6=PCPRa^P3uxF$gEuM7xJsK!?PzhpFFzq=Frw7 zW-pD4^Rj~i{jIHCtnKV0liZtFnG|XXiIuYN!+?mjBmuclHLyklIDlFuxK%YCmPpsj zWeWh-v)Xq^J3r8v58$gz_J@#IB!%%DQkx}P3SbYAlOAR1`Rz|3>p2kT0Sp@Ir{-qP z1o+V25EL2`fHS26K2soIEYktuTqHKj5!1_*0_gl3m1JJs6(|OFPHRR)v4c~Xp;VsQ zSRRrd>_fk%Cc8R2BR``uGbAs#4VwADu9sEOFUL`NMy6CbP%*ajlngi=o?o!P1aAQ- zz=h8``I7*KNdRLZTF$%<_BKix067WmP+P=F&-v@82 zWwCY>D_zHPy(Z$W6BRZC!GW2IuQEkzn*w>@El||<~(>)cXp}!t%w{cgDB(?OkAU zZ+orlaj%QRTyGEi{MU$gSEhWr`m^CiebeO=b9Svtout`1aYRL5tXsEFwVry%p^eps zSKj?`^6SU+>^K}DE4MyMFGWq4jUo1?FlhX_6_o|?@Iw3wb 
z(#K8CY9vJ+2&d9Wh+dZ&yH|SA=YIF46-wnL*?F~DIiQd>mgi-JziSBJB?z5H_zfkj z>Gxtt%P~y0Z%oomZMO~UzFfNa+3`b4ZH9Cu!YZpo z^(`Rj&jVY=LoG!bdkJu8X)8#L_i?uF%FQhF^Z4@gud7EkzB#+cYRgieK7A!R`VwrQ z7*i8!YSsW_NY6NKvK8_@l0q?+ldEzcJ4pktT(rFn0}+y*!tD;Tko8O ziV{QuMDxE)sgS-v`r#BQ5Q$Jh%D*E*6bK8F^ux;nT+D1`|1J%ZD)ngkE)jComuMV{ zlPc`^F9!9WLl~&XLNGW~z-T{rGc9xug))Xz)wiX$+foU?xv3#HGb14`F()UloyBEb z(n&r^f20&*DGQWBkaI!CMzb+YJ-HY5^+s>>#(xWvkcm}IUMXsCif|>1C+5@tRZ-KP zoR(WyRv;&&h^R+zC#7N_8Z+acwKrTQzyO&*z-SRndxXi6B`foru(Hby4?`oa9-y}xdqJbWQNDWk9`*WK6IBgE^2 zy{%ttSeu|q3GEcBY;j0jh&JjdS<`u1t`a+Cq##6HT@ekPciMRCMRF|w4KY4gBK;Y_ z9AvgK;SITRr;yMlpuYmtMe^~WxX5dNnh|%lApLhK?-(gr3B-5+J%Gg>&0AAqZ%c$N zd=YRJ%u3v&^+4YfF(}ytqWmDEC9aH0sq_oVH_7bq&*uiXrCiRayU&xFr6ybhStIb} zmATc;we5U?C^RaBFJ+75bov%k^r9pe_tg1_9tbPo$^wJJiYm%Fx@ya^eT#E$bqUu9 zVH{gJh=B1*Sk&CIt+tMauq{aS7IlIMw8leScYC5pf;fLP9_4pc_yoL@L$wUrr6BC! zaPkT(_8mTa?98u0vA$XP5rXbKc}u9j@v1`CEjf-W#Rd1e3#?vT-{Sh=Z0MH*#SXK$ zJ`;p4T4H-G@RqM zi3ir{HLNyxR;u^>oaoKDacfij#uNk$?}{7`Ia{IlA%x#7vDjT?zq2Low^VzBhg-+` z{&}eM`SmA@2brDeUm3Qh$Zkrb+jxHHXev?<0%leo#7cB9_6SYENWIDd8d+od#LwyP zw^$?aw*}_c*5ChQ-OUXvUo2hIdiz<)&b?J@=H<@Q$yz(<&8$Ix9$viv;>vmZhd+2R zzcXZao8@eN(2nre3*TQ}^!(V;`%A_BIX^J!2hTnBQGHf7vf9kImEDx^2y=^XIOz8grrArQCjJ+UxO&uXH1fCVSnUZn9(S z(-othZy5LKz|=Pz)NS@?e%&(k?dpNHzs|k8eX7gbTOVIPi;PL_7D8oXi>aA;ac({X zm=p}Oz?m@v8objVWs1)r`ZA#uZ7^^s4hT~ghdi)&^Ok_z9K8Jt+H6y(1L~~7$m{1ZVPqwcX#rLN=V3U zs!l4*D$mR)^z~0R{!roI=>7I-f{$}oMM-r@UQum+V3hl}FE4^0-N@LurAmJeM|T=$ z?3DIJ3mZ4Cqpn;94=>2i1_2MLqX-G$OX+J-qRUWlWLz$%v$MFYFf7{X<0q@nA1rLl z&CHD~Up@YI?!>3*dRZ$MbsB7KUc07n(R}fd1LFO=^2Uv+(;Z(wc51r*%*?%qTf96u zH8pHDQZJGyQTGyo6`n%e*4@_HP{polU@RNcs2ex3{=gPU zNr7$}=-^6{v2&xCsWBS)Ns+g&5=*xZ`QjKBMlU73qUbS#=|7|03JT2g}Wmz`TVZk)84;*+ECXR9v<%I?HU{(Y;619 zE74cT>~WFq3vzAZA-)PFMhDu%i^U?;`KAAw&uvOddsp3jQ3?BGqz-^O2RZOA19*l@ zUIE;eDAtMT zod5@xQpzIue7RVr5P^~dBnvGffYbLVFvabq_ugv`lb?qyFb*47;a zD4>_0gshzWihBACEAq-SVw1ZYn@|ZsNys?dAAnCadzg_{Xl3gd86O`Q5LTI9pAg{K z$$m({Z!&5fPORo}HY$ln{sDI?t1{>p@H^^(s%6#K)mPpZ@=dDCK1#V6jG3t-EZj3A zKfb21DmfuNJ+dIsKJxsb({5G{gbe5Mx(Sl+?PVDs?ogcJW&i09(rVmro^K^-?1O*JDzYKC8c4X-(Xq7#0tOh%`sb9aeMgw%ecM^JEJzGy3Q>K z`MJ{fC){H=cxr(A2yh-jIBR#gPfGWkmld}yK5%JT@Qk*MnNYe3N^e5yMXvYUa)*(! 
zye$p!2Y+3uc4yz(X9thIJpKEdSI0bk&SplRj&fcUVLbracSG3xqP>`O(>Up}nN2Ha z#4Xo$GtjU-IQZkXfmX*Sy*@eB^vq(D^)q~z&#hd%uzk|FZrwi3!?EgdnCo=xk0W|N z-<&Y@uEuGLQHB09l-{!xc8kLogw;bRxS>-zt0Z3DstIKdM^@s%qaUd zBjmwon;nClc2D@UW9*Yv>Mz$|w!e%r-Jw|NwOLtUa-`Yc3V2+4+hybE@8=Vh5fCS9?y769br15hw{rdX*7lvTwY9CcpKr8Z zP@Ho>n601RORJB5fgVsv0P*I1}loh0$$H`BM$#QYbeDJ97 z=pSjbmnY1aAGUo%)%z#8=0+7E9zY2wxdauL`ONbtr)213M@nc=CC612Xc}I%vv@HX zY3}Z>z9t{N_8lspIjwcD8hhBF_Q3S5?WeNu#~X?Tq)>Ow?xBlt|clYw$LfeNkkp9nnl@R)x*kBDeV zGWs$!Hb)2bZjD`k55X^ z%P(YeIl#Co5j_S;IZbVMIlRp>#ZL;*lTt&)&|eH{LQ0J#)ot%wBjDU@uX&#l|5rxp z{J?gNG>W)uBpr@$xbUNNcRi3wFw3lQqHyd;z#Wj zk*!r_39%6-ChxM+^J5>~c7wrtT)FGa(&(Kj!P|?I_j7BG^73~|qj$*y zmMa2B^8NeP`wp)NUSs&j?ALcM?c1_5FSk?*#8*cnPxq@n-`2UCsIj9abG{6PPcz`| z&qW7*@;ox}??q=$2mnq<~Cia}J<+)kU56^zGVzlc*z3kD$*dsKFp?a)- zBiIx5yEZLvx^f(9i|IFo+tz9N;L!VvYi-XQe!2h9ni+FX{eF1bjKMcg%%ozjg8N3w ze!AFUBFk@4?uV)NyRk3p`daQB`DR0Z`(H;r-_Y;o;(_Kzmp z)sfgu=Ggxv4cJ7)oDn8GhPF6KYjHtYS!p9XATZ7BxowbjkmDzZ&+g7%$??fqB@KS&5-JV!~8Yz%=KwM?!;n2*oEQ?#R1GJi#(N0AXFRUj0OCVa^r zWkJT#4DSJpg#6!xeJWK&**}0C@to& z&(|BoEw{+9+Q@o=;a>Z>KvGnS=)hX8GISwNHALQkBu;{ zL?DKe;{+d#6?4Uj83n27X#i~$6<$?&jga1Eb?=vud!=xhfDnNE&hD>e<@7n60BQ}u zJmyzV1^7ux4#wpJDaCk&csjruA%6{rdw|1!hNM!Y^6vuo913`1iLAYYRaIAzUz%Rs zSf5^$WMX?ZH2qA2^pso7Fuz1y8()pLmg?UEbTZqg*2#C}*FACaeB$EzH8nY{xS+bS zxG^d?+S<$_H#wywE1Tcm!EWh{42p8JbB~Eiee>pHLUKlJV-sJ?^ z^>TBsDz2t)RAEY)ho$p3Gb3Mz4}i1f(hf_zmwqu$`E4b+mD!+V%k#XmUaf$&xKkZKEVMNdbiHClx%_PXa0*yWe-p?H0?(#BOU*l@P`!*eH`hYnGUKs8*pus_- z!*_Cv!^8%gNuAy+I3BZc8#VA4j(g5+@!pW-X~3#^g|rOWABE{Vc>!a^t{6mUw7ROt znT?Hdx)U4ex9hjP_CWzc5p>kF`Pmr0y*MuLDVF~hQ#cL-zXeH#6K);pxBe`)__4+U zBlX2Mv90_2UHwJp$-&_?z%pH~?!Qbsck;-pf&F0kFv$S5ilKeN=a0O&S!4AtYMXD5 zJo#M9*+icew7cfxO8dhaZw`!jv1Rzjg@dAJY1WL;Ahh*3gGQGP(armLQS#C4P?`wr zIuMHM{Ji|H?|HLvjM2s!uMe+3x_8m9=hyE0b;^a~+VyrjB~~j)t2sQ2No@{u3qSr8 zbzL{@-W;!A$GqR%_vIGM?C5CYBa2=fTKV+Y(p&pxy*ROws!4#ZdYk*#qU?-{W8F}v zPKoSOMTnFtm7%SRYSI)@?+JnjJQmRJ-HoTMW9Ep5jaBGqHtAsa$?1*PX4E`g-)eQ1 zh_=YJd0Lt5mz)@yUS1NKlHJjahdRZDI!2`?<_Be`cc@5VIMq%dQWzHyJ))G&&26ZQ zj0?~7aLhb&AzXJt{g6>j7^cu1Ay~Me=F(qyR)GK)2pNJK(aJ53OsY&slhxJ7d%32^ zhkP)4WOVsYM>Df$AKxc6RwLOJROF-4n1T;t4v4!!B4Ci8P!8>4lC1zS$v_wy&04vE z!FHk6FvZ{zyg@^nN9z`j)2>=LBX7bOV#x~O{N>!8d*G89G!`&aHVMVdYW;^RxRNAL z0$}toTdKj%mHYdZ)>V7jHtplzx(+^0AY>!LE1LL_aG3E`nW`5`GGLDB75uP$>rr+= zbX|hUbc}>(&&V_Rz1oMqOMu$$WEb<=e=*tq1NQyEd_55FNA>Y<+8=ssQ4@dhu0bt z8=9Pw!Ki;pNkka$$wx{_r5ucK*eF$uP!JeF^iz_3DddifR&U`Qz06FJlQ~h2fO&&lk%p7WDagu9uUIkoC1q)U!OG9 zR@K$DRn)Y0baGssUA~x`dN|t~zk2@u&FcWaz>v^LW0Q}sjm_eclH(KOA|iuLj9=M* zu}VpfkID*2EDX!ePR@?Y$d4{}`W*V~jdf6LqP301$G5wMO*=s_NzCmFpeYj%B7|#d zavp=Mg}z(7*3RVE#LDs-Cs!{|fA82NKjZgzXjGAr5`E{^S&q2XH$FHmHx?8PM9RC? z&$GCn)gfqzz!8&pkK{RPQ$FLnEw!pY>qEl_;8nPMH2q+BsVeF-lzveL?gOXM4B@X2 zc=YeK#(1t;ZH^Oa-4;}b?JV>Cxy5df(oF-m8o{xdTIsn_Tx|-%q|OqX*0@8g;Gd+P z1La;C($G1v@8{H}I{JA#oxXkkPs0bF&3rS%a*CrJzy38P`U%E&QipK82Akog4+gG3 zhYh`hA&--XnErzSt9~DObdT1p9U8B<5B_K{>g!U?*rk&TrcaVBSX!s4n>9{5a?u#m zbtCWY82b3s*mr;Gd)=9t|6zO5^UdZ5Mm^cs&upDq;Bt+knZw!#_vd5ktbQXJCrqzB zc`WqeQC7Scu&as4fRH~9#+=>yVU_MDgXuTcjk<7T`t4gQPyRN>{QkTg<2Av@1~_fR z{10PQ7W4VRyYo!uh2I#L{$OG9qh)4?G;-`tRt9~te|RC>>aNR+WB2z=jed7F(fGFE z)&-x=uk(I=I?L-DNQFuYQ9FfIg+o0Jk!2>KvGo9O!! 
zBT_Q{7_9d4bC;^-EfeWXgb_N*LH+8rhH`J*g3K&ANV(sObutc~n2_e_@6S_6!_s1$ zJsiVb++N+oc5KMTBa8-@C0UaL8r4+4BE0L;k(H zsh;cV!GB5|*d36YQS`^jk_C&Kb;imD>vUiPJ7>*-ix+!gUvKnAZ~XTlfl(7938geE zJ2^Eik;kQ%50ER^C||04dg*is)c_Ag)O2K40fU)i5Wqm7)PU*-%8>x8g>CCPtG9}~ zHYyZ)G9@OX`Y_+44pe^@mrg|M0G{RLy-A4+Do8Dk2@Z=6@(pmaRq(nHZ&`q{Z)-~@ z!_x*OB|;!fs_5AlD?y?VCFduHro?k_g_45&obvRTFc7veO0X>yBoR<3mP1aSC@DFe zROX}c4&nf+_Q^GHAqJ!xi3*b=6)+B9YkrwQan&ON3g~M@tZ2R}n{oh}(Za?s#Z?U~ z#K8hZaH09Pl&yA|1IkI=PSEDa=*yqPY10n!!W818pnL}qlM-X8z}fW4*YSAS<)tyH z3C1Qb6OzK4nyV=^cu2}fB04cqh)bC{LPc#=VP;AkDV8XdvTk7UfJZ}H3RkK+!X-^j z?vcKHQD=6-8*ce}Ws5ezcu))@>HAOa<2Kl%1{Yrne4IwlmQGgoQGuzfs)V-OYwsQw zRyA9G@(gqdqePIBln*jKbj3o!y)L&2WatP8A1w3gL%I$DhoRu8A^bX^&RVZ6@j`aG z<3|fqQ^O~M(n6{sxjy`Uv(Hvh@S;`+EuPaL3Rz!ZGWX$~0kbm0w}<(x&Tt#g4H_-C z#K3+a+h=ZL#P88|w;Z3Jj(T?>-E;}dV^+PLeyRQD499cLdH#-HO}<%K2K&c4Sq473 ze%IITX133sCO0j)Pd||p)*3kBwYl!1yZr|}8;Ct0sCEaNe0#*IGgFToo_=fJSfc~u zAMDV(xpK_ARdZ~YE=<|JGiA-%;5k#m=8TD%JtB7Iz_?ihBNq+vU88BUQLo1GS;*bv zA9v{5ti&Q_V}+xU-x|@UAE7oxJbjkz)^(P>h2!1xje*~yj&Jr`H~sCbVGeueJz75E z$<|Si59z%*GtJ3xh3B0m0q16X-J9AMMIXp*K|o9L(5(R3dC@Obsu!c$<6mQt9z?ZPy>iUi!=S z%vqneFF5H*P*MSf70}U1gE>ABEua)Be*iUAO^Z~eBNgbul?a8T~|~(x41Y*5EyCIr1N5M{awRI&;N8F%DiPpD6&;k{6^Z$AK7Nie zUSes&4nfUIAa?P(m+>W=s_PHSIMIyZy@KQl0(?DQeR$)N7}Ee`T6eRthsV1wUgoBr zQVyg><`(DFp(V8|HqT~5y@Q^}ZzRv7pTHFZ&q3g%4&VBKn-14yPJr=}x2ESDecb-~ z<62R8T5^cLuic{r?+XoazZKcfl=x3(z8_P%_V4o1OLU*>X)`g!d6Xz>EE@jxz{GAk z752+LjE=gzyPf50mg{-Dz-@1J;EAu-H-~+FksaabYH4Qo&8j@9+Ynnw>Qo3HOXPyg%2<>q^qybSne9xadGCc7V>_-Bi{WTv_#bgJz^yqLqh-%rJ!g6X(!TwLuzm|t`k63cH_lAc?S>I%$}V#W_b3` zvjv~ugRql+z9b|coezhQNqSnMIfbPzA;I+mR&85za8N*Uc%;>nryu`3M3-wfh$spxwiq?_Cr*e-sdTlOfF(->~aOjlpyb^^^2IMtm@{8m~GhKSnUT~ zlT!aX=KBHrls#H4-wiF0dmDrN&|oj|z@(Utm>^-FRU^ic zXwXPYOklXOKS63sB|nk)A)NEPI>$US(vwr)!ldT!v}=`XJ1F?#HWmL#Dq=HP5^}P# zBO=0D+gs=>tE_EsclYPf*F+(Gf1KbeK_Der)eV*5VJTd;99U%(;=4YE|7<#+C~y5RuSLuhvQVu~GRM^es%SX!8wA z|E@u!D9uVsN{YfsDbbVhRfC7$89eA$(`O>h&rL{AbgwF~$w_)tmiMBi@ndnhu^bX; zT&qyhZ;+s-rii{XXU|@#ZxRb6P}f2lo4OzQ?Rsq^tG=~4GA@d~r0)9q9KX*IuXpi% z_Q-waOT0$$-1|!W`bk|daO)3Whe<3KH+r1S&GI(3Fu!p4e6(v+PG*U_m*a;ImrK)L zG=(4G`!6P(HNjOw;W7|H$Fl<_2_k25y+%lV)Hn_p_+k*CBk^A?t8@?vOX&|=P`q<~ zuDh4r%a_;hi|PfnnMEztWictfEKxmuVOdG>?`^Lczg${vy^!@mQ|73}@zIZSntJ+H zzZutt485!~{mGIQMw{oHnYrx9jF4D|m(Q;5-m`kq9KDPCH^mznMjzZ2KXXd!l(CXA zL-7%PWrHx8Iwl%4kgYwYbh^I70!@>3TE<&+ZPt#7o;|j7+-TM?9ihf3?#PL$Vx;i2!3 z552Ssdv$QY>wN=X?;Y}bOCP8G17EJfl0N^ESX-q$ew@8~f7A5&?IScR)UXz9HGJBHrfE|Jr!IhZ zh_a@+EWa!%Af_n0h~6W%qe%-zY(ivCda{_u7Kpg?&t^;bZ%p66diK1byo#aU_3X3$ z18>Q{Z@uVqMz*^2g_1w!9{vOBQUCq;KQrO?@rdtvk^IN;k5~L}Km6ZL(LbaUsK5Qt zsef$6slU&O_@`$&^^d%SzDUT>yDZ>Qxp|EaE{&h9S?)f_J((-$g?+uz8~<}cwUzv5 zYXc{c_c@*D#W*6%5E7(9Cu0oDRqaznbYgH^Qiw<-L7QxqqZ&bjomwE9Rn@+Z#`5I! z2Rzu_q8K6&4FO7*B8O7s{IbG51zC5L@^FBn#_H5(_piQvbgdxcZF$Y|nzq4GrJg`G z6o-L0^d&)^R1Q{1hbg7gC2WJHvJ3o{2*8CsX$_cBJ0_n|DBDZr3ZYoSQGLCrFgG;@shnWivW$HvQrlG^(T&;yj*CUa zLF=%!Y1JDln#XeagMb={Sd7#pxoih6HU}sbw(~})o6}M{Km38bZfr!fyfCW_r63>UkD#8x53(NCi5#lDWRncgtjrh zDJAJIUcG$(+K#@q_`n2KOH)Q+3`^XU6Pr?=o!u8Z#N?QoSZ#Qn*2wVL8n&y4nQR$jv2J4MEd8RfdR@bG1%q_!`)FoNoMEwZ zV@Gj*%fM?gx6a+;G1)TiCobb5tu!M-Bni3f$6Vv+%`h$l{KqAQZ^nl7r zC@O`v7DCi6M_&aY^MPz_s?AUEtj!Mr#x=1UN0o5&$gm0^_ZjZHy6MXhh+ov^xwyi8 z2S5J<*_BGeK8lbkl~hjylm=2AtgMU>8sf{7h$xEbjf$1uyNDo@5^|EN ztgZ10@UN(?M7xuk%G8UOQfJR=8m0p}+A{5aq9GWehP7iD>^qjb=UADmXIW}4bjW0F zf=)u#4d9a;9i5ZXD&a_g6i_k=lV`ek9J5cKj~@FqD=UXleHW=}bvU0X=c~4%XwpGW zKONuk9`U2;`S;9+^52JL{~yQSpp;^E%Q*43FE2-mSOYkaVi`dYy|Ax0dgFgi5UM(v zs?EUw2s*E`m1sa!p#l?11q+q9{ug_10ToB~b&uA>LyUyDXX2iTySr!N?(XjHfdB~! 
[base85-encoded GIT binary patch data for a binary image file omitted]

literal 0
HcmV?d00001

diff --git a/docs/source/_static/thumbnails/create_gnn.svg b/docs/source/_static/thumbnails/create_gnn.svg
new file mode 100644
index 000000000000..b5f8432513b8
--- /dev/null
+++ b/docs/source/_static/thumbnails/create_gnn.svg
@@ -0,0 +1,53 @@
[53 added lines of SVG markup lost in extraction; not recoverable and omitted]
diff --git a/docs/source/_static/thumbnails/explain.png b/docs/source/_static/thumbnails/explain.png
new file mode 100644
index 0000000000000000000000000000000000000000..9004ddabe4d879b59f202b21256ccb890ca68ebc GIT binary patch
literal 13931
zcmV-xHk8SUP)00Bw}0{{R3O!wwD00001b5ch_0olnc ze*gdg32;bRa{vGf6951U69E94oEQKA0Y^|wR7L;)|M>X$^YioY@bK;J?d$97=;-L> z<>lhy;^5%m-QC^X+}zsQ+S%FJ*x1gwvq$jJHm`ThO<&d$!t%F4#Z#>B+L#l^*~ zt*u^OURhaLVPRouX=%T|zqYovW@cuNj*gp~o58`slarI9qoZ#=wWi0O8AcKyVRP6>TrU{&Uc2lp z+q1p@|L0X585xlgnN=kT1n!`X0b?3|elo6%%$k|Ge>VFl_f`I}S15k{&9~o`%HRK? zQu#yW58wY#`p&ODdHU?f=Pzn=)BYasySB;FhoAVR8Q4&pr59hG-+cMiYXM>@ zUfx=#B1|!w+h9hvJOmlQZ2s}-n`!^-6SbirtD+={Dw)$Xl<8RI%wtB2Jmfi)=CptQ zN!i;MuS1Yk5od|05}c->OjZF|S9zey;S0cQ{`J|HPzt@we;tGL;k8wj+G(a<=9U~Y zTI9g8`M2UjU*~fTA3##C(2y!kf);6(*atLByvBpqmV|v-$T2a?=HCl%r%foWJt|a@VT^`EMQN7! z3x^rYGBGbA(Fb5|{5d~uMyc$LS7k=BO7z<9H4~M|G9#Q9EwkuDBKZ*J&-Xw6tmZk$ z7$%A0RVvMqlxfCt*<;4Rheq<|#>O}I+-25^GReqsvfQ$2yl>Z~yvkY1ZqG5zNWQVr zY)qS$!xo=ai{Xm{W|a^`s&xBIJ4}1ONT_|V!`#@YtW2Aj#fEV^hUlcpA`T^NQ>Dda zR*bx8p~5%IRfA(~&2X3-e}8%}+nIAu1Aw4PJ5D3y;P>sybOm$1F=^`-F^9SF*H_c# z#_a9$ViD*i(Zs5(QdL@%iAr;mB}+0b(Pvq7n8WaM&-!m~ykcwyM;lHeMWRYsnOK!l zEKe5Z8l*+kVQ&2KUgbr(Cq-l?Mhi^V-bhX`V~;78l6GOb9A?1sANR*&E;fqHC{Bwu zeZ0hn)@pZ%wGG=eZEHJ4ha=fxZu~quZHAG(dcsL!nl5dUb>8h{w%)QOFG97?atepp zd^~N6LtDzrAS0SYl~$NB(O!0xogHh7eZlE4*9?cbQJVT9Uw!5v8PU|LbguJJMA_+V zmT4@PgAa3C?rh&Q4{N={qYSSe7=m`So zrioN3Npq;m-H}jsS(0h3eav2wsmp&%&tWb-58x41I&flDO49Uq#z5)Xl(hAVLS3$5 zmK*+*9Ol~}3qxb7bc$1}lCvB~l}3wQ!=#a%Zd6DPb1LI+Z=MyvfS(PgPu*txYLX~< zi`|i~qv=7zVg7jv&7zg31!@LHM66-RpG^g(nm)FG*P1IYfB^-f{$I7Ki>(< zw~sME8_w$9s2~-}NSS)GSF;s;rH?x7VkD>YgZ4lkv%G`e`f4Kvs8uQ8Y;BDU5~0*P zKY0Evd^Nx*qZN*}eq>qN9i~wDEZjQFw?BHGq)LKwd*4MW(RQnprKC!NQVqb#qgob+ znWW2wTkaGs{o*OpvNUj3K8yvj)9I7a+??l#UJI%$S<)ieVJ7SHPq*qXhglG;?GUGl z(JK_sxRX52)~R@J^f~Yrub|2jQYG3ew>ix8y8O3WWVw|0JfcdDvpHa7PBscf{!zOP zSjK6>Oj4!GVP?@~`S$Y5uTZ=pIMrG&_8FKD&4BVkoBe^!jw(ERr{RaF@(?APYNB1t zidZ0bCYZO(vhI13D&w~TFUiDru%<#f`rz@w&aTDI&o($n6Xx34&N2Mc97KO_Z(D0q z5T+c+T?uAfL*5R_9M+_v%JBCK(@TC>(^36Ef4Fb$XBLZUQ;j!=L6xVnngWWpT6tk_6)I-iiwWMyQfEVhG}6LtA~(j?zJ z^DGwOY8so^yM71~=ke$0PYZY_gTn+>GGY1`c9grODjg0py)OSg;oCS?8&;5^b!mhi zG@~>Lr}2d%s)0U5j6s#6GNm~L|GZWLHdzKnv&oN5;zvQ1g~Fx&q!Wlo0MdjB zAPLKl9lI^l{E|CN3^Ri+zn>h*TF9i~S%A_tw^P;YKEw&C^a>~X(=Ij3EHHfs$xc`I z+l&m;Qjw?C<&#N~oI?-=o`ht(r9h(S?x_fbGNSf!AuUYIvI$?{II^RoBS>fahANT6 zlqzyk__%dBp(DGioFWEFR`=&MBuD5T!JCCc{i*xa!U!-SfN$w~5pwoZLzN{TT@xe8 zlAM5c>9RSI0nA~@ks(HJoMDnjKE>!^u%R?jf_;uNcn#Cd5|+S;MhX9z179#<15F4% zhAvs3Xd)fA7mdhdpd@yms>qEQ0@;uz4jbEr>@XpKzw_a6G)xn2;)QJ-MoUbJs4iKb z=-cs0t~aomJl|uI=L{rPWh_nY>^eQaAixw>Ib!)){pAZd*Xu99S6r54DFyOux(war z*k{jHA2+xG&I+z^&OE^kRZ`!2S}-jMrVik1RlUIrai>cN@*|Q-2`1C!1l{G*bAe=p zb7<8oMDjA0s)eUgrB^`3`i0C=T;(B4&~zIrNgp*Y`<7D{^7#~VjVI{x=kc1CH5j69w=C)Zy?Uix>#1EPd+tH~e%3d4ev-n6Q5Pi;E=Q zWH=`nC7f8ANX;#YWjShm9Cv+_CHzgaE9v`?t+svhK=k|F(4C*VS*FzG^616*3m3@( zYV>>vd?&%^lBS1>p5YSb6EjEfr@I>zCIs*n?iT4DCJo^WeQ@TxQi7RTm!(lj&N-Ji zsuu^v=tG)g!!1oz^r$6{V3J(*Y7fo7=>UF6I&bh#)73ZelrB#2*Lmk=nNpV{_JNn2 z%X>|Km%WnyFjXak89Pi56+M>)!z$($kzVNfXwMqyyn&2&l^{2E%>z}fv2_{rE=MbR zUO7dHQkwVlPlzg&Hq{Ru*F(7I*$e;O!_+OXmT;&nUg&!0s^b%s@GsMD1{^E>EB-Zt zKN?e)qi&l8(K~M?i56j>|0z*r@T#mnc_)@^ z=1=7$U5fon>MsBBC_Ble=XnQ7U0`&MJn=KIDjOW7mt|gq+YUv!o%}aCVHg(*a&e%@ z?k@k7JyCxYkQ}~5|1}J*+Sj$qR^G^05|jnQTaIAc=xT|fm6QKQ__xGH5$~%z!+7SY z|5e}@{-<9;L*M$Pm1IM?&4#eRz0poI#x{Ui_W1X7qHi)0>c>n|jbjA4#| z@~lwVvy8m%sX9{kge_x-z4NA@Il?8`pR$l#s^>$qu+&pKyU8R&n%Pla6@wge?Oa&8 zFmr@=efSJrr+pAL-Ntjv-2OewJn%{!ZDgP&{muI6P&P2iXw3^hI7wY;H94RnajgYURkY?xV6c2%}@3_yc}3$&Pw zrTMv!52VEoQ?GNsbsT=hgHIY`Xs$VgQS3j`l3cVvjNjIUPT!2|DODNJ1eBXlpcl2O z&I<*jV^rPf9xfCGLn%>^VRkPs$}vdI(=oF(n)xYFzRJ5)nTI+h@RTf1%B)I_@)LZg zXakM3-}Idbm?gi-h@Isd@AM0||4b{&JX0ciTW3PMoTQDWMyWDQ0i{k1TAn`4LR*nZwt{eAw9bU$0Z4%;gQEKy|#RSo0+($_S=_QrGq2j0dQa3R9`lKRnyr 
z+1c%PE9kZuLzuA4QII>6Y635sBn_a@$UX+b$uKF(hUhC%HM)5S?M+#j zehbw?SN)0!G8U#%=LTwlYLo&?YYEA!o-~&=GEq{~wZnxXoPwr_veDou$&gyhi-jq` zELQtQrr6#UVCti1UEEBD{t&9i)37wE@f3rCHc={rDy3bBIW|(@bfb*j=P{-ek9r8I zEHs7OX7i%TU@BGa5$en6AcaGh_=+M?m&vkB5M>^+GqeF&=bQ<>ZjN0`RK)BAc?j|KHfk~Q>8~tl>tnj zG}hWJPYcuIL)fOB(tdD=a*h1!^x#edI)<5!NOWJ_&C-^g850P7;s;cMuyoLN!EVaVz=V*@G*B^rvM z!RJDs6wnQxGhLDwi)f6y4A(W2uQ90jzE)+`lx3_Y9ImC#p!Ti)0uT6SGJ+@+S~4>po-IYa#J z@C`jc@8t~-AahjmgdFCba+yhcJII!mEOiGLH`xPtfPP1HFmcwjSH)taI4!z#g>fWH z+kE|n6kJsZ+QgB9~4Tx6Wlq_ zjZC*H9o4waT}mBb$NC1-`#PSXN>v!eOc~6Dqu5$%&1XPaQYdv*)75uJ1y+HvqCQvU zu=Xq?e~DFDB_LCVapEr5ntT>3aeWUB{-xVmlX;{~tx9*g8RSQRa!XLiG9INGKtWjq zVxN7YOFe!5UZM2y(AMWBk)<5KEn(c1T?T8Mukc!~9=$jWTIgF8W1hTLs&u4B1SM|^ z(@TpkbaAe`W=B<|OMQe0qE3O0zXgFD*KO?F!FBt2WsAiKDmWx;HvjdFtcXqs;6RnPI%*TH?{;U)c(u&6x}Z zr10bK4*ifE>YexdyZo?h~@X9Ra? zu5g{7i$o)X8hBvp6{}M5wXbvHD?c|#{|#R$7|0B7R(KxW+S&ZLPfwop3uKt6OB?bV zW_l^AQV8SjjL2vYE!EwsjNXJF|A;SstV%7-II6o;2`ED=lbN%27w=XC<{X9&FNwj! zZfm6d+wPj_rO|ea9cEg0sdmk19ln?gEJ>B%D+QEPl`1`gb?!om+QKy8d|rml3I(+2 z81LI$R!ujsGCgLoN7GB``I{_^JKbd{%NGRYO3pA*l@P(%0w>yxU)5DW7h!6YsMAiL z^meQ!-Ib>dkNDY-=%^cFur7_+j2)k(m$E8-hr5)PI6e1RGf^5uBZKnnvx(4O@J(sz z%su!Iy66&0RhQG5@pF76NW065Wg5Dz@7lq-EYdG};*<1}@0*b@p5!j0CH_~E3Ry@R zKy?#6Pe6uydxMH0O}x&R)nD9|QHCt(Fi?x6nkXak$$l+-8FU#gy=0V*6&wiZB@g47 z++`D=FPn9vR6q;x4VAH6ZD)E{s4uxAoi71CK$xvW#!_3=J@M-a@*LUfiMMRf0 z_#CqCrRgh6g1dAk>dz3$^)x7lrf&nlQArgW@5B%k{&J&?b}q1^ysEGg$V2(1PX{d9 zr^bN_Lzg7@>`LinmBBRJWyRtyLzK8_nUZ7JUHH;(0>4_ZEX{(_)O9DH^mid*j_%IYrvtVyoca<;fb4F-C8%2@_DRh`!p z0(vaZM3h6T>L1r~NX?^GC@(Bms%mGq5aBd+8JkwIuEvQfs}yG%cd3`Sz79Bal{NHY zGrbYLk<+SFAKPRo!D(`oP&s{uy|ffZCp~g2oQCRorYZ~SX^2tX17CU*n(7vZsZ>d2 zS#ioz-`6&8;^H&UewrJ%~Fs03dDszkf&)&spBH+4aoXQ(pPWptWCC!_eW zDV8n~2-9$v_9(7TM7^@9k^$A{Wy|qL5>#2YsImb;SfkAAw&Stc<0xJ1emLG4dPS=$ zLzZj(Ts<2EmI&xir&^+5(oOT)rZ@xt@|C0~oqP^Tod~9XhHJ$^30346rblko+3Htx7}rkt zph}HAM~;qOGI!~U;_51=xf(TxDs$WDhZRsl$!@c#5^5x$Ioykv_jnGo808}iN(pEV-9*thQRXl}S(zcuPttc|+D^4)y^H95 z|__CKQzdM1=ZLms|tMcLV~_{>B3)03@-Z*hffiqN|D zzja6UA8`O*M=vQ19o*gyP|B{aB}a3ge0F=V+rQj9KRv$M>h-p+21h3+a=ab1{02H6 zbqqyh_?ylMhV@Xf+&vxJ8)vosfaDV8lm@E)X~R5S_I+KHV``Xr{K@+lUY%|FGAnU27TV#-?#`A-PVt3p}oMyED3hJG=>#`~xJT=NW7M!3ph>33D4Styjw(7)Q|A?aVT2fdGJH9P z7}g8Yn!Hfm4gwR61oRe2vWW&caAq@T6&q-y92Ra9H;VLMP)E*tVZ}coL7lLNSx@58{!{4-PY@t=3b6LE+Q6ALA5S7Q&EWJueaU?V$h9Bc*Fa)Yo z{9GDyyT`cd$vlRymc?$(%~?stf?x|!y=I^U8WXDBW;yRb83QG%L|3x0(2m$KnbzyDgWG8r zDaKfr0!ZWHM}2b9M=T*1I^@srBD;|Ei7DoTbuBZj9tOV7kj9ahH0$nyWHl z&ukr|OaoQ>yW?7o<)TQMF-Xi3I-R?cC}ELXR2d!9hY&u{DB(2t4g#r(($GU#n$Irv zeNZNj;w1XmrigMgFC~Q$*Nhk?V-OcJoaY(B&878>d!x)~pV*#p^b?IuKmC!zgKGj1n z6qVhjxx^_=wX_4iVnKvt;rK71c!#$cJ#US}Y$1nlq`<%QTStsebe73@|?|W?Q zLgL0og1Z!=c-5FO;K?N|#lM-EU14S=S%=0>+cPpwP$iS5pvwNXVH?0weRKM9EtLY7 zqpZkrYwtTzlnGrM#xai&YVEWeXbT8Zi{lwC8jBUM^q+IJfDWTyI@0r*SxIY$xM zWy_c7gi(jgW*Mrqxl4J8Q<(JD7csdk&CFR*a-e!VrNk2@vd&GIRF!E^!rC@a=7ncv z#b3nsm_E<*-0o6{;x>P2p6JPf65-U_0VgUBv1Pt4!wgkvj~Eh4)x*RngB5PQcJWY; zvJ#L?m1tT}TH^XNk1+`>oEJy3ET=*l^VE&oCW`9fI+tK(MOl|snQuy8=kxA@?y8#; zgQB}kSmOK&H!e%!Ex&j8O4D@r{EDVX>0o9@$yFKG*v)Y#u_(&6s12Rq2WD z(iz3Y!Riv5xzw`!x0yN9tSI%SYL_LPB1}`5nNik7RYpjM2H%kG1Ji3owJO=Y|IQ^| zMWRgYEh`+Qjc6XF=VoY62QypPiStZ`vH>kO5hb6mMo&=4u*8MVrQ2J6Z$-&~qVBnr zC4W^ z*(azW;}5hBaTOWr(uUH=iO}6I)Lj~z6MWCC>QE(@Wg3(rPCYrjgMzvfrn$;b8OqLw z1-*-A>lInef>OznAEPjjNV%3c&Ms4V%T%fiO~;HfO_A&{O_wP?12i5!h~bHI*Mfz* zw4t=gly(mTxA&)GXzZsKG@zfnZZ*9g_95s^pov z&0VtArCjr{tVr^fWdo%Zsovt+V6is1B9VqQ*Aq%c3Zcb~EphFh9 z;4R%KHBX%hU8p#e7N)Vva~qb)$3r>zpv7(Cbf6TU@vhvZHHu?Z;+6*SmcB)n-zB3A zal&R{yRL(JdYHsx){1A6G}}dML)a32Tqy1C(y_$NX$?t~T$Zu7{5BoRS?Kxh(iF)7 
z(-D4#(mdcSFIDWJ!ql=fmpMkso>Ad&mn@27St6X0D($lLXBJbTgrE1kt89BAp9SW+ zQArjq&6`Ix-^p_s>%di(kSkcB@QsFMnaTDmQQD1B4Bo+mZr{uw9ZotvsUw3NSdd) zoLiUKRM`+Z8xX~byX0#;qbx&j*_c@sRLPOr9>D91PBXnQYju5qL>)iny3ErTn<`D3 z5xRyUOyg>ZB37l45T%yoFEgulRibx1%WdFT$t-Fly+U>IX8sT_LM_LQK6&>5o1XcB8yR!vokXcr9l*E`a9fVp3j=C zk3^R9#u$f)(s5&i5W(+-&D&ApRUiH~J6kKi{}fmkLdAI~Tmz=x{p7h|*2owKBnMlk z8O=IeFb(@(Gcz;)oE;Lg{D&xj?Dsvf@yZ*=)* zf+_`^&}BS2d$)5e?9}Ye^XkVEd;n$d0GYl&LnY+e4mWjwKZYomZkP$GB-(V}^w(?B`p3=EKpC^tSFp231@w3|u1tTe5!xDK$?{cnQy@pl zfEwC#ocI{}&%9YnI$6SxuMS0r=XUkO`P2vuN67~bAl)}n-i9p6QJyz$lxW2p+C=wX z8P-1cSS^WRa+VBbs7s78$5ojJSrV4I%QQ;2Z8TYiqWm_#ztV_1$b}pEWE6OXxc8>dtyD zr-zhO_mZhHPkZVH%A^^CL|LZHhobeZ9;&rOxN%iRzY$f|c^7#9)4B;V6-x- zzAlp4m&UbX8S>1+azvCm@*tR0mGX$eWMvrzo}U1vkfa+p>FZFMb7&3R?a0Iu#y$=ri0iViw5k$EaVTg{^WwP1VOwf1xS^i6+Q1Fwj)rM5$Ei za+oD@*K{B4ouxwQsYf1ds}>Pa<(B*LR@0$GJZ~gv5|lSwm4d^pU9$63$3?cKtB>K=;`{O3DC^u;-m)y! zn?EJ;GZ*`BGmayZQOe{fZx{=m?Fv>uF7k{NuK!nkx;$7W?97~Vfuxjv^=|1h~Oj2GEK!3 z5`8uX$yk|DFt?y&kz8<+A2;Ua@*3JXpe*7Le_V&1af(uWu%V>N^d(QmiYK!5F;VJx zZbL~_SvzA+a+duy z!w_Uei6%7#C%M~*OGsTn4x+BQjU-Cid-@clX_6{OlVyt3GO(}4C#hiCQO0%Tb%l?$ z4k}QNqs#ZZ0ZU4G9A$^u1V@CQ9rqQ*_g_r9G zg$pHqHtED<8LDzNnNB9VX~|ESS3mY?*F%SY=o$xGz<*_sr)^+$_cBM>Q2* ztqp|JxNS#wH7@ZKD_-I<%3KPRnPiz-m6P_A9A-2OCkfLWC&DW{rx3Io0?KtuUJ1Rn zT(T-B9mB`%o~J+=?V`!LyfJorG|YMMl+DOCn7!|dD03+mc;gmg_^pdl@1ltg)7;Vp zRjzb0n1K?@(xu9jvP`GS$H^qdw;Q$fBQ0^fPk~iA2FhHrDsMrSZ<0Yw2Bt-qk}6}C znNZfOvUI9)f-(HZyCJD&I7C^Ro8t;*MwB)DuwwkKrqKNfHLNl9u@FXsV24=|X8_?QdQRCb9)$N#L_oVN~+X-&nqvbv&<;V z$?h_yH<)OZ+$E7Epj@}2bg5ERJZNX*xFIA%W5@6(6W#jGgi@9zg&DAfF@AM$kD#3A zzLLxV1Ijv@KRhv6rjOxvRZen9C_77Ab_p;8Uzq|WMDTWwqf{*oe1)OG(pHCOahCb1 zk&OFoT$rp!q*2B!aRiT8>e~ss<{a<0KbQw57iC6ggdb7h!p`>T^7)$$c3AZd8Q!TxjiBX)AoJX^(vjWPPW$jZs zRpxE6Cu<1z#<_^#7EJP%WkPagHXX`!{*1kkIf^VEEuu;_t145n zl$>S$-jW1wX(f4WRV52N>+})MeXA;iVHcTYlCyk#e@V{7xuu#4ZQ*! 
zWSM|6k|pV0`dXDZvy2}yA?cFKQit%z_nhR+jF|5^m4yp9*EMi9R8{> zU2<7!miIW~DB~pQLt#Od2`J+SchvCLJI>?naOy1coTcO}VRb)9k``Gqlx&4ZYaDkk zPi?BSt#O+wzj|mS@uPe6SQ}U6oEUf1l^Qs`Ja?%wfhAJqy{x;b$s~L( zV;O;bHr;WxR7OY+=FP3gL{$naJhD3Oxb0NL5Dt4}_B9^K^6`Dy@OGHAQ+O-Z?)7?G zTc1CF{`m3g>S}*~{|fA*vGYqTp`Zpv!H9KgM>wnmx1XWQL2DQ+~lU^EIg~w zQaUQ57Z1709X+tBG zrIQZ26jZ7EY~)UoNR_z<6}UwcMu4#@1!rk%S&pR2Mx@Hew|ZOeT^S!tH|_~FRVjsV zB1@!7a-${pmC9Z{fPPxCWmn~xA)JNp{PPFU;!S64Fx{$3A?~aT&XU~2C`}a*S$_2Z zb{$fhaJ%5)(-Q)2n><2E+wSaWJsj+t0K`XZd9GT~S=SLOAsgz(zKPk^Y6sLHVu z%U54av{5Epi-lEr%~hGDpmSmpk`Gyms;td^xgJtmg0H+DUzLyE{2n;Y$o&lAHs$oH zBt`g(`QHo2zAI*JfRd%v1ji5^2M<=R$^}CWIv41W7Ix$tgnxFms z$&s4EXZ+`C>!Tz}mmX>aYL{q79W$w-5>hIMt zrnaS}UsqROzFb{gTzoUMkYzx0h;{hx;^NEI)%z&K{}1ae&5L|5lyLw6002ovPDHLk FV1mH_fyV#< literal 0 HcmV?d00001 diff --git a/docs/source/_static/thumbnails/heterogeneous.png b/docs/source/_static/thumbnails/heterogeneous.png new file mode 100644 index 0000000000000000000000000000000000000000..ee6ff12dfd5901933b5a18d56e228b5d6a5fb881 GIT binary patch literal 141208 zcmbTebySq=`aVt$jRPnQ(v5;JfRuDdh=izgx6<9+GK8QEDj}eP(hWn012}YdcS|?l zclJ4Z@3YVO{PA1AwQ#u@$M=2bd7k^euj{(6d!jYe6iEp05@KLrkSHt3YhqyF&|qL- zb>M@5S0qJF1A)IVT{RVDF~>W((Z3KlDe1amU=UHD|6pQ#Ood=zaY!i3%e?SP-I~Tr zv3U96=69``+UA?W9fShT9rrufmt;@5LPD?-S#y|3L`lh6LoGU$dJ<(^K%Js0?`YXp zIi0RgTt5ovA8NhS_SAOoJ*=Bu6;*#Z?W zvS20r?KU^|xH4dzKYpH;3@MBs-or=ISHX^Fwf_AoPV;RQ!V)IPr?9aDKSaWJ65&lA8|ijBAGvz$kF89xoJ8a)U2kX7RA>-hYq~d zHhf=Hv;T|BwbnY-S;1#9R#;CjCtl)M=lpkkU~0P0#`nWkhHL1w3+jQlWIIEkAL5`# z>_BieBQPNSll#4cdm`35E4~x2ax8zhxQ`rm3(ux{O)thvr5kxKzrVb`zS!+G7g$-0 z6XP*HS<8-J@u2r!dh0f38oAsM+g#yTf26f@)5($Xw5a}A@6l3%+_h4sf1m%6isp0~QUk4B9hCvrAt zjtV`O1F2RHFJ^Bf8xci2jx{IN>E2e4PNrPT`Ya3O$u>>9Zcf{!*ei1~_|?oGI@}0N zfX(FoJTc8SZ|}B645~{>_+L!U_9#5ePScRaI0bH-9`|S?*KW&yUB&oU#Eo{t{`+Vx zy3pTvQl~NjjgtMvMcm>^dm!Kr$AQu_z?J^I|X_eMd_Zu!!(k zb>V5Z^>8Yqjw^|at+H20YU?huVB3$)r`J864M`-vi+$~m0SBM#Itt5r9*sjd(z)yQ zTE;Qxr2_m^HiuHXhsOeb#~6PBZu#Ir!|B3!M}}*UsOu+v`-+$6z*Z&1lZo6Nx6be% z08U!!s^?l}|>2IX%t#arGdJ04>mC%Tfd z7H(oRd$r@qF*57gD^WIQ@_>Liuc*Q2_hbuskUbkxPFg%PIIkEVa>`fcL*}w z$@g_P#Pxtflk(LB@^W1Q6t9Ho>t%6P#D)qm2&MFTNRHZD_x02_qO! z5ClbtLdt+6?--z|PONJZj5~l!Wzbhj7x5r8VARmFTb3w08EVo6v)ihg4XEk1Ox9(W zIP8Ja3rP>z0oOIN>usQc(v_#1_@0A^8trZeG6XF+hOIVpW{AiC@gi> z{#2m*-DsLfvk6BCV!5#F_C~ ziOtLi!ziJL-Sv*K2Rl~0a6?Ug_Xa7SFODN6lHhnxb(CwWv` z1mDZ5nzT;H$p2`Vdkw5!IqfmYtyZlrPg{foThJX~2Kr97;u-9Ec{i3J`<731; zsMMjS!u(0$-Ga9`Md3@25xb_}$i{)i8(cAJBqjj_`M6jZqiGN06C3?gtYEn@3{&}Z zEF*kzImo8e)w=rNu(okpJ)8J+r0d8to8V(FqqV#{NXsj4Xfp{To`w7+xky+wm=Aq)#RC^` zf;;$XB*O~8?o(3fbn>(dNf)-=27_V$SiRf&#vt>gf4*cU2NfdtM`2|W!uhW^0js{% zcyleP#;{M>k;jN92YwpyR}OJ&b91Mj{aR{ZnEV69?0#lx*kBCr9a%P3uoIGjMkfII z|Dab&N>y<9g{U(FE6KZe>f|!f%<7NUztq_VhsAhU6enRi%eu%alh7sYJ(HDFzO>89 zjhhCsUM89BCCHI>cG9vz!oDQMt+yXXB?>2RvuCC-oPe+KftQQko&>duO%7lWg<{;y(N7MDdyXuxPJ)Pd zsu_Ky$5flu@NP+6I?ePqX+=}8Kkd+GC8z$D5pX`Wf|qbY&NsRi7*lI9lxCw#M(t@C8{1<`rS+g8bb-^y$K zD|`SZ(34`7;eQ-GY6JkNSIN3ookcQGd0D%EsR%9Hy!t4n7rFJ`tx36W)^r9|sv<@56OdIsJeN{DdRj%j|q=RT5s$QryLl`U2ED zp+*kQJs%yn{>QGTKZymp62fBuK*VQX-mmZqKo?KMcB;)Kk*%`E0JJ(#f_!Zr`;Q$_ zP_g5*>_yb&t(Fmokg*F!k>Js}X&k@iRD#^24S_I5wxF@w4q~(L@O-l-|jtAij;4IGqc9g{}xjWRHUw4Z!!7Ttrq<%m=PcW;2AQoLXtkWag?b_ff8N zGh}26WW=F=9qTxVeTmF??!WH*a|Hg62b+i{(H^qgB}GZPi&pa}YWa&&^Aw>d>?a(! 
zi?sY}&2XAin8L@vFx9`;G%IavFOR4EW*R~J{Ak(6hC?KEGQ1(gB=LXVIBF(mnvSTO z`NnJck>{8SC;Q1iv9Vj5QZz|Qk*e|AS|-Tz|ANwrurFglgOi7z{+rh@|VUvZ{BZ~S{e%Fp%UFceC5fYZ+7;p=8?)fA3^HbJwGalckVWZJB|V|Y{R|# z9hK@Bzj(`?+8CZOvYf;+6xgf$&m0j4At{;vDC8gDpI~3#xFIlgD-RWFKe26Wl#4oZ zAFt+leydbHq~pEnuN9I}=r`^B^U7OvHryvK{E^2Oc%pG?rzVD41t?K>?}E@0aZUJD zo+Oh6r(fmk1|cp%gEAY#9moC~nwPW3-)HH~Zjy-awMCfNk^hM@`^d7Hgbc>M^-@2w zTl9AyFXp+i85}VQ!&5(-oiv@zu<7racg(oa9JV&OMY6#S2Htx_JU^*B=uuG@|J_#H&iCP;k0w6xt+&@*BLmaV|C3OJp4THc za%8`nT|&}AjcMFs$`9B1Z0Z}W3Mx0ybE5*3BVNC!SV^-3t0PI)ZU$+BpXi`Vrm+Cfuv`PxVgR?}^J_a7ZZwgXwTjx3DtT+KAiy6KmwqzY<2G=x? ziuDS=0Dxs2&;sEaH|0Rx`y;RL zwXXjQ`!BrV`Ua3NzTV9INq{({jp$mZ`OVNeBG$i^pRIga?X`@RX%Ybt*nr z3%eQi;wgvJneqg%vIFw#)5^1rPwy8+!=cH^+GqX^DNQ9(ev-$e{|rRnf>R zO_c}l)qtY+5J0Ort+%AaN7PsONo*t4(z|vE;`}WOdg(eV7M>KR9C28AzNzaju8fi@W>xM8M%#` zaCH)BoR*nft&682X+Fy6Wd`SOA3XP-yC$?(^ZkAIsbp`M16x(yYw9+{M}NA|!7|^E zLB~p$8CNAc-|B@uy95}SDwUC!={XwfX5`*npEUL=6t|qRD{KKh6W@LF_hQe4%N# zL4#f2*$X4e2f!z<@=I-RLIV*u0QA?J)8k;JV~cY~F|Np@nhenhECyB#YpI~?FeSL$ z(t-G_@#bo`=Gwru`+YbVB=M{T0%!!+ZfN?+N1;Mn3jq58@q@0U$rfM_KJ5Tn$>CO6 z@3+muTAp~vqhamdtIZ^cdAc}L0HC0(RInC;*%}Vgb}jf?CLSqnRd1D|1`Y?+Ggg({ zphgE(3M&f5^UJ@t9RtM*$_JiMjDFVD>AWrdm(GJOPJ8``smX7Ra(KCoTAvVvOtPsr zjY8kK+FP}MG^E`#qW_#0Og!BjMdCTMygGd2|Af~$n~o8*Kadg_RPt~ijyX|UvF7t~ z)`50+UHgQ_eY|0V=#$OMSp>13L-$AO6)k+(d@e>FI16?iCGOh&q2b9publV0ncVho;MsiPpH2<;fc7Y-56Rn^&QMI zG#8B5uvpg@r^w)6zMP6rH+w!(RoX5c$XYEvA4akN@gX6lCEd9(ih>Cu_;*&x(l*1E z`_$|}6{6;Xl`kgk8|d&%eJ5C**cnDtLEXzcIYgsm2QJ*}l!;pwv!AGS3__@se@8*R zW*OVDvSjk=$f-y)92w&y9PGA5JCi;}k$8xJZ+OQu4RAcvQ-Bh5U`;?4jpZVm>hkN} zc#KS`tSMzeS8C>PNXzrhiCnNi$h7PR^&?@vtO_8?TV-0(UC%l!^gR=G;B&pkW+Gr_nTZ1@O7ov@ zpJNz-(bVg}H!I>xVi+mt8qI+y*q+#Z-vy{ZV3r~dldKU=G|!Q#BFU)IfDO!ruS;ms z&YOeCHDLB)29AqMK*JVNzKH~K#3XkJAPYUzb>maOg5ku*Q9K27xaz6G2ON{(i8w5> z|5zM_tUavL2@G>qA#Oc*?52yx@QH=tSkO{PE1yx#R+W!Wqt_>&?Y0Q2ADO;-evPWN zjWjJfq_hhngc*e>ngxz2zLCWVuX2Kz8KmTxm;&X?7h98 zz^Tj}t^k%hyf+(W$V%u@n~?ez!<0+K^bQWDDJ7EljPA(}fxpsLu9|XNZhT@}X_u-R zD4qIE3Bj%TU_M+&VbObJ{{fxv35grSYa;lLuf^^Yhifs>qbk1-_B7qJLfc&fVmx?X zIs4ZZ$iwMZbevRTD&t=}`-_##FY&;vOPK`8Xdu(E%Cr}))mBlrK%>I!j&|>Do=Juw z#mRukK4b6+b|#}oOmhT>*$i+u%JotF9fxM+Z4r}s!4h|U!c#G43(v`UDp+#>mpCvR0&H>8af%TOT&#uI?ROCLaqGHwNFW5i?3 zV2Ohvs4#qFSuf007ONLDX+Vxw_lWn|mrZ-#b;LrZ#>O6F!mSW|CO5g1a6i1l(MsmF z=8BEIQbzeb$+?$Jtr#1%TDf6}Iu=r?%RRre#TIE;D=n>nLUnSkNgb{-UAWxdOoFRy zh;mVjn1)B0MpYfWy7aJ@rM0f$t@EAW$2YR7`GbHkmv$hQBGpT<3RBUyvH^t2O_fh${wLE1J#^=T-`q!U0KSnn`L^{4Je=0O_A)s06)FG{edI6~T;Hy?Od#N4M52OWarpO2NLM@#`xRGxUvKTd!VhFpJFSxbjBbP+j1?p~f0EN3 zB>N8?$mfBOxGubGqtx9nU?N)->f?Q%TMPamuOW@AFc-Dh3OUhe1D!EM)DE^>+9MB_ z*+-2fqeR_nA5J=F1A;>+Q+~Szx>DYTY>=~&<3Qp)KA5=izo5Q`c^To?@jLkfuxOEI?p-f*}e_L z^PAWU$y9{BO8|!XnUcWz5@RyM$a0^zV1#iveL9&DEj)6{FV{D?s%Woo43p$!yDcrD zkk7GS_`K@fK$UIvIuS4Q8`Ak5pOBHUEKl_fTs|RAaKX#m@Tf~UCHT7TWI}}MO7s}$ zpI9v#FZ@ClNET#+aottcWC*JIB6>Hte$s?x8Y4AE1|a40=lnPPjg8kwMdM8z%K_ih` zC^S>Yg&xLAJd$W^;_?|X-5Si-``ny!FM*N!>3w3MOdG1d8nJ6povL;^RU2`cb}H=$ z6R8Yu#`Z>ex^@@skwaSnb$u)6MsVGau$;6p=+FZh4i(LqR1$-wnz>zovxwqJ^Lm7t zz+8tpnDIUPIH&?d$tH%$*!%H|aL#b`G7bIlv%wLJTc1Dh?R-o$&!i@&I~KkUXX~(> ztJVOU5?p!i&-wRN-?0|(!oYuK6W)h6PtYQB*w5h2?M2$r$XK3rsNeEJZ~ojvnWlm* zK2)@mgOB<@Dqm947$ZlkMm@FBca)|3_;((n7dN)2l2Z%knNcJh?xt}WjslUpf;HD~ zDMuUhjeFF4f*f|c&Sy76NHJR0Z(I7S~R{5xD_@p$s^cfkX zmYn50pGWQzAj7CL(&~O9t{_Aqn?(m(koRhuFwG6IPYw8q@r0QT1;vsgSd+04i)Flm zoZ@bFA1So32q47<>NNXYN&%dy!Br9BSb$)o-hi}j2|b@EtHo`WhO_{>$om)}d}bW5 zYa;hx>X0z|K?SGO~nAF`XfrPYY2rIr}A zNaAa_2&7cG?qpmz1HJ7PL1#P7{Vz&?a|ZsJD3UU5e*y>|`JHQBc;icdWoG*d5p~0F z1*jJfCj1K@i=nEhRd`!D8-}y&mm5DC+*DDHrcZ^J5Ql?lX4oq_#+#CwlRS`YtuI;> 
zL003<&DNbv1OzVFN)8<+R)_RXO_<|0)?iJ9_*2s;@EKJLgx?N3q1HV zYr&G*j>C)JNj}_oG@{SJ{`~MmKhN*10P3BegFz+TF-^(Im7%O&HtMnp$`sMe=PXej zUrny#bmfYWwtkj*s*m}fZ1TLzV!X#<+I-i`(LahvRzC{Yno|rnAuH4@9miv*lQ8t;;V8q{ob_#7< z#4T-XM{RE_!xi6)3AGPDRDHPs9R9?v+7s+wW%icR{$R~ZJ!zBoweyvnAq1s?Uovm6 z`3dJOOl<*6ZuYq?s4R^JpTL7^??bRkf)fe$)SwTtGUrC}fO4;V)>n)zsAMPQktJ6f zpieNI-4pM{9d(`_jo7U7OgkaIE~N>pw69l&&Jkd=A6XKoSLU@tkKJaDEQJZpM{*GI zr1YT5#uxFqS?aPu_PqW=lr2#{;>6`L7SAAivd}1|^A)ns^y(mxM1OdDPzi;F7psVR zms5dj7mM}6pahIit@Z{HNIf|nB5xG{B z@R-qtd;uZCSRcd%p-8<+w^byjS31HD;lJi zlX8rQx+8#{!#?4@El%wCTT

    l$~>M9>deU{pn>1J`o6fW z@%n8E6Z?OD`?U%=jQXeXSp(AZ?AF#+ORS-in8%1=PvD;Si8ErgD2ym}RecVcRb;g# z6(iSF9{I?sFEp%EqImugSh2ee;9r0@+=VT2h>T)r4i8@&W&4{IX5}+(IXg<(oIO*v z8=d{8SS>&1_yw95v+kV}>9qV~@qX~eC+j|btBvKF8jepO_BmW`TOj#bMhg3S3Ml%8 zSNTZx?|=-84Q6Wlv%Zw1g|NzTI$#5PU3GVN6W*i`{r(;IvG4fL{=Kl5=| ze^2#2!aHiPxvHejK9v+(mHHumJ+JZM7VEobD2X>*ZrAr2kj*2JLH7oT%=MMpa1r1R z30G>>%N8i4;!kPJ+j~}($Odz)(0_k__5o_B7O$h?hKnaXn0WhE={=RNW)Q?~1=tbt zBs8?NL$lRJPZiaM`;cln6ikB)NtXbGb$qLsjglc-&s#Bd6s`C9)h$-<$128wGc+HY zD=B42X%rkYaAHuu>Z5*$GZm=>9U_h4D%xVWK8f0ehi|_*`f8(DR1QZo#;`^Qbu&sM zm!Qcw_uq1tTJOqxmo+jH&oCaC28}ugu`qR;a#SgW<^LZ04<`yEk7JbXL3_F6|B%jb za!?`MpT+7RfX6PTQ*)U!2CvgMeb9}FJ`=-REg{vtwYX)q*ETna`U176#>Z8kdoYM( zT^!DA8i}sH#wkllsN-gh5uN~(UW`Z{5`a1i0dPv5+wEbaeusBRYw)$m=1jzPQk~Hn zANYPRkXP$n-oN*^r96Erp_HhbcN^zJz?o%C_Rvd=WS;0g+57H;?F^Gwlj%Kqf+7Z; zr>a_jfB0|8zR4s@0%zRx+$a$R9Q*G%=?Mu18;;k9pYnC8ZH>^lR{8wNM2)J)=f6T)9b`4Af`iB-way9vTXdm{^!K+NtsuN9O}L71*LKf3y$-05R9eN3%7ZAnPy;r#~O1O$E{ZH#c&UYmfV^UgNWn^SH95G7Fi0^b9)y|Vr=ZjF|0a&Tp0KEk_#DTc091K))*N&Pt)i~mLu#j zWPY*m&S!4%iCk>*{>b+sjD*H)3P&m~BwTA!iYJNXnqDpwka*IDxjF(w2s>=wYFW0f zdSJop6jV`nR7lC^itF8usM^yUm1uR? zqe4`WZE)#uA;F*=;I{?xYp1szPYd;4j!)dsO-kiD%__xNmZ~*p_()q5WM5B38Rs6m zXZ?MR0KDmn5dX-0c(v;ro?NR7@hL{Aq&n`=8%VhXlL)R%XDVt7iDq6|u%-Y`)RSJ` z2)C5HLz_neUvYu%HL9I9!9-AR-~y74$Tgd=l{lt7(X=&KIB!xPz5^AaCv$cL&k8V3 zM1w}{L5Xau;x-DhxSww>F37k`gHTQI_pWjn-u!8D*l>Eu#<~h46TJ;r2End0fSKB> zy)D2G=-_7d=*y}UsjNTPnpn4!#4KGE1a}Z0Nfzy&KIJ`hB=svWr|pPs4Y`}_!3tdZ z(lcZOP#mZsy{>h}&yPn^l32zE$spiVS!GCic1K4R4-u)~CM;(9UcA!ctc#DQ;qa#1 zoJrEMac0v4B+#!&_*7j7fhv$GzQ~E_AD-Bc9CK5#9&LGDWZ59_oaIt*H)P zlGrNEZ1LdEmwvho8~Lbrd6r~0c#L1a@X@p`PdzxgzOg%U$;sIGZXz+}%xk(9gvKN+ zo6|TV-%xjJwUe;OEDC5wv0MA~N{^cTJ=7|@-ltuN{%_!O$&XlZt&QO(2sgs|fm&gV za+SWH_tt`HBbc!Wd#?B+T|Gg)3B-MbedO*wMH6yyaj|u}*2AU8DrAZ2W(bb3U~G}% zUC@C4(GUUkJi4mp0b1w&EiOkM^~T#$UYl2kQ&1YLO1WrHL zxqHr&Hmm5+=>N3W;eNW2*?ag)!m(?%=l_Ih{Mt{gp-wsz;8^Bug7ODESA-2x^t8e@ zL)7CLPWB3_)j&%N zdkSUv=zcdZOGLlOH;?qys_>ORqi*oM7yj?46ahNrO|bt=Kc#oABm$;SKwN*v(J5~7 z?TPMUPiaXLtM;?E%5&=t?cFVZ(B^vfLOjI;LdsM5pW{|aR2|v-D=VJGol+)}_J-|} zBXbrewgCLMUtBVICG!jDkl= zd3uzvt@z+<4pOFbM!8lrXkq|&nzmgB8$CF>6d<@c#~EGMz0n|e%pq=$7&xX+k@eB- z7!70T-WPoegLk`Ok2VANqJ{~Zb>d6jI!E1u9L?*F@l`R`M!6@MUb*WD)-S^hrE7O> z<(EzYdzz(`LIvNNy6j-=;U%gzAie(zXKY}9m13CBnkxu@ z4kDkws|$C1y z!yr0%Ed+jUfG95bkj^MwSn8E)GxfSL+Lr|R-G?#m?TLn1@NIrud+%SOs@3(8PglB5 z{NFBz0=!ClK<5*7MSxOu<(_8+X%7wZ zJ3rqIT$`WVQwSpD`t?&j}_Ov_Ih6Ox+ReOBlAY4Afe+z zy*@YOw+kvI$H=SceGv02M70GvK>K8NSq%+jXITR{B}S6sdtff6mJYDOQ-tEd5)c5@ zC1pY`OWav_H%{c)IA1C}fRc0~8O5QqeL&mip3m}o&pmKejG`lT8Wi}CptVNni_ z3*!&;b>M17&|b8bGdF(8O@<=9joQBkmCJaL0Lr}{0{sM0S}fcQ_UHCg&rN)6dwl4V z4=&v4azLFGf2nSHXL#$e<{Y<_;d>_Q3W|!dxb-2}&~iu-hhw=x|388NZ15vMbdVMm zt^XWMUdz@26jKqz1Wqft`n$|KI(rWhcIB+a$BJf0K}i!(_h0nhMb4eA6zZr?EC{OW zoE_zQ?U@Ha^k-@F2ZAQdB_iWCy*KIalP`gtoDn`gjAml0_Dm9CWyyA_Wp8x!i<(0Y*IVd#sBT1_E;jA+@Ce;{5wMJKcQ;r99K`g zzDb~Ug~Pv16w(ro$f_jU{L!eBkLvPN+k*S`4(~*0-cyeSqWgSMtyxPIG?Gz|$_MR4_jV?vLhMpN{$!VBu)%NkJ0RSGQW=VQ7SiK0HZ?_o z@nE3FJb)v9U^xM;h+{Pzy;#cG?g*H~1P}}F2D*UaQIz+iQ~LjsEmjy14$A(QtnMfJ zm!L-AyNX1kj3jKcot|RHo5sUQ`u*oQ{%03{Bo+SE5LAy}tvS0bKe-$e5B0s(J(K#c zp5HDEteRb&iIQSO)RMS;@wbflkKElu8`X6Xk?gAy*cAJ}&N^H_1>fxPsgL!Q(#09i z$kN{#)jx@Uej4RJ0T=>Y-I;;%^84fi7fAI`1muY3K$Jq>-vA1TXz-$CWnuFhKg(U4 z3X2!7Hqe*Rm9O(L!Yh2Zhkj))>V78NnF^gsmI5%@5sHPMEg_x$7(RzUJUDE$fG~Wk z@mAie?*hTN(NB9EnAYJKXrjBP>D^Yv4D&%VS~H5c^S0rao4^7!W+)u-6Z)%TDu6e4 zSb0r9FX(x1=IoHED#mmwDrRe=+NlDM@AmVv*e|AkV+g*7LdB)#d6z^YyF^Wz^oVaAq_>*?31li9loc(3iW>(WtvX>gzzMh!yB3FjXj(E#r@X@gql zaMIeA_eo1EvamiAQ}E^Rz=X$SC8+V^JprQTo=RPG4W!^j8gCsJ?(=!YfN@UJt%AE| 
zz7rlCxDtxYwJMf3D ztfoaHQ(wNyPX8Xccz2{1t2Ibf`oJ+eCW7S5DD-@5bO(&;@$cVeIMzbIC~;jz1%#>slC&(FU6P`Clasqb27B$_ ztsxRta0`7*B|v1?T1^=ml>WuC6js83+A*ZWAIY{U>VKtllOZ^oQiIDVj2U8?oYjb@1xm+9pnSKH2L?`g3P>j`MU&eG< zU$s7QqyMtWp)e_Aq;b`ao{>w$S7UroP5^pd!9kBr^y1Xrb1%_kml;ZcDzL&$5=FOG z45=T2W2FEm$F^N8%6B6fU!=BZLr7_cX@*Z_B2IO2jhy>!ziZqOyws9oHp2w`W!Ja`e{G$~b$01irmZOEfzM z6k`}umAW|6&hHd2j4z1bMV*uXW>wT3FKN!3(#g$78C!U20NwPDQCKl09HdohfTH{D z_fpTzRiXCUp6mT9Rdw}*I`gsaeN0;b%{%=jB2Pb`ME7;54zCy!>N^=mD=G|y@a-yW z7nz8f0k4P)_Ww4bRH)}^@=&-}xLTt7W1!S#0d%h`H723!2T#`vc1+XCnRMppohv&) za>VH8luf_PQ@IlJvS0`-dY^WFP3w%GsO7O)FZ{`O!th^DryTL5yYs3b!4TJy>Sd!~ z$z%f%XtAguK`JV-VouBcJ9~NRRcaXaTyrWOW=EA$e_>oI$uOnV20d^btKIN5f~LQ0 z)DLd7ta7@}E+7?r7XlT>@>Aih`b7_{5b0$bvJTqwWuY)oGk&E6Q)w<;idG<-h_( zDhgi3<$Qhc$~oxt@)4#Q&Oi~;%@&@Kl`Up_GHZ;7R;-V`4^J~ZlFVW{R@2kV*b^e! z3F9fEheF&Uw77?~3D>AF-vTdN>RFIJUYvQ*Log1Us0_VPX+39W=D4Qh9P*d7H=qrX zCJOaD4@rl?#FS>lS2NKL8-2;+1?y0%DPMsndh=>V-<6E)2Oiu)+vT{NcgNI2e-z32 z6vdaj6Qw@&LR*{WTn!`9mhS7^k2E7&A5{1$-ZiEu^5jc-Ii~YlMd}U9LDRVKc{cHgbCnQ->TWz&fvVSTKl`sEG z_p#wtReTMkjjdGRQD(r5l8x)(0DnQ(W1VCkyNp|v<#`udwjbey(w}?8dEM#tW!cmU zF@8V39Hk?F#oKCrA!`7}Gi&T=%xl4#6MyIc^8zu)L;6VraavNOpTS-ilItcEXUVL4_K%-n)cYJ|6 zHq37+e|S|$rCzc|==V+tzFIKI-j&QNt z7I^20U`Hc#-8;~Kd{7^JI1Fw-ItigITfmbl3x(&RoVLZDc9Z;y8ghf+7jF0E$zotM zV1SF6(i*J0Xe!K)#Vn@`BtCAcJ#=5cq9OmOogku|9gu4Qtq!lEH^Qc0L2W{Nr62EK zjg1C64<|r_|LujA@f+oFi-`gYe02V3-9kNGg2%u1&4!y%SwZ7R(?b4ab!|khLo0>t zY%bBqV3k^k;=)me9gQcSm`uQ&z%02{u$G0ElUoRDi7sMStN-yccx5TRx&rUELDAsF z^_c2!=R{F)7GZ&QCK1{%P5u?`v#dFf<)kz=4$jv4uTVmEHw@(%l4wI7&aX+~?x-1o>Ie7;JaJtbR$@^GO5WevtI}zXDzc*YLl6x9k_oNj~^KI?*SSEP5Ba z&wLlh;CX}oKmzS;^@`_-;747Usg6V;TQBT*bp2`kfD>Yo!ziy>C)Xa=r$v4~8Da#Un-Jk!=jbJt%YkN**hz3YV(n%U;qW z+MC_(C=svTo^9N#Nx!)Eleq3KKrEV|goD$G=w&R-f&bDixZ2ZhbXdeOCk{ix#LN&L z+O|UW!TTfn@qD3vgv>?~_1)Zwy7X1xk8ua%Hf%#LvFfT1jD?vy1$b^7z{^t>YNcv@ zS(x2Y6{0htw~N6JD7s@5m|xx#WgebQB{TN<3V&W`u!#kSn^cfJrt1+%%+!_M>!OT> zBO#VJRf7#VO5`0<79|TaVIM-Y;Bozrl=_|~VeEsdSb?)^WA}?68uIGc;vQ1MFLpJb zf1xZ%V*fb%eohLenv6XcFv$h8|H`R+AYXa+Oriu$pucne29(j?BK4;IMNt3hRq&m= zAzEWn;XM&I*6oNPkunf3JLAryAVAa!6g2;VT0BN{xT+UsOP@R?MO!gh`r7iTZO6kT z8MW1!>pfnuHKfJWe!tJ;l{ujSI8VmFkDAgcsU_jVOmR9)We;bVu9wF3y#ceZ!vxHH z6hQ7R`x}_4_(ahY=FxbU7VXzdU#FPPmv5r#hY<<1!$2n>0fj>rJ=Zh)-yZus<-)s+ zAn%*+10O$QF90WF44{_{gMd1`kk6DP_B`4HL-_WlUn^i@K_r?4Zi)b_Ij~8xM1mW9 zwl~I2r#GmgxnbB{srH-aHJl)6d!r2LIo}# zUcxM?7dW%XcqE_x^f4%ePulHe>8vlb5iwA!S&#)&(#3!Llzk zIT#(SurOl{nnJz`G#GYk;+qMJ6<=*w6Zprwjk)5N%`9R%bp`n~F z;Qv?eDZ8r--zD7)jyb%CO19lc=7(IrrT=OQ;jjjiB+tQ zWo2xw>{I&GOjh#D=#ZY0G6WSO{MQW~xhYEIH;Rqb2ODuB@}JSqe^}6bg4y-NH`Im) z2gf){oAzM85%NRc>L`V8+8n*Cku}dEm!@2$+2T%~bMTRJ2LaW|heu&A%_`(>qvLXq zJnzp08vj+3e(_Q5B|PXGzsh%xw9`5OcreiToAlOv{>k}S-~*U^NNyU?(3m!Fi#>mf zC#FjDLS7=g3#8vspoe9FM`g635v2&SL@>G&_y;+c@Y@ghy}9P={`?>+qHW%us5F@n zo;m1A1q^M?@7EyYsVm~@<;s~Ez+bof_5@q3wbmHe)R!kY|NNNWe<4lof&>;&A5+ct zWFMLA??;yj(8lx52+nyK7m!(PrL6>iB7 zZ>UlOUu1<*8D1cT029hFSA-^iJHazMxbb32t#C4*iQ{7JHEn5{va@o=;S44Y(3g6| z4z|d)87U5*c}0Yf(`#!ftq|-`Dtxc`-kRzyGC;(Da;5AlP|tHKOp9?6Nqk-$-!GVo zwf**xQWN#_dfo~e`KZ?HkL72=Vz7FYqp_}|LX>AYFfrzW?Ze+y+HGPcKB#cn|VL4rAV}v_`=zzp__cQi90P9Ymw9%)gUpy3ARZ4Un#zIcd_or_`d;s?d){~d3pAcFHQT>7@6~~T zLSeynepjHW@%PJwyw?Ec{LPYUX1ne@ipn8_AzCIE z#G-Z~#bmfA0gT1kjDb7(u;p!Gj32^16*YGL7(ZF#ek_k0WoWgbX&kS!3KJ{&TLQlXI3 z+BV`b1-->M*S(OtAbQ>I0Tx4myeAMX7Qk4|Gi*YvaVTxUDi)6tSwymV%!UMe$SgVY zw$@3w7Ih(Dh@I>(>_MN|S`v5)Ia1{8ybK;Z7!&DOBalsx9J&xQnCJk>w?_!ZuECPN zG;|m7r>Cvu8@7@Nf{#uVQ)IG_HIReMw*$__W|q;2Kl1YCk|X_QfuD?R;_6v1Ej9Jv zq!^Gqz=quYf!vmIp8SWZ!s$VAAfo}OhNz}iXlXrdom!L*W>k1!q9=g%+Gn@~=1BJfa6L+K-izPA<)278gl4 
zM+=Q|;FKp3utbOWV?ZYFM--c<9P$W;sVn~ zOAjwEepRN6=hJLv4f?JZnB&@vhK-X8l($L-gp4aU( ztFvbWe15CmFt(GIl9(Z2A8mG6c<(4mvg*RR}4GcTLubb&d1e&#sA za@lCRr)M^EH4?9jf?=|K6|}Up>Q-b#ZWQI^ac@TfD5+c@@c}8IA>tbo^(rAuuz2P> zhxa(sm@^;;#r^8mfU;>ohIGwgtnY82VRO=#zP`O5HC^;|B!z1#*FgqSmQ<$0Y<%A; z8_zfnsQ>I9_=c_asek+IFiZ3Z1Fo0bJh`-Am1avky|AJC!dWAWyc0WoFD<6o^Xzv8 zstj`(WV;_`hfaCWa|l1ZtCE+I!3tJV4NBv8tytc=r?Jvusyn0TdK2IO1u~Athx^}{ z5s^CN-txZ)Zt@|VC`Ta4GRMPTQoqdyP#u+eOEtkr?siK_LsBdC81t8!vuA3|NVvG7 zz$_u}$0ph$iW0GC3MB4#G5(B4iova>2WD}H z?AL7XgYi$}x#O7Qvhs9vbcph{V44Pp?c%$q7TDJ7!RkjU6`mt4cDARi9*t}3SzAI1 zA@ToxT(L;tT798SfOiLi#T+7G%vD>GykFu0JhR*Kh0gvjhon$6>CwQBDpv9@0Y*) zdEp#(LLwvWjQtmOriqgu2XO02f_T<=`KJ{;#%VkoEiPqhP%2^0e4k9T=k;N9yVnEC z*EGo2?}z0JzX|%){>?;~vjfjq)?(s>Se8hl;zjHj)NdA~s%{$fZf1=l2SKcQbx4(@ z)O6c+S8--(_o-gA2H(fbqYPxpi*+Oarc&C&Kr>dYH~RWKl9y0asEMhzf+Mz~1ph~z zH@I+;b-htPI)0ThTf7|hsVLMZ5%MyMlIlzK%U@Ej=PABcNf;A$6Xp=g8TZ($^sM}3 zH|iSzdgM#P@{|sFNS1}Ce8gs^Go0d&D8lIpJpA{y_W#(lY43gCKlJmAfnnPantMhi zKB*axdT8B;5b+9o^$@3_7hB?>SS0cbL~)Q#V@`L?iWpq@76sa?brUW~SMFSLJFJU? zi8HS9%14U&sSG#I9a6-CPU5w$6)Tp68=Hf%3JZ|q4{|(Y{e3=DAYY&S>HbAQr!?`~ zV8HBaAb#C!@!k;RG4e0=@6BtqQl4d- zv2)>+-XIiT$5r*^nZ_TFXG*DaV=n7UEkZ%Py^|3aRmCd@GxXY5zgd;>sQwTapVdt8 zy06qrI6LXsEsFE8d)i?l-FwvYd2;Sr;s1BS*)xJG52x+{da?CNyDlc0SkKHb{RlW8 z%!ZRkmwJ0Y81pJgaI6YC8`;|LU~dnky@yH`Jv~C}<@!18w^`NqLX}h&Y+fIt=~-H^ z8@Gej|E|U4GD?H4gF@J69k~3ADcm1A3Wj=K_?NLhKd^P&eG6jq1V&K*V1a+Z@VjKF zfQGFj)e4vJs=Lc;vM%Z|)v>PrA=ew#-c(=DQEy^CozHh_GdinFtykCaFY^ zgUo-0%mr*C!u^(fe<8zhUH?P5HoBAbe7`4FkaF%e&=!~s?=_qM6^1|1h-BMbn1V^9 zoDab%%mKUH-(7*M#Cp_an-xsv#hu5b1>^R{vC>af!}3 zru5U+_>+GQ&jeExSSo+^%dj_W?BD!azC`<#zgmmBNiTj?uzp3dvw0qX{h)0Txp31l zA>sX7GMN5sAeu_yGtrEZ|7MrXgYm4-B2l!!!pi6;=1hgmE%q}k%7+O>;jPW%aXrRJ z2qG~MyyQd;=+Xhuwt!ET`g@AuWolY*d9t!H3~o2Ph&w}+OT3Ap4iAkrT@otxj2z4X z#l#^n#9=7!LWe;OuKCtUh&}8QH5?#3Fjn@SSLN{wbLH%Pb+Pn^Lw+_5pC&Cr#)XY3 zOSJ@ajLEko-)1G}g>%E<=*n&%CtRTNAvj-EoaVkM1&kgZvudgd(FzV(QRa@ zGs%dV$Pe@g%RTmhhe6>J+t;fQnJ}Q>o~E2m3SBJ-6KT&F_BdShlXG;_HI#|-5Q`>W z(4sbr(5cH%FE|oXERbnF&(M`m;goq$5(3BSgr{G;@v;@!&``6S@#(jo&?D+^f^6uY zG2pmM1hr7diYiFGZw-?H5O;p_pS)W~%=i65f9dT7qOoA_MzQB~r2=ly^$*7E-9=rZDL{%tqo|mqGvQO$zy=QCDhbPtI zXf2h=oq=UBTIi(~tJcq`%WBPP z|9y}WA7P>KS!2)SvPyW4JFQ#>IPSX0UGXMtR+xcl*bYe5;ft=;*_%gO%1o~BT-(^x zbaX*h<`w%l`|&<9&ohN4H~t@MTHV|!JB*j5Kau8G%KC>bOluEmJUig$k);EPvI@v`Mtkr2Z#Y6 zvXBMSGKu-L(1(xUK4c29fWO9bW=hOsD$U;4wSbHzMS+5pk^TPj+MjAA@m9N(Ssf37 zsoBg0_q1;^QxL2!vrg0B9+h>6noR!FqhDE~-;94Ju~#5}zP&RSfMgw`$V%o9cc{KU z%WsFYx!fdA08Z9HjD~j;>d)w?x|pk)xE>j&2C)LvCwG^_*=#kntA1BBLwJX!8WTCm zis4gJtfOb+Udg%Y^}LlfY8s94oH@MGxTPKRF(Cc7zB{b_BfqEgMDS`mWK~RO}ED)p`Kr{E5nPe=bIwOpS*~Z>_4Uu7|x!00g6^U7?tvb;wBq}MpO{* zJnhQov_-nhpw4l(y&?#YT2N5;Vxik-1fLMbGn7u| zdBbnS`*psutAcb1edNg@Rd%pxsSw1z4Pu?s559X(KWoq4jf1GFTq|AnCg?ZviuXhL zA_$Zs*kYiojlx(dVBrE9`kA)T$>3Bk39*L9e}aVwz=RVv2QXm0iPVn}ZKdsoVh+ShTh=^)2qeG(t`XG`|s<@N9Pt%vq+)C8|_> z;;Yi0gYi;)Gn!`d7slu5u6afp)hN=Jbkn7g|4ohb`wu(oFFGcxtYB_1Y&)L@8V(wT z@tUZ!WzXhY)jDQ+`oDp}gH{j;aK^74@OK7$KHv4O*EP$9W1o#y2PJ6csvPUircp^xCJAF)Ky6G)Y=dCA;o;ec9-k0!0)~y|`qZ*0&pzsO{M` zO>QqI)V!>mp$3H$`Iz;kT#Yl*O&r{ofzXy{0=H1Zl!mE z3+m#%W-6%nZ&@eQRs8vNC0-wWI(PyGhYRyiX#y6C2aE_9aH&Aok)rbN7f1m=o~MJ4 zPITcMXgl8h0umT`)Kx{=Me%Ke4|ovzowGr9aW;r9IftCD!bUz69l83JBfk@|HR4Y-_1K^du7yDUJYsx-vE|pIGN^t@3xyz2ky-aWIYo@1o#R^KcvY zwt_acJTJe4Fsm+oRfZ4?5uoc=8~ZvUg%h{U0dqJbskrF*)WxOR*#BfVzc`YNf2&+v z*qVQ&oIa$UuOwiZ(lrK6)FJ)~?=CH$X{|5SBx1fcRoA4se;WUjpds0(xgX9L>$o5nuVSiyCx9BbEe(YVREwCB z^|x^8OfK6?)!HP4CI=haFG?|%_n_;K<>l(P@bOrWIey9jQl>q!V~>(q#0#Io7gHcP zt9zaJ4|Zz|Hgd=?XY>>!m@(n*Lur(T_ZDByWX#9P(crIU}l{|1}pV3b3 zkFSMOUu^*SH08|e_`GI+YZxlNV>xq=U 
zI&?sTocNygr{*Potfxec73)x14&xXFaH`f+C=EYs&v;_)TrEk?Zg11LiD|Dj zg^`nXQ{6yG{CbHsq`%%!vD9~d^CItgPX6p_ttq4E?O=eYOiNv66Qn1nMB2*DfY#G> zhzPH|JE4ha$}yraxp;cAb=4Vs6+-MV@E3VDonRQU3rZF5ESKGxmPMS?^0&h23485&%6x!oh6gW z5)ty?t-}Uez|D<9^;L`!Rm&!5q9Ecx{Vbf( zmxVciX^iZ;x-6uf31q=_!p@WU6oePS*&~zIHp$fQ2ZW-RYxZ@FfaY9oLBX|%>3c<; zlG(3Bx)ri8xeW_{^}GwVP;YxO;&`?69@)0deqH#pwM!racxFkv2-R)E=!7{Gp$#GtotaPYrnVQq zSONJ#mNqR^Q}B=}&>fp4dL;ry+d&ik0(hd?0=U@l>lJF49Q`OXHFE=1Gt$E_e3Gv;hg7`qPi)}SNZlkUwzVgh^d}k-Gl0lrL&&tSZS)W z#l^Y#00`GfdzIf5icr{iO7-{o!{GgDkg9Vz_36_aJ1`FroCW~+#b+`x5(wS_4c=V* zP1x!id-{Tfbm$^m@x5)i1mX@tOscnD#?9W=r0r2O+&Q}9k0D7Z2BTSl^*(O!rB<$> z-2yfRXI3Y1%U-;VFWfdF1sLmQy+NR|6!S^%P}LS;m(vk#cOWgDRc3R~KAflo`0c02bSZOeuay-1#8M3gMipZHQ}5y*y26?1A0Ds7EaM&zsy}K0Vy!{e z%exQdDFnQkLY`>L(ZmDNfYl_5I;qr0_LA_NhWn`tKUyourA1Pl+71<$2H!dHR0~v5 z)5N_4?KOtNN)hY!>)Cbnt1*WxjV~uOi0}JMv#g|~pD!REb%Ay2iER4o*K9!IHuh5_ z?V7u^A_5S=+Z{&krfV1n4t0m&B`$4(gtDQRY*hN~Cj2c`^**4~Sz+AxLxAq}!!Q^= z6m3fhhh2dvNr<#~_PWU~yj;;?&oaE+XnYV1UsU4B-jma^+Da=cZMJa1u4kQ0(%ldTW~Znxp;|m^{E`7QBjSbhr-@GNE7<2f{S=Weoo4M ziHTLpQh5=u7|WPiS)@u?)&QKsGJl~7&~3dfHElKA+n8B%rf&0;K2rWE?)G65*>j;$ zj?TTmlMW*gC6v7EjP|ifjBT{fR2xXuU`D>WukAxgPPC0`{K1N3_l=PrZgIi*% zU;L1KSi8QLqGR)-<}SQ*X+~D_#n&u$!3B-lpGj7asP0Ea|7A$0Hjjo8(F>cd00m;N;+V z*zR{C6xXjg84=-{>>YV+bh3^k81G9ke1ZT+2Mi9z`-F}<{t}6v%~Xw#de{Emy+2jl z*^ER8woDJ(RkGpIW_%x`oXy_~+wote9dO6{E9^7rHpe_~6pO2$0cfMb6!bxianHmJ z53A=jrN~bdzq0pvAPI&$bOA=-bt`T4vxpN(y#SG@pPn}ICQ$g2zdK6MCE0>1mN^zH zdLMm5kC!)YiJV&OH_NrPaa}NzdpiP?>$5g4(EU-wftkm@S;@`MI%AsfyK#d_wb05t zDAK8%UD$}t?w&f)$iTbw@w@Dd&wtb79AN-{3ocm4 zrXAb)Zr}5Juw^k3J09~!VZHY`6K<2bNWltREo(sKlJIjnyc6pOEG*W z=3~L*ER%ZCz&<3xJm+hxNhrOFd`LRN@1WY5WO3|K_huzhg}^l?%`D2V{Mfv^-yU~9 zL6?M5fPs>1k6>oT6;LPGw)$9n@mgQrqrKs(0CCbEmLFy0iR_`OAQUdxp9+42Vcw^9H~|==7qZ z+4V#G_{1V$Nt_ua@} zjm28b{6t@RgTuE$Z;KOS-fowngFrf2(&7vd1B&CR0L5oLKk{M)nR#~WINO4N$s20=58i7+QM}f)r9wNk3QaXO=FVF?&SP7^s^M09} z%d@$<@K3gv?3G*ZjJ4?6zS^9cO5HER?E0)VPUC2Aa32Xgswp?^?8*wz0~b{54-qx} zBLU5B0c}T(Rrll+*bD(|mu4#t;zpF>%5{KSvq^b+R(M}pkw(Q(=WWy_r{V079Sk~I ztO}_PeINam<*$c%jZ#FLj??<6V&La!+g7dIh2T#`Ld~4^*yqGohxLt0&=@%D_@93V zMH=)dtJ`?9QihxqUiSbVpg%g(F=m~*&Tedk<8iU?DR11#2cWmReB{h9{-!3DPi!VYVgy>|H^r0{d}nxr@z(F5u{$1la2 z&)?^&C+vQxm8mOj{@>yk$q`YUESW5yHv6{hNpA@CvNpJY4!m`=9!1=rWRi$n{L#_` zrj^k-V)wtC;PO&AJwpTOvaoJoi~- zSHRIi9}bX%k2x+&%}%ih|EbruJ8s0z!Xm>&2eMB zy+W@5bQ-v}`XP|)$pdqIdo}(!Mmp>dFBDoBmCdP{47qU?X{<~jq#K-DusArM4txHcK+ikmT+?YE& z?wp~p06lPeOo!$7rGZjtQh3+BtMb~kw+(-4jG#S8#&r&Nq%j=DO!j+m9+7se5y&X0 zna}SKDjFJY-l=Mr#m|Dz=cRFU59aB&kMQgJ#hyX>H~y>%HTo;i(kG0XF!ryrlMVH< z;a%J)`~KFHa-@Y}=}c1`99%*~z`nmT-fAdkSnvfwedaX+ZiQknR^!~S7&MKZy^)Qw z={zcV!qhi7IMJ*m#l^r#N<0`51=S5iT04C!j7zE~O(GXrChNua*1YiAol1NhMDJ%$ z_{&s^1oB34V55!8dOyiW?p1Mgy5DfX%JJJjhGG=B8-pMMKbgcIS;$SecIC5oNxk(T zW8c64=t^oTG|6A=OJL8-WuXB~i1@ZxJX=IQBn<+c+RANkK#km64;cR*OfU*;p~s~B zeDdZ@bHjWvX<7~8+gVSIv>_V?oi8$NT7T%j6dP46p2KU98Hxazp+e=*BBsP}P{#Dk z-~J^g254d3;6^c(8g$?xdS3<^dbiY%7*4lJ^eXI4GL~4P_gc4MEGfx-qHg!GQ=0eJ zB$*xoVuyX4_RASGk&nLsn~KY2_}RKH7?;Uo2@S04nq(G7q6DI{(Hz9!ge$bb^e0*v^F7oC#%)E8aE2WbDD+cnwAaa5v7InmWCTQyzw%&)wU8byGgFi@W(MGu zcvWVaa*woGg)QZ#ZK*mTX+u?3lovm-2He86c&MA;T29!^y=P`?l@ZKRVuFp#H4#-= z(nBWhGwfwX4I&#$UMYmqmQIHAD>e1ABp)EzSkl|Np9wCx{QM{*_B8F1_iNOVB_`}F zu*SvKxA1DCLYXD77jW1ID8zx3oS2Ddqf-7OljQH< z#xsi6;`Bc9x+BVj7Qv>J?OsxN=I9}A5^YJ_myc3<+y*xrjJ%1Okm|On=TjWFP(9vq zU|>DVr*!lKFky1AM;0Qx^YZlgFkgjDSuIREKv%$_F5v*%+gpHiU@Y$Z3&#NU^KOa{ zsCSAb2&valf7PWIgW*w?eo@D4a*+>NFEhEAv4${JM;=hjkfA=##>5zJX8CAUwC<(u zWx-ziDpps3z+GwEt%Vl%g7H-XWF^ck2i{*W5FiUtn37Tn+I*oKnV(a9BY)~a2?9z!yZECD 
zZ8f75rXp&v+bs!iR&ajZxX@nkaLxOxpa0FT$L%Ni_TLoSms5*%(CqS}eugpI>!W*k z|G)^4ngQwIXsA!K5i7Tex_YKUQR3WqcaEnbeww+HChJ7gkM7IspEd!SwP2(6$$Ag80^L=3$NHBvYj^Q?{FqR28RPpL$2 z*7^XdJ}yze8&o!=wtgFEn*&)+bx?Cdzp?vqyOF;6fI&#m|v(~c7Mfsq`K2PWN8 zI)#Hx_GL?zg~(1Bz3>B*eRz+;BRY6!zH(7+`}|XXt&WD(ji^%mD|B1E={aZOHRa z`tnLIuRTG25OyQhC8s)ka*ZL7eZhUwEp%krrf{Cl26DelCo_4==N!XPJiiZKP)~by z{6)i@X|lJv-tx9FE<1$oMlFpK>5EKt(XF4fog;`Re0{_{Z59D-%6xG$)BI z4z|AMC0Qut9xcR3jcXD-d-iOVQUG)G$C_z`44OC#rfgVm8`f?uQ=naw_{{DAAYKa# zO|N_p=t^`Vr+^CQ>&rGRu|d0+G;DLs%l%BOiqPB!O|aRaf{=yai(>g}-){SfOMF~t zisWohhPy^T*!aro!e1esZfCgXDfIYA?YCt3#0$Z*J`xZXbXeic%HD_d&`4#zQcAL{ zJcY(M=N#ao+-7Wa*b7wNKsQ_dsJ{7Bharw|j|TbCS&s!|KpBTeH>$?kdrcP|6U9Y9 z%J|L2;$t#0NCLQ59C|uvxsyR6YdLtc&Ge4h7Nf1=izciEG5dmsYodaDjWzP~ZpUT+ z-4mWBk9E-v>DTDgzfB7tiBqh)2*cOT0iA#nFg)F+wze9MR5mg?5A1^v(uHg_&GDM; zUKYhp>6g>IeeIuq?kAQ-7^zwB;1>`)gRhm#4nVFwE-W6tv^AkYKgQ-&O;S)F=yE4abAzCq+1(z9@+3fJbo~iF5k5 zHMa*g!#do#m(AQx!YB!Tx@@>KRvx)lWMgUD4?>O)2mQx4;U)l~(U>*Xlr3XxOh5th zc53wf@1PTCJ4Izhjmc4*z_TLwao_Tq7*qec0yd0K`uyip74^cJ>jAe3f+1?JjCr zN-!L80yR85y5|>SM-e-6_VPl8L(>AYi z={tTWamM;J(zzZpld*riaK4pPF;KCFrC5NRBEy0AdyiYQF#hZ+A0z6i{mca^ZKHM3 zqHv{g(ob$1m`{{ek&m*B(sa1wzE*(H#In`DP`h)+&!+f#8ftus{mgfRdlrCy9sjAS z%|}6?NAC{aNz`cyU&~gI$gtWRc>xa@3h?qIgN$Pu5tUMT$((3k!Tyww460#%Q4-tf z1j|Nm!6EG-2k5!erMsy}!a9oaD(2!cOLFAtqrbfG3}n9vlT8Qb*9$4UZ?(EwWZGOE zHDhbmOAzFJ5+xUZvL|!vp67#?6@1OncFe(ZmVxvPgnH-3S-Im{U<&0F!o0$urhDhT z?(b*fbV`thX_v?)BT-40#w(B67c&Yz|NJPjN82l&jkcckkUf2A6SE>HU$0lHpGEz- zQ&X6|n5FQyss+qjxss9&J(t9N;)m9bx6|cy%_p-rl+6FB)s8&lk;kQAR&euQklU=| zN-^%&h&XPxfdNqtUh34kBZNttga5WEfeTZ{qqCqB7k8>LKYJy@ z%L?<_*?L1DZhD)|4W&TVS1x>Nf}M-tTCbx-1FF|bU_Mx{=G?;GCP$hcACRDGGcC8X zSIHZTB%%sAPt5F!-B5R04J4lnmO`vajN?s2+U(H-*ChllZg)C03uqw%iS*8seag(n z_)isW&<2X7LqCRP@gT&LbP&z`v4dSBjTlQv&#PT6EvD_tI3FNPo416j-=oC9RZNe2 zd_6UvPmj)(c0c32lCwvjJZf|@x_1QeA64;stURi!Q0vo2DS<~ZEobJ)J|Xq2{J^G# zZ{pvQxXc;&mG$I(cL(pY4odxPspkO&{=Ibp?*Qa-_mLOqP3WgrjNp_UKlsF>@!K-F z+whM=`%$57kR^aibvLJ!`(PT#JvR=9rH3`9FjG0MX7 zIkYh|ca*~$_7Z(M-I(DUCZ49FTc}-EpG~nh!rXezoX~HO+5121D!oa!mG699Dc@N}5w}uJc#Sm-XC9Ziy!#%s&ebgfI$sv@ zX-pK|NIc%^wN|NN4}T8#2l;O4VMk+w2ASgE>-GRq|D2h9gBKXHR7R?8*7E(+v+hTA zzXXf9B~wvf#?|2W>7m{`1?fPVnlR*_OJ|#h0(^PL%ixscCFozw*P=9Ze`(AuHF}}d zfo->G<^?XeKN8^}9=O>yD{`4}Wc7AhwGriX8^_&jQYSZZ%Y!YeKOHdUt-lSO4=a49 zcQuqM>XE6pDXe}jGwc#YjeCDT?R6)L{b=-$?#9qy zEhY*1ipFHI#kl0zo5aLCVg|g!FX8A%zSj{?^kGNG)!|Bkbc9=ZgP3|u&dHEAy+}=& zTWKv(v|A#rGYbdb5o>&$0KSwB-RP&$WcEA*YQ-vH=rQTPbMrn89GeY4f5sf8kgqtC zzNhDa>c_2l)fTR@N3Ig#vmD-zWv@mK5*qbMq~E=~@@MFE!IUZ+YkaA^$iOGIE&`gD zl?c`dRqOu5oWd9r0( z@+$FK$e3T&Q3r+Odi%~`{NM%}%0%+HYS(JvDDIsl9pC6UOF&$~4V0YE%q32vkR~PT z>9a;n+{4L40Uoqi*V&wf(_|B^Yz*kf4OQ2y5W`_Yx!DQ2!Bo_=t27>s)}uu} zv!`-i%22y!?}g7^oygQQ@k;1Hdt*|P<6v5*$jhyTEArLb577*ISPFYRDobK|uQJ2VqsYE~%jCA^Oz{6b=YW;WT~?w1PS8HkGo+FPND^^CExPH*536UUFa@l9=8~W%cp-&Q(SFneWccRnrOZFw3E7M;*LQR zQq3Zil!xCD*D`bVc@+iHUcKt#Ijp%IkwEV1iPqsnC+x@~i^?0sUb}FAU31P(H9ipF zi=9K}v;D)v`zlU>hfrP3*2q!J|85Mkc6!5~{&R~sr?*@E5;cm%|31mtOcKr*Fe`#` zSkGi6m?y|u3HO_9AN$w6f0h^rFSi$zuA;A7&$27yN&QTd;;MxkY2JYKZg|lDXC*-n zCJJQ5p2+*|-ufGn{Q9qnTd(3UXxajCF>-cpOe<{nrD(Y^%&ERjyTWlm_D8q%58S}j zxh?t;3@(enUWS&Yri$8&#d%fS;g~*^>4dk4nf`;qz|(--E(yaUrJZkikzx^kV*UxN z-PG&Mci@IrO)tPG#J|~*^A{JO1%8vnZa=)}6Np#X&VZmVfTF2>5fq8?l3qhrag$!Ya7)y`IHhH=?7N6+J$Dy;`Ijzs}a#i3OJxnDZ%MgnMqVONtnsr5_aF&e)*@>56IK4;VQ=;~FBe#lh*+sl<#*In!nk!DM9+a%M&1y%1cMOeURQ9lWalrKxed3!zpy~Z zqk5&!Z?aWGXt2h%;;tya=bsTNDN*7LAm1hgcRk~_JpayHoNHKd;_Cf9AU?@o4bN>H-rSSTK_wJb9V?9<-vG~)Vf_+=>Y&+)D`1YjE zRv$Mf`~O*IS{kfF>~yPF07^ZJAYUR%rA_LmKp1d}s7&%29?*^sZC#IgmKOW`e+zZ6 z<3iQ#>>vhEBWt^rqHNw 
zmD|W_b)tMvTMTy_?;G-}>Y%HwgTdd%9&D%=$E$77(fS7DcBuFFw}4FJA&^IGNoU;h zL+BvJ0a*eVWYTwd=eIbQ*Zol|JJk$AuQ<^5H!4%Rr3!ZVGodz$E)WQJ|J$yPm-wGg zO5Qg*e#^Ul@WwhL4v03jy{>udO~?KX6cwnpF9PaPQ$X?ZY(rE`A>PSXoqM0&ah56Y z_M~2^8~HR%+cpR-m8`4h7P))CZr>=E%=aQ3nS9siut?;7i49vO6p&<H~4(eMh}41A-#)Uw%cvK=xFbyLUwTw%tSbEv{SDo3w0wxHjeCKR=3xCbSI2N}&7Bf5RJe?IVBuj;Yfm~NEx=G806V5?odvzQQ{_w*x;=AEKc zH^02`nYy>rdVExNXg#;3-mz;6E}nCWF)Gk;i1`R7bPZg^O2!}m)hcjL&j3$fGkapg zXudpBFnt`ykhlL8eO}!MOUOq9vl(ZBDVAXQK>Qu%G<*Nnqt}q(H9)-6 z2c$M^$eqWWE)e`L22$4rKGKSvj&ox0t(aDIO-)@ve}8{sP*D6fCPh7bdIC&7PWXWV zl3G`F)OM1Q<=}tScb}b lp!jlre9r#=tTO-il5N#J47zbw@&fqxKtWZ$T-H43{{h_igT4R& literal 0 HcmV?d00001 diff --git a/docs/source/_static/thumbnails/create_gnn.svg b/docs/source/_static/thumbnails/create_gnn.svg deleted file mode 100644 index b5f8432513b8..000000000000 --- a/docs/source/_static/thumbnails/create_gnn.svg +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/source/conf.py b/docs/source/conf.py index aa4a486a7a2d..838df1ed596a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -45,7 +45,7 @@ } nbsphinx_thumbnails = { - 'tutorial/create_gnn': '_static/thumbnails/create_gnn.svg', + 'tutorial/create_gnn': '_static/thumbnails/create_gnn.png', 'tutorial/heterogeneous': '_static/thumbnails/heterogeneous.png', 'tutorial/create_dataset': '_static/thumbnails/create_dataset.png', 'tutorial/load_csv': '_static/thumbnails/load_csv.png', From bb612f8faee33475283dd754113fc574bd1efaf9 Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Wed, 2 Aug 2023 16:25:36 +0200 Subject: [PATCH 1383/2432] Add possibility to use main device memory for storing embeddings (#7829) When our device has a large amount of memory, it is recommended to use this device to store embeddings. Experiments showed over 2x speedup in `SAGE + Reddit` workload. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- benchmark/inference/inference_benchmark.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 316525789a21..dbf62e50f2be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,7 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
From bb612f8faee33475283dd754113fc574bd1efaf9 Mon Sep 17 00:00:00 2001
From: Damian Szwichtenberg
Date: Wed, 2 Aug 2023 16:25:36 +0200
Subject: [PATCH 1383/2432] Add possibility to use main device memory for storing embeddings (#7829)

When our device has a large amount of memory, it is recommended to use this
device to store embeddings. Experiments showed over 2x speedup in
`SAGE + Reddit` workload.

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: rusty1s
---
 CHANGELOG.md                               |  2 +-
 benchmark/inference/inference_benchmark.py | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 316525789a21..dbf62e50f2be 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,7 +23,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added the `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361))
 - Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458))
 - Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572))
-- Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548))
+- Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548), [#7829](https://github.com/pyg-team/pytorch_geometric/pull/7829))
 - Added `Performer` to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465))
 - Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516))
 - Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515))
diff --git a/benchmark/inference/inference_benchmark.py b/benchmark/inference/inference_benchmark.py
index 22d6dd7bf64f..7625735086fd 100644
--- a/benchmark/inference/inference_benchmark.py
+++ b/benchmark/inference/inference_benchmark.py
@@ -209,12 +209,19 @@ def run(args: argparse.ArgumentParser):
             data = transformation(data)
 
         with cpu_affinity, amp, timeit() as time:
+            inference_kwargs = {}
+            if args.reuse_device_for_embeddings and not hetero:
+                inference_kwargs['embedding_device'] = device
             for _ in range(args.warmup):
                 if args.full_batch:
                     full_batch_inference(model, data)
                 else:
-                    model.inference(subgraph_loader, device,
-                                    progress_bar=True)
+                    model.inference(
+                        subgraph_loader,
+                        device,
+                        progress_bar=True,
+                        **inference_kwargs,
+                    )
             if args.warmup > 0:
                 time.reset()
             with itt, profile:
@@ -232,6 +239,7 @@ def run(args: argparse.ArgumentParser):
                             subgraph_loader,
                             device,
                             progress_bar=True,
+                            **inference_kwargs,
                         )
                         if args.evaluate:
                             test_acc = test(
@@ -287,6 +295,8 @@ def run(args: argparse.ArgumentParser):
     add('--device', choices=['cpu', 'cuda', 'xpu'], default='cpu',
        help='Device to run benchmark on')
+    add('--reuse-device-for-embeddings', action='http://github.com/store_true',
+        help='Use the same device for embeddings as specified in "--device"')
     add('--datasets', nargs='+',
         default=['ogbn-mag', 'ogbn-products', 'Reddit'], type=str)
     add('--use-sparse-tensor', action='http://github.com/store_true',

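Editorial note: the patch above only forwards a new benchmark flag into `BasicGNN.inference`; the underlying `embedding_device` argument belongs to the model's `inference()` method itself. Below is a minimal, hedged sketch of calling it directly, assuming a device with enough free memory to hold all node embeddings. The random graph, loader settings, and channel sizes are illustrative only and are not taken from the patch.

```python
import torch

from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader
from torch_geometric.nn import GraphSAGE

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Small random graph standing in for a real dataset such as Reddit:
data = Data(x=torch.randn(1000, 64),
            edge_index=torch.randint(0, 1000, (2, 5000)))

# Layer-wise inference expects a full 1-hop neighbor loader over all nodes:
loader = NeighborLoader(data, num_neighbors=[-1], batch_size=256)

model = GraphSAGE(in_channels=64, hidden_channels=128, num_layers=2,
                  out_channels=16).to(device)

model.eval()
with torch.no_grad():
    # `embedding_device=device` keeps the intermediate embedding matrix on the
    # accelerator instead of staging it on the CPU between layers, mirroring
    # the `--reuse-device-for-embeddings` flag added to the benchmark above.
    out = model.inference(loader, device, embedding_device=device)
```
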
From 20362ee7c368387cf6b557f7ed775f72220329a0 Mon Sep 17 00:00:00 2001
From: Damian Szwichtenberg
Date: Wed, 2 Aug 2023 16:25:47 +0200
Subject: [PATCH 1384/2432] Fix device bug in `get_degree_histogram` (#7830)

This PR fixes the following error:
`RuntimeError: Expected all tensors to be on the same device, but found at
least two devices, xpu:0 and cpu!`
`deg_histogram` now inherits the device type from `edge_index`.

---------

Co-authored-by: rusty1s
---
 CHANGELOG.md                        |  3 ++-
 torch_geometric/nn/conv/pna_conv.py | 17 +++++++++--------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dbf62e50f2be..a886fea57a68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -46,7 +46,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383))
 - Added an example for hierarchical sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244))
 - Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298))
-- Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330))
 - Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339))
 - Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326))
 - Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308), [#7625](https://github.com/pyg-team/pytorch_geometric/pull/7625))
@@ -78,6 +77,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330))
+- Fixed device issue in `PNAConv.get_degree_histogram` ([#7830](https://github.com/pyg-team/pytorch_geometric/pull/7830))
 - Fixed the shape of `edge_label_time` when using temporal sampling on homogeneous graphs ([#7807](https://github.com/pyg-team/pytorch_geometric/pull/7807))
 - Made `FieldStatus` enum picklable to avoid `PicklingError` in a multi-process setting ([#7808](https://github.com/pyg-team/pytorch_geometric/pull/7808))
 - Fixed `edge_label_index` computation in `LinkNeighborLoader` for the homogeneous+`disjoint` mode ([#7791](https://github.com/pyg-team/pytorch_geometric/pull/7791))
diff --git a/torch_geometric/nn/conv/pna_conv.py b/torch_geometric/nn/conv/pna_conv.py
index 68eddc0f9be0..4b22b418eaa2 100644
--- a/torch_geometric/nn/conv/pna_conv.py
+++ b/torch_geometric/nn/conv/pna_conv.py
@@ -198,14 +198,15 @@ def get_degree_histogram(loader: DataLoader) -> Tensor:
         argument in :class:`PNAConv`."""
         deg_histogram = torch.zeros(1, dtype=torch.long)
         for data in loader:
-            d = degree(data.edge_index[1], num_nodes=data.num_nodes,
-                       dtype=torch.long)
-            d_bincount = torch.bincount(d, minlength=deg_histogram.numel())
-            if d_bincount.size(0) > deg_histogram.size(0):
-                d_bincount[:deg_histogram.size(0)] += deg_histogram
-                deg_histogram = d_bincount
+            deg = degree(data.edge_index[1], num_nodes=data.num_nodes,
+                         dtype=torch.long)
+            deg_bincount = torch.bincount(deg, minlength=deg_histogram.numel())
+            deg_histogram = deg_histogram.to(deg_bincount.device)
+            if deg_bincount.numel() > deg_histogram.numel():
+                deg_bincount[:deg_histogram.size(0)] += deg_histogram
+                deg_histogram = deg_bincount
             else:
-                assert d_bincount.size(0) == deg_histogram.size(0)
-                deg_histogram += d_bincount
+                assert deg_bincount.numel() == deg_histogram.numel()
+                deg_histogram += deg_bincount
         return deg_histogram
 

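Editorial note: `get_degree_histogram` is the helper used to precompute the `deg` histogram that `PNAConv` expects at construction time. A minimal sketch of that workflow is shown below; the tiny in-memory dataset is illustrative only. After the fix above, the accumulated histogram follows the device of the loaded `edge_index`, so the same call also works when the loader yields GPU/XPU batches.

```python
import torch

from torch_geometric.data import Data
from torch_geometric.loader import DataLoader
from torch_geometric.nn import PNAConv

# Two tiny graphs standing in for a real dataset:
dataset = [
    Data(x=torch.randn(4, 16), edge_index=torch.tensor([[0, 1, 2], [1, 2, 3]])),
    Data(x=torch.randn(3, 16), edge_index=torch.tensor([[0, 2], [1, 1]])),
]
loader = DataLoader(dataset, batch_size=2)

# In-degree histogram over the whole dataset, used to initialize PNAConv:
deg = PNAConv.get_degree_histogram(loader)

conv = PNAConv(in_channels=16, out_channels=32,
               aggregators=['mean', 'min', 'max', 'std'],
               scalers=['identity', 'amplification', 'attenuation'],
               deg=deg)
```
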
From 5285dfad2f1e34bf0ffe773534b8d534523df39b Mon Sep 17 00:00:00 2001
From: Jintang Li
Date: Thu, 3 Aug 2023 13:35:13 +0800
Subject: [PATCH 1385/2432] Fix typo in documentation (#7839)

---
 torch_geometric/data/graph_store.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torch_geometric/data/graph_store.py b/torch_geometric/data/graph_store.py
index dba5775d9d20..a0cc6beda5ba 100644
--- a/torch_geometric/data/graph_store.py
+++ b/torch_geometric/data/graph_store.py
@@ -7,7 +7,7 @@
 * The edge indices we care about storing are represented either in COO, CSC,
   or CSR format. They can be uniquely identified by an edge type (in PyG,
   this is a tuple of the source node, relation type, and destination node).
-* Edge indices are static once they are stored in tthe grah. That is, we do not
+* Edge indices are static once they are stored in the graph. That is, we do not
   support dynamic modification of edge indices once they have been inserted
   into the graph store.
 

From bd204855a07b01559e3b27aaadc76dc9466e0769 Mon Sep 17 00:00:00 2001
From: Matthias Fey
Date: Thu, 3 Aug 2023 07:43:46 +0200
Subject: [PATCH 1386/2432] Build PyG 2.3.1 for conda (#7836)

---
 .github/workflows/building_pyg_conda.yml     | 12 ++++++------
 .github/workflows/building_rusty1s_conda.yml | 12 ++++++------
 docs/source/install/quick-start.html         |  4 ----
 3 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml
index 94db4b342cd4..7bb78c34bc6d 100644
--- a/.github/workflows/building_pyg_conda.yml
+++ b/.github/workflows/building_pyg_conda.yml
@@ -10,7 +10,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-18.04, macos-10.15, windows-2019]
+        os: [ubuntu-latest, macos-latest, windows-latest]
         python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
         torch-version: [1.12.0, 1.13.0, 2.0.0]
         cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118']
@@ -39,15 +39,15 @@ jobs:
            cuda-version: 'cu115'
          - torch-version: 2.0.0
            cuda-version: 'cu116'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu102'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu113'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu116'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu117'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu118'
 
     steps:
diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml
index d0b88eda2878..3a2f369bb8fb 100644
--- a/.github/workflows/building_rusty1s_conda.yml
+++ b/.github/workflows/building_rusty1s_conda.yml
@@ -10,7 +10,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-18.04, macos-10.15, windows-2019]
+        os: [ubuntu-latest, macos-latest, windows-latest]
         python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
         torch-version: [1.12.0, 1.13.0, 2.0.0]
         cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118']
@@ -39,15 +39,15 @@ jobs:
            cuda-version: 'cu115'
          - torch-version: 2.0.0
            cuda-version: 'cu116'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu102'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu113'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu116'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu117'
-         - os: macos-10.15
+         - os: macos-latest
            cuda-version: 'cu118'
 
     steps:
diff --git a/docs/source/install/quick-start.html b/docs/source/install/quick-start.html
index ec6d723def7e..581b6923fed7 100644
--- a/docs/source/install/quick-start.html
+++ b/docs/source/install/quick-start.html
@@ -120,10 +120,6 @@
         $("#command pre").text('# PyTorch 2.0.* binaries do not support CUDA 11.6');
       }
 
-      else if (os == "windows" && package == "conda") {
-        $("#command pre").text('# Windows binaries not available for conda yet');
-      }
-
       else if (package == "conda") {
         $("#command pre").text('conda install pyg -c pyg');
       }

From 26c051dfbb60cc68bb0e94f18fd6e449ff2f95b3 Mon Sep 17 00:00:00 2001
From: Matthias Fey
Date: Thu, 3 Aug 2023 10:14:47 +0200
Subject: [PATCH 1387/2432] Update thumbnail images (#7841)

Align their height to 240px.

---
 .../_static/thumbnails/create_dataset.png     | Bin 511735 -> 54123 bytes
 docs/source/_static/thumbnails/create_gnn.png | Bin 47084 -> 21717 bytes
 docs/source/_static/thumbnails/explain.png    | Bin 13931 -> 14188 bytes
 .../_static/thumbnails/heterogeneous.png      | Bin 141208 -> 33157 bytes
 4 files changed, 0 insertions(+), 0 deletions(-)

diff --git a/docs/source/_static/thumbnails/create_dataset.png b/docs/source/_static/thumbnails/create_dataset.png
index 77960ba0ed765fbf00bd33bf64766bec5871e0cd..b43503dee5876e1254e1edcd03af23ea3cbfec09 100644
GIT binary patch
literal 54123
[... base85-encoded binary patch data omitted ...]
zn&Qsfd{lCHy0PKn!U8%rv4QhDR$u}OGqVjql8LGAkBdc{-oi+jCyQr*lxjfupH@bk zg7QqSt~dn+!&#(nUQKV~P9M2gkLR?tbQng`i3JgeKrj{l76bdSPTC5~o}O&E@4iSNtb#MV9&?Dc(@0&SWn?Ulk8b?zg+UiD zT_l|PUtceGLku8=*JtLWb@mctyUD>^3>{B^Nhy=I8B^nPSF}hmWvoPWZ10kqvx_6p zz5qP&(UD*(ew1{v1Sr^s#-?LzzZWfyO{uFM6k^R1Bxn{`!vq^oXb4!mtY`xie;iAi zzZoClW2}Bh)>jIxV);y8Hl=h-z|Oy#+RRJ>e6u>)IrWyt7U=cUPWBh*-H{syip*dA z14)A;WsKz{#W$;q190`AT?@%cw6(wNUq4va^)#U2Wh24^3Jj^;Vlh}FPleA-`rjW>zu3OWJ+0O<&`z-dtcJWB|ByS9uDPlGLtq{h6ZA>Quo@3aS*!s-LT z%=0mH8xJ>NZ$^$@3Om~#^91cg=3X8uDX7OP_fW~HiN8mO=UK&uF1f4HjBqm$7BxBD zlAwZ8JpEzObNA>>x~QtImR4hJY_#kc&voS_Gz5_$XCninNA<=Ni#sGruSj8=4b2|) zcRJSZ`67+Ukgv(sRML=_lOAqsQd+Szn?emyb5v4>T-=|U&c61n4#qhJ}H8)l$lBr+TupP^E&0D?R5VM-|^cZHcvuplGUM5(4D z4Gt+VNbZh6tTb{2>-3bYFwS%QlZ~E0Xs|S3b_#Wl*;4@J8FF)TL9j6v?Ak5U73Ydrtp1Qw8k}6Bp-Q*cIF$P2xlruGNK>xAT7Z#NKyE;S^kK-Lu2hlS_WFQnJ zmvSdJafYRXCrKQfSpQ{^pPHOpUf6S4kj``q2X_ktBk}gi{*z;?ro%JP*s_L^eMgT)}dzSep94z7_2Pf0fzsB^m+q>J)bIFk*GRG-Jh54%;hxeELHy4XR z{_cm^@c=TCK*{5y)6fuf^!)nv_SM zow>}U#%xvq^jXMcf0822m1TyRzviCGGQm?KqN2NH@bJ-6RPs+BuGEd9|v>i{wh) zRb99=K&ZZDclM`}or3`}3aJoMGs;y_Rb^^I4nmfuvO44|Xj^hpk~Akv6z~}t2@H@j zw>3i1zKDy34pSryu{}PVHnw=@uU{B-j(t?aG?uuy)ffPqDD z$9ak?pAtIZBZvov!{)6mZGroxrY6f0m&wUZ(9v&hu5#J>F7@pinrk5STr4kd%FqlYRCEz!Wya5F)pz_?J+&>ZVIz=>{l zTywJ_67a?}6ZnB$6VgX88#>O%FCG zl9DM>QduFPKs7)D)pTD#=rO;52N$Pd?)nErsIeGHrNnd|IGA-crRt%gN0kj}={QSbgIz!MSG=fFYswxpCpE35YskhjkgxhDwT3A#J zm0w&^$|$p&Z*FY^tr8ob0D?}dsTmm=LENPaXdf?M8$%UEd9WZQH4Sj2xP(MqLnAL6 z<970y8M7}Ft>o<-ooN=^%M9xbPTv<4R@2tu<`;}L*SgfA3rgK0`E6bQetMi=fJ_4cI#%y zV?{T24=MRa!7eW8e7tGm;)W`!dNQ&keL6WTexv|l1Ckc<=uuKiYJYckH76&oSjKjD zY~SuvzyqW_B3D~S$Ge3M!rm@~fLYZV8RA(tef?FrgF|v!T5VnZ3#1Ku@)YjQLI^jnJ9cQ}Bo}HT;7!nF}`7(9`B~#516{## zy!@=3+&&^AgnX|(e=c5JtoF?51P%_3hoVG&z(<-~JfL16SLXWqkni*(&z!c<)y3)O zu3VSk~RyKa!?w(#^=3Gf>8QKJCNlHx(4hu7) zn7Da*Lj;G12K#z>OiWH9cTG)A5rScMb{57>6XHKWE zFtakVadGc)8*c{%yA8#m}XD6q0jGe&9fh=xZ-(lRo^tag4f@V%zUNbu*n zOBXv*Qr>=zPS{(Aki(3=gJxeJLO7gk=4DfNkVEnd3S$!zo-krnTYGyYtvWO!60QmG zAjuS$mQGAeLYsDUbfV3rKNGaFvurRaJ zYYRb?L)XsC(8u_R)C6*+YiJDD2Sn+c^+ZJCa!@-tnVF?VMg_$XCc zTIbQ@EH17TVPRt}?MEtV4<)7XOw_e>6lLZ3cz88*bOVAz=r!+bY}#^j=f=i}ypf)j zwUak?Y;AS5nulk*pG`*Q?HLn1+TIz2>=;xy)Lve@QIU)Ru(m6HoTQ(9II_qB0wG$fmVfZ-ibe80e;&aQ5Xsre&i zRR|167dIKFWJ2CNv`^v@5R!eYWa;4gnQkK9`CKnCDXT!wh(zBMK~2Na)y>|?#mUv} z;!y~Mp{th<*PY;3VNpa<_#B-heg+} zWiT^~@e5pIVh#-t7Zw%A6QG!xBb|at;%{8h-N7+4C@3O4A~-Z0=@E$+nHgFfdE?0s zNfh#VePbikU4fn+ZW1iI!PWIJqJ?&*M@K*0StrP?uY`l5orQ&kj?S*4k`nNf(b2Iz z&Ef#$3LH>gSy@G^Cf=l%#&Oji^cHDM@$T#E1JR0#jkR-hGBLAAOi4wk1`~rEkBpA- z4-7_t2gO1=u5RvTmR1Pupfv2pL4^7ju%QnsYZEmWbuGkgqP6Y(7(05ZiAYF6iz9m= z#KNT>tE$;4^RANrxJCMP4dte?x`v*SvAvTsSPgytVg;aHjnokm7LLY^vfC(VSfL&NN3S09aXFlfLvtNx7Ekb|XM2B-GvA9UT`B0QC0t@9tco zS7D}CpDC)Stm^OU9~l{SbZ~5qiS9~GT{&cl=+=`b4fgi+H*eNF5S|=(D@N8|D?*l5 zR?Wim#2ix!YT8~^w+0AOd+#6;LV89foCzv|XnAsSs+v*IDJ>%_E+L61^D)v|u}S_N zV=3<+eeFo#NFW3K1A~b400|(`($X9s5ma4G15P}=ed)W7q`J+ReqK$z<>Bf8Fi@-= zSx6B))+uF;UBRJY^mQ5%(q-ipK-Kv8L~VV2U~s5kKoDFOZlt22>E!CFuC0@zzui$b zWAYy>Yb&HFb!}acG@uU2s;9TNr?(FRJti*RKZrg@E5bh@m7BZh4ioj!V~Bea)eM3s zGA33@PTp8f-qzL*Oa|NrvghLUn`bXyy?OJtm84|FjT==L&TH}Vsmm+Su3e+|Wt~04 zd5=?)i(8eOM_E9S=-_IjD|K!_3;BVMldJ6Zt@_iaLdC>#@7!tl_L~@9-iN{>kCaqE z;gFy>c?AluUnjuk%g?W*t|9kWNmWbRnwO6NWb;>F+FMz*?JB&FP&F_(h{%QHJUu-< zJ26oqDOt(R9%5{KL~(1V62z~e?(X^*9IxKR<&}8Ie`&83A%Phc1Vy<>N*o+L)}Eb5 z+w{yVvuPp(z(dEh*|~WHFnBy%s<5~?B`u98%2-@lw)x}{(N#EbNo2u{B z4O&}Yr&?I)>*@5GU3^0NU6Unia$ zBBOA;r;lGmbWCeoJJ1DPj*k%U{R4x5So%mG)fAq}$;*!<2GByvBO`$d0-200r|F~k z&?FTXFG47%-n++kkMqo>D}2Ha1n=`12tVLq=TPJ0hfD`5GT7N-?%zj>$YNr;b@TSO 
z=PrD6_JSKf|8}?7#S2^Kn%>6u-$cWcW>|P>~3r8C@D!#NdeT0N=m`F@FwtIK&5Vb{O#MzFH#47DNru} zsiCPkEi*GbGAcPG6;Gh1y1J5?nHcYHO+D7W3qQBLR)qMcmlFs%XOs|~l!nK5%-Z^g zSyE9A!Fm3po9rE(1`-T8flN|SMRRudfM|yEWoBp3!RF@?(!wJnJEO=0mY3QSY)eX! zAu5OhY8x9HenG+6fmTGMe`@@zaAz`f^U~ttq{S(sNOpgYKVwYY*WTG>YR@ATufox?-IBT~}SamBH*G29iIYKBt)HsljD4$cPZ1*kxiq^4&87jM{b^iYuDVlxXxX^=Ek=j?8`6GD@fkF#dPN4j17pD;UY5Lkr!HJNd+Dm0pl}v5bJ~Lk75N2_ z>0pDnjk{btR%DYjADS*@!*P>%+V~~tTWLEv5sa~%l{F%F}@va;Ti{c>L`Lgp40RlPFU%o5{^ z>NYnw!H8<=>N2vk;n{R{^y9n#I(rAc(x(rcUm+*NCa2HN&8@6};w~bnLxhZujS^^0ga+Z0wB; zFGm=b;};NQzgK{ zikYO;_K?WupVg$K(rRjm(!+L+&c-B4mVrTun%Y23&0*BKt-S-m<%n|HZ_r-1QU^&z zwAj-ZzOu5~+0|WIUV$*&*4{ofHGRy{bXY@2Utkukqkjb0sH(1pC+}Mpia4 z&X`v~@1X*%+`%pEGE^ZbU=GA^$oxbuh zA>JO|K1ecr!Xob8KFiB16;)M$nU!D6KN#J_SB+z{^YW2fC}tM=MkHN*gJP!Jgts}V zsWY>4I7>?x^YaHuNsEX{K$lC)KT4p_Bcw-q9C`W1ElD0;$^#K0E^c8S-Upmqbqv5X zeEY2wAOGnKmjZ=^idk8$c*#vu@w_?u^~GxR@y`nHegJnVFfHnVCx}Min!Ym|3z| z7Fe?T_Uy>0Qv14Xx83cw-FLlMxoTBqMMOr%`S!Q>K5@=>(i;XRm$#lid-idO49H?6 zsy`Ez7vJE_A!)^wq&~BRzKPXyWWO*t2YM+eJOc6tEsBKw?d3K0_RqngBEbW4F$syV z<6&hzC!+ccqI-O!lHeZ-OUlBdV&Hai3ksv-5&)r?_(T8?=4W|jwWY1YH!#@CKQJ{T z3qlZBf|s~@{d#)Suv2bN*T^)n9O;V}*Kgc_he=6JSv#E4l&e(wdwP+IfA8qg7*8*_ zb1%Pu8nqhk0#Vz_K;XIacFfxuZPCLbDZX`MQKxDM^S znJL|x)~9(Sg4~}h5SAZ49vQQ;)HO13w6_QsZPt zho@hFX3enGt)-=_6BCc`+(E40xOo%&41|2~_%ZG$($>?znx6jd?c43*qD4M$tswvX zKPI;SsoU=hA>{Ve`}boj`^7y|D_eUrvvb3vV+650`ANib`NnOX7(#bxMfVn#SABUn zPkm|mFH6RN%_SLUNCx00cM(IO+S<{UtsNb%Ufu|6Pz`J}ULVuim8;h>vU3AN!(Y63 z3A%vK1_s8b=N4DiVXOh0l=MtUvf{GxsJQsZSYimHe{iUinxhTM`GPQ zT|B*!m_x%NVwI{48?}hbSJRnn1}jz|Jm{`He#}c{GjDjX3POXY$NnKgB5dvEj0`ul zwQ_~RaIvIEuIRJ3OIE8*EGWesF6kDADHIr$(nCmW>S&voTT1E7VN1(dGt(f6)Qm#S zk}Jlk)Si%#yn;di;Le>pi^~TU%+N+FYiqC{cn8T2x&RgsU+vo^7|C+7XKHFb3bWfA z8~;eakZWuGBWL)L?e~a~mb061-UwetO5fYJZ;3}AuU=_JhR(va!r_4je&~)&Eboxg z<=;XEe4|s5$jhs1dU|`CTiXCLa6Qb)=-9YjddnX|iG6I6>T>c5LL;N&l9It2Jh2qs zH>B(gC#}yTp`fO|0YQB2`i+gvEu0GG0a|EqWOQ+P*D3hmo8t)%*_Lt$kldm z|3Ft@C^0~o90zG{@2Ie`H88WZpwi=|vN0yJ#=()z7ee&YIXqYqGi#colgq57B_eFn z%q+pymUJTvCdLj@`N|;+W!u?z+S{KMi{d3xI1M8cGb?jTPY#zH0KciNMWs-WY3ore zteO?dL5W1h;U3q~3*hj!PaFs2`k72U6SEV#1_q|)Rx}1KCNLySs!}_-dEh!j!XuUv z5>{xm&En$s@83h*4vvgG)07uNdqKYLYbGs{l_S6Y=9?EUUtYd)1@|yMDWxFn;ga`o!NhmY>vyN{^d z+B=uiG8|jcRajb4R9e2+#g}FsGo6{kY zkcMRM_%RlZf%iRF>;*%^bzPl!fl%MX%*>Kv$7Dx}#ehveg%TkU8k$+!ilzNj>uFPy z6kA&)6dMO8Keg?7a%ixw_H=hQd?0YLwX<{e+I6Hh00a5x-1+n1Tgc}5#l`CShP13~ z&<;c|nEZTa=cc#!`L3=XQ3ue#Kf({h;tKZmBwKE!ru+bYx40REOiFE= ze)jD7&0DuNwuqH1msVEcCGZZpAU8oQH*NwaGjsF1d;9SBApW$#L=XR#ph#6&kg_ zw@<84;Z23wJ~cB7sn^!Yh07BRQK^tpGo~g^HtK0hi*tYcl)TRPe^EY_^4Ntc~nHlOdu<+UC@lApIQ?`AQ_ zOijZ%Tz3xloTe{$$-qDQ+&vpO5nYverkV0#5piFTNlLn?QE~ z37~lR4ftKnLY@cf!knF&0`J2}jEs%LG}ksX)-^N%6-e!Mjm>q9O@IwK=67jj1%_d0 zWW=walbw1xqrMmR8mDdT=tO8Dk&_Qt8e3WymX@k&>yat*i%W?4Np_C@rM;Y#?&Q+O z#^zSAM^1i0Ku9PQ8sTDBR^t+r%gW1_#!ip-ci|lXo}5v1y0>q5bQIT|n3{%5N=!|U zcXKDz`GrqDNe&K%!Xe5!TGQe3^NR@V`PkZ%BTDd!mB^%WWvZKdfXc?u)Ew}@%TpjE zpJ@!xXs7fI6D7pVrwtvQ5H3$k-)NA@B#%99ZEZoJ&TG1}zyrfdB{{5Xmct?GW!cCu zOfDxmYE@59#O9_5MdY8OG@4AVG`6(1VzBb;?E7i7Gt4IO-xS0as-D*l$2L=oj&vA$&;LdLUKL{J`~r3oSIuucRw0vj2?UN(nW4EQ?20vu!{YKC?CiyI zC4(z4v9d0*RNi^Li$5{ z0SQE1`nmb4@`}wvH#M1=3C4Kw;^p_U#JhLz{&^LF3529~%#ynQcvN3~YbQhnk~9(< zniFvY?T#zM*ZAqrDE@lG7yCyJ|C<~93ETqcrM;^gM8_qB#}GoUxv9A{BeSTm2yEl*;mH<=Qo-;OZ9R?80HNAgFQ=AB?OS=w~`Q^>8?~?(WFq;DII%H&#q^^#VU<`8+c2d#$X} zRReNp@a-5AhUg zXk=_`BqZcu1b~75(<=&1uOsbj|D>h#NnX(=*{W zAjTknLFkd?2c5hY@uzRxx`Qv`;_020o8Nk723rovw9bHEw zdOAvtPCr^)G=ti1e;>|9VvbX@%N#Qz{I_(GbP!R0%;dC=HA8iyC3oCI7oE^Gu%%0Gtp=|pzLWfi%3`S^xJ*v6OJBvvP666HX*N?l&Z z|3A;tPj;0hyL*wLUc3E9; 
zuFI>dfC_Np6A*+y$#ro{E2{7*Vg7;sHnt8pKuyce`MIr{{D|Nah~Yy>mH>={@|qy4U@H-|Ep6>dsp+u^Ndch|T_ZC&B~>sXcn6mRt6?k*4821EocmxjV-)BU=Wd8Lmmsr z^$rZJtgX+lZq_t3LblCl`h4S)Q^X2Yynn!=l@)?-*vW~?;^>=Nuvr|nK*(Too4H(K z2GWV+8FEE}R0dtNrFANt!__k~$>MM)jEze;+#^I0ZUMHt%YQX5;m zk6a5V`UV6k?HnYIUY~^#O3yf|-POxSmD~tEp+(k(MaRM4;|#zOJWP#RO@=tqKaVdw zrEdh;CXgz0jZE1*L8z^rJzr>INf|ke=1xmXQ!@)Tg=%VUc}mY9K`PzS(LVp#A4jYy zU55&7{;+JymbUh~k!C8Do^G$R^O}h;;tFNVRSjRgk zAU|MfVM(Qn1R^$H$ma5L(lY^OXi;d$u&5YZ|JwQnWa|9~55Sy|sZf=KZEI?diAw;B zLS_7~6?T4b`{`2)$?3AsU%UWQ6a7nLQ)JTppP~K1NjZ?_grBZ!=sJA{Mre3!yz5N= z(C9ei-1N*WxFRzrZ*Y9rwBGcOb)OxtJ2AUCi{J*#fWzJWeTYDqrPr@tqZPqofZ3~8 zuWITWAq+ZC_r)fqbaZz+xqJEs21}GSUV$Ozab2H;_puY&E?v1gG%^Y=0TI>OJ8Tux zZXDI2@(N}01O{dnU=qZNLTv}@@0{8CKSKNezpy^f)Qa>hVhV>;rS=U92DD5mRG`J) zCnTY)H6k(HD?I*Vy`rG>QY2RpAhDLn`uak9`zAvC(@!S2T%6g%&BM>nUuEYYk}H89 zSY%yeQ!555ODcuzL44P#)nisxAePfqYLrY4&%J75G9;B+T2LAetq6?DmbMo59~JOn zU0~m*1$-O}rm9q{lR!r&PvFl^m8x1SuI2N^43@7*R3;P-A5JbGH8ZoKGlepxlUO=! zVGeCpsZ!xa)yBp5L`AjL)q`Mz!XrF<{ff%Ufe?7GVK+B?ku|yO=A}!E%gaCxNgZhA z_kT5j_4m2`W={-pj}WSVlWJeSdc{cX|DR#~%(Q{q_Z~pklIyK4F0YU)D&glcvC{EX z7~h8vA0lbBoN3dl)doWF8dw|v7;p$VvkKY?$L{T)gGz2}X)P|RfY>9Skq3o_vjieg zM?y*(L>ltFIv}15U&OIfV3doe7myGWpBNMt(a_R*{^DiO9TXklgapsx3!UAG;!%NN zkp&H1iMeG;JI8_Hk?8nDVvL5nCr;??ABYcv*y|q}URhnMZfH3XMX0+^!~0ST%HiPg zYSI+T!X<^extE57L`O!&izOR|28$NvQBF=S9$rY{$u2JTLNT(tHJzE};1HrvXy(u! zROTH#tQE9sWEiNjsc>){kw`{_f;lE*#mux-rJ_)%b!^UprNxT*!Pwpw4!iNN>d&09 zv5~1c*u$R1A;*I)Q7Gdq<^;vcUm~S*c*qE4oP)=U>$*B|j!wW8z&yp}hHJ`SB&MW- zG2r_0GqUzIXCPi}ZNS6Rr_aVFCiao4GBY=Yg3TYEt^WJjej9{*{q@&HeM_c^gX)6e zM^B#p_|A`>K3_Y3{o(iR_NV{+Pe?ep^9K(fLVZEtf?K9$XCu2J=?!#Yj%a9f6bJ!t z;1kylhsObyH}5`N+uDIM22cT&$?2Ka_6`K7X1Yj4ZebCk**hfacqE~SjN(r7Wh!7m zWlvOa11&_v#ML)8Rn%1%lom7jLY_pXc61`1m&L|IaruXY62lgwa~SD^-00f4=P?%x`>KWeSvH?>X$2t~;B;EhE?gLGRvayamuiE)ThwPa`rc{VB$lM_WIsnl}0g49mf&6+~h zHZYQMdHqTyf*VdIkjD?^@>WfZ;aiIMdkKy?OJNd`N`2|3)(cg*+`1GdYIz-B<4(J$uy9-0b4%-PYN)y|W7!f#{rHSX|%O z$mp2Xi|sd!>&Yywg|^8rD#E!=jZCx*jMBuC9ZknT zx>8x<&`o{9gDne>zc5ZTddi?C<==cQO2=p%L@{xlfy6`7F!^qfp zM^`tZ5Yd*CUjSw}-PZ?Qgb*}}J$Nc2h^`b!=f<{s^v$nQ(NJzsGL34I?pT-3@6c_i4#e+6BUaoG|ERKt_ zTfRb3>F19uZtLXSDG(qfFB%)Uh$VfR3a-1ykGI)7bn$pm0^xBT{iE8tK|KDNzTPsS zqs)P*ZW|j%u_RrgY!{2k!qvOSjxAarJOJ_G^5}Ht0q{QkBvzpu*Cc+T1AxIWx3WI2 zqX#O(rw<+$cRbBt5Kn$w-NC)E9OurRAL#EVhZqvYvPMU5-MW2tY8nX+`3`9d%``DN z1*cBVxrGDSDKFo!wcTlK{Qa!P_Cwom=0o-_--xOkZ69CyrJl7TW5JDl=S`pm$RP(- zrDx{=KOie*-Vh1S>cOQeSFaLv6I)tq8=C~_2gtEYZOqCg2Bnu(R-s+tc0gZHM_jQK zOoH@})4JBX0Un>#eJ+ZTI=Xq}78Kb!ItPS=X6F%T1Z9;pbkHNJr9o+69+;48?PbQ|Ov-1lkG;<73_w_$|^k_}3J{W6Ar$cTb5OKJnnI(%S zD9A6gpwg96nYWLhmVvPqmF_E*#W*>Wng@zYEfAj4Hyk{C{5MLadfTW8gZ|lP%?yT; z!6f(GJ9cc6%LP$@H;SB{lO3H_HIr{aGOj$n4_`2AX1ZW)-K$oYN@YeSuq6gKZ8~2- zR=a8o3-=(_bOPnD~!0HvswT^cVjg z$M*hx+tTtfG#8{7QrGff-XceBb!*CtATFn8XTe4H9)6Kf*492k6!XP_*mHc6C z!QzS}shQdFHT^C*9SUbJU1PJBw)TSJ692?ty;^-B%CyGJH#86~HYhB-tgT-srtd^l zUq~iVAq5y~Y40GaLLT-$b#pliwLLh{)zdpRFlfrs60)j_&j(r%vd4AxA*h%$f zjx~g_4z_nl6^RGsau5puP{rZoNe-%+tm^BTn_I?+L<0Zbo3WI?e}i~ z+6aLidHCoNe9PL#W^H}r^z7Wm*4B-iw_Y7CyN*P4`fP90bW>etT(w?q=g<>OzW3gwor4un}hj~mA02e3FC z2CI_8ISmIQmdqVaRd1wF%q^^DO-wFm=2~P)B>h%a2mca_Yq{Jl9qmbqRYF80jl;t? 
zEjxV70y7gE8xI{z40F!TfwF}#CI|0)`1)&TNaTq1jZF|Wz&}Z>$+!;b1*&|34Z8X{ zFcINM`E&TN%x9{JwthYpTGHaMCTdTO^ zh&iAF+5nKabN3z^5Av+Jtpj}0(A1JwScC@7&daySwk2%_w}u#ky1TazCFbVk1I;!% zF|oF>0eKBzq@-s8AxWv}9=?7u8(SKiCk!hf%?ew&7(lPs;JcC+Ioos;fkTbJcFKXXV)N?6H6L= z_Q!sOoE$u|XkwhIP_F3fE}Ixv+S(>aWgiD(WwO}r9PXHz>41fWkFDKDmy%;=@9gf0 z%gc6kBP`9QpG?tcet|*m-oE}Jq3KyU0fE6uN>!Dge{@6?z9ZBZbQiJOV|qqGUOv7f z7&W($NDu%O?jG_2_USDu-ZU`S|NQeQ8=K9|tqT{4!SLiXOmaLtk{}WsK!T(O{A{G8 zkb^H*q>_L3Q^?;nLda((_wPTzjauK_BnEmdE$!`}!%cc~I7Q`a#L^Lz6BQ{ESyqZ6*Y>W;t(h?1b0kAQe1kTWpt~4 zRJU_NiHEO0f(a+AuC2Fsb7d;{A(4>a|&B~7GEe) z*r*+yRrZd63NQ(OPpp&Z=$zu>n#&icMPgV252cDMSxr0+x3fD#r)Ba5h9+jo0^ypj z&YX!!fLO|8veyg^NWhdqdQlISG+3a~NWTtXFc>T$i<4_>w|o2;F`HK)m^Lx#=dfiQ z9#kBY!;^EkLJn_MLp8*+#e{^Ah$wv7?A*MwMMcxGv2C?=VUf|^{(*3M&{WfDX~aB} z&;Hntse?T0IP5as(bakW;KAPv zjAsuX?4+b@`TO7b2d3}-#O*gg$k*R|gZTd5iqzkH^QTv@UV&$kLgwZdP7~D=i7`BQ z^M?%mwV^_CXziP%KRmvF`#b-A^!O3Gi4AQdX^`H&cN;_j$dy)9LL)tW`t-@ur#QJ) z{NXr}@G@H`XQa2bj?ScFsaOdLWV zE*zEy2C1yF3h;pUl`08rDD3RRqGG@sLnEWOV0#z0knl)1Z(llx2dKb%z!6bdoKTUt z$BI(w=tMsBfCcyG@{VciS(sZ!%H^}BCezlG1&xQ zQSk{N2V|VZU%v^inXpJ6%49Fr<3H#^CxK~nZ1Mk;2$7! za(bq)q>Svsg0sJI^QLutC)q74i?2ar<6mEX`Fd_)v8T5;J~?G+c?BrKA+d?ccF7I^ z!{>EJR7EyKsdgtBe%B!6s6&t|-~j^@6c*v-7vLWp3c>*osAAQm17TLN(zCOPg?J)8 zT(aGQ62fxx3;lwEeFB5QGjg@91&yh4^UW$M2LPhu5;Al05|UFxBBN8%GxCaxAxHol zV!k1jE|M!>oYqPgH+1&@nV27XZO)gu~92NP-0-T|;976EhB# zo?vI+z~cpq#1bBVS2GqEuQmtAHC^596DNwK(qZaBm+ly!zi~K$DWApGF*1QukBW`= z^z~26%Enb)+TXu-@BZV*Pih+)`-g_Wu0;1kOzeiC!G^wGe`@OV%4uDAL+v zo2N4iN?|n7qWkC0!CseDRwKJ3?s10a&z~oyrOOKCr1aucaov1;h-!$%KAbYHJXQsb z0saAsfC>{UDwKD4bSzNhUg$|4LaDU&^7of{*l24;#8}l(vh(v3Q`4{y>YKj z64rM#{Ip$Af|I)^G+Sg$9CiSQz>U<@)~S^?FdIOKoXcA%nkW=|R$*Na4Jn#bgu+FB9z4$B%}j?4{{;~|kj zQ0W?(bTAI4>2?W)eF6c_0|X|kY+C6IFie9=Wn=H?VCOI_mF^fBF1fl@R#ivECE#YZ zw0Ass@Q|?GckkY~Z~+EyZef9#{;;vJy1YD*oV=EqdHviuSjUfZkp3mMUmGFQYkOp+ zpW{(|H}Bm0rA`ap0Ns;H3|VS>XJ>w45n)aK7kXg-{Q2e8HKeq-o;ZVA17-s=qdL2{ ze*p3Tt=My>56AZO_JKn*cJR*j?k;xj=o@kL3xg_ylMV<83(Ji(sWHLfj%m)2Ll7Gv zjpnvCn1_g%Sf79(zEqx&l4eO~cAq{I8WDwG0;9s11uSYgRTieQpfSY$a*}pV)oQu< zdb0!~J7-tu7F#D*^O%Doj{N89xRdWA1;0n z;^K-=;fp0yCZ~gSP}87CC@kg+CPcyw&1Aziwfdkt`1rAT2IJi4pX28gg)*d6)Nwe) zwzd;g>Y9#D4TE_~&wyi1t70ssDRuzjrRt$CZ(t>k|;@;OUZ4m8o#BGqSK+Tv{$JEB6fW zlY}T_zAES|Sc{01XrYUY#^pM@dzx5ULvfi~)9GA3Pb`g&Psqs6;g<7&F>$s8UIM_w zQ69d2pe%KA9X<=6DDntr3&h}-= z6cvgkY&z4-nr32U%@K-qjZGt!DiRVK`ugd~DY?-xWIw^SuFn3(=F3;F&R-+|@Dn2* zEDc4yw6cnJnV6hfSzUv)h9~)U)w$o#_Pg%O`dQnb{_{V_&z>bwM@%8s%w&B3;RDPr zTs1N}j6r)>m#RetcpR%c?vdez5Cpw|H<0+RTqT;Vp}7T;27!y;)oHC{Qa6h3wsUr| zac~Zfjxeh-gRihIr*h;X8k^_kAE;~~dc9!T1yN#=yX;imDeS<@<0ofiIJ&zzRl3NF z6t*5tenBC?m@SOBt-U}dw`Or1T-^ggiDJIpX9&YXOfqrwlsmXWu2j|50k^4{SfzRez(OK~HiSdcRt-h(nDSbmAi0h~lE4q4b23}7dcI0=MgJ%@R7vmTY|;9mm^%6-{M&`)act%d{LVy2ge0+qLw{KwZg$oy^2L>1C=6`x6e-QP@Wsm->p?#Ro zo!z~@!6DG^{Nm!J%U7O0efr_Uf9ali@%Zs(OUuQnsjq*<*;oI45Q6RErOUX{kDorX z%^x&~?W-MHymaMCNqGhGbbN9ugwxo>+2qXh)P}VDhQf2_&ri+FE-Wp>g(E1z2Jxkd z(aq6ODRIqh?L#A@4Z};YOmOf4WrIlM^-ax94I} z78LTOGQib}!NQyQQPFiSuC8(gjIyJvJ4Yx1HgKwwdWJHIEX&3wNFp^hw{n(9^K8^Y z1`C+cGd7irB-Yk6EdwJ8#hOO3KBldEy^$ zDOPuX&OluVt<4{(-}B8k-@N+r%NsXtE-o*3boW3EW6!HsuCChHkifp&*Y^vB%-`Pr zTOkD0fqPoiyYO!ZrCW#<(68UPF*!Z6xwZY|$x~1S{KNX@CcFkY=J($H2apGS1A}?p zc}MGxA)2kLtaAzr+Pk`kN5>kv`n@8PV-u1*eEa|srL(W=Wz`PMA9C{s)#L{ud%uc~GX#m*kYL=oiv+WJOUFYnBpJjgHz zFjEQ@|8nv4E-xvWl}I)W^n2YspmPYqk;;WKh1$W%Hz*jtIJ$YrHIF9Qd=V@@sAimg zup~{pQn{lUGrMSP?5$AFn45t^8u zWP*Z?dP?6&$>K~~S;3-si6v&%^lXv%;L)`;rB$13TTma%JoA@y^-s@P(x%(Ef{;-(Mf- z7u^0^Aq3mp{6cy6#D8eI%PbRnfsMU$=Q?{%FR!d#y?3>2u-wkq8ENVIb)w2TJUU;aYlj1gE+gY}I~lQ}tL$LN-}mYuz$y|ZgXOdO0#NJJFS2y#P!DrE9D 
z4rj&Gv|J#NtJG_UwH;tE$ZB)|M#o_l5?GFL;epdQYiz9K@P@4@ljddt9Ig$6McNy@ zJb8S5V^ao|ZcS(ML=pg)FOkK|WaJKGRBOM0p!}i|mWJ5O)l z{_!!aP*z)6ncId2n+*Eh&CR!O-#&Qo5YBUHWo2q+1}(U|w+~)|pLzN6c162 zz|MR7=SnN8^15dOYbKk{jDG#~H_xBHKzoLU?xb;* zaX@@XPJUs}nckR$Bp9U8i3#}i1ZQV5B`=tnI=i_?$0s`B+MHYn2D5XJ+1Mg9M=~;a z+`@uIV=No!6AMS_>p6=hO)NIZCtn~iwWLs4ob>D*7MEWnmeeyCu|B>mzL3V@IlFnp z`T8aJ2eh=bk`=ofn_C8ZdOG6cE7H>&iK&+tFJHZS{l-lMD;!H+athh)ws-Qx#qsgC z#H^kV-+9tCRaduRWwmKxaiOE*#|h!X`}YqnUw-=FyEQ%jd@v&Eji$He0x&Z(OANn& zvAKHf8s2ES(Id%`)~O{Yee zd|#2Ja`W;*+}k)hYexG<blJY2gvb;&3Cd_y90$j5U)DEWx;^y14Ef zCpx151}QmNh0bL21VAhz8@5E($P^aEQzp+;Cg%a( zXoLqqfI>%!*J(g0^xh{YS)S_>j!TU{NJ$eiM!8r@k2JuEUB zclh~>7hk@9-QL+%R#}C+3UDAp0We5Xh)d|k#oF365MygA7=T!G>d8}h!oiWzt(_el ziyKZ3L&R$%EbO29goLb3g%5@KJa_)W)XXeW>cr&K{<-tu8FCoMjkVP+s`Zwc*@ezd z@~D4Fgpl9!_MJP^vvUwC5GQA+ra`H z^({>x;x0`I2sA1jFr0u`K|CrBi;j8q>Q!!Uu1Spvubx-gR1Ik1oT_Tok-DSE{Z3&n zM%Bg#Jy($|BySgAPmd~3d6tsGV-k^?CD81t!*LT@a_F)krA?rn1(U&b7Yfoud`~eH z8cQTp`P*5GS;`RGQ+204uR9_vBs@35n!yvm%>%RuS%r%aGpt1HpCVDJ9bDa1b`EJ- zIk@cp!66v^_~cZ`KeCXDzrACut({V?gjpgUVpv+s_=0&eGkB6kBcp(Tz}b)E*y(hA zQwth}+DoI(7#c>4#NZq@K1(XIfG_v-8uawa3=4 z;lp=y5n2Z=s5y3cdTM&Qq-3kH@zs+jzj7eqs=oT_{qyHUNkr&#_?anpck*5>i-e?^ z`Mo3L!-o$~o;(3FK>4h!t$`5W#Q^~1`_7)88#ix$dsW1brS^fA>xUDd@jXEo*RJ2l z$jJr0G&Hv$6NBra4Z2PfQ$8lAr;+wz6O;XdL*r9YtZJ=D3^^n`XP#xa>df9#CYcd7fF%~qF&3u$kf_e;islaIS??qCZ?Qh?#bFyW@Y9S z0Tb9brEdsxFSAoa(CeF+^I`=DUBfk|YDcHyvT{g&FaH229GuxGyoo3$89LyTQtBNL z1lOBaRGeE_l$Dp?J1|&LQ%f#$<)NwK127CT7+&7Ka4b5;rv7~X$A|82N|m2bG;U?t zN24kE!s9x6xgs%H)nVDlFhpguYM{TNqn$1igHfT8i0(E20I|YGuC~j`&0EmaL)kfT zBG1Fu0|Vg+0E0TL${e6sv~9Ld{sX zAA1f+ta9=4)#WQ!iG_(*h?+IOVNESu%yv;BsW#XAe92h)_3CB)$Vw3(_W@tSNFc|r zZ*Jn|0t~ooxb@({`wt$xe)9%+ICuX1+`=LV{^G}l4Nc><1L6(RcEih-&=mPFh z&B$N8=zeTS8Z#P=oxwGzG-Ri6O{vxl8?H&UiB_$adxZyp3LZnmgG|6ha$5(T$WHQk zr8=s>!^h7%fEZrx;OZV69tn5@9_d**!Ql~I5fNn0+cY$&1b#BIq&$I(ox?Pf0gy~n ztayB(ftiJqToEHz@)<0ULzb;A+2u87W!Wz~n1MKHX(^J*VLXT_T=tI6nniA%9GzTc z@)AD3Au`G@Fi7p-43>cUvT<|*gQa9-BG;s6WuHAeiR6W}RZv`#oSd>|d@x64E4#gtghkx%!I@>6Juzh4v{Fr z%X`Vx1U9ozjfox4AE7#`5_?AbFYXB-Z6eP;x};d@kniZ}?;jK%2^z6$v^(f%s&zv$4vY;o*E||G ztul4+a8uaY)46;EB7C!sp^3y2E%PFVq3Bq##MwHF{m+g3~@25GBc?%WjXSU zEh*L&RPwacSUT}Y&dN&SsSw%96md4vSh>VapBAGwY3MO;8GJ3 zlZtZl@UNW1l|$A{OvF3^{7hm>8e1TabaMW*AW? 
z;%YCJjM8Xprly%*UPuawzJ5E0Wry|#1`yw1R~%TEkN`A-orXd~A#q2dq7KG%(P_^f zKYpmGQ1jx+la0p4bK~P=nuB)uIQR+%{q@gehIjAY!BzM4_ARZf!sS1I^u>y!<2sFc zuDknZo#*e~ehq}|U*6ZM(;_GO?cMx&lftgwy7R|yqO#VJh`#Q@vDe@C)alO}v(d5C zVbfv*nu5wgtK7MJ7Y-gWpuDP@UPUMSdF`_u5%lifzA3pGcKP;N*d zk>f)%BVrSh9NpaE3J`);bS7+rI7H6LXVI5UP<%@nr$g>cdG?%jRa` z;gQjyVW6W6nvzy66_qd;_!O1BLy!0`3$=B0b#FE_kbTFq9QO9s_KO!Uwsv;Dw}D>0 zdIil4#Ra>8*ZcQBdtepX^y;;16A6h2UBl+)FJHX)xhMHMwqFAwFbvI$O&(`F>gVcy zZUq;R>Cn<%RMX7h;vUo&Su*SX{RiJRA^82p%a^xq-`?5Xh25K)nO$95pP8NO?Cyc6 zeg6D8qlQ5uB(^Jda%!5K;5IuypOT)5;DE69j|^dyFm>v5gf$|+kYHjARlJN|#}Foq zDNL4^f1t=u#)}kK2^q(aZI5`-`KEfG(9r}uy!A8WJ zR+>`yOja)YV2Vf{1y22xz7foTmVvP_P7FMN^!U*tri!muO%y|cv-e8~g3a&=2+GXO zOU=w086O8+aY@eZp0LryrDY3;#ePPF!nO5{#MCtGKRPk7xU>}G<{l#!yK3sp!|_vD zoG`U*uRu_vQhBRvynXx->)w6=<3gf1Jg6YY!_$MPf$vs-xH{aFfZx^Cy|uF&>mM*0 z9X;34i5m!e0L0>cRs;s_o;taA{McMX1hngo>o;~QD|ZqT&n+$?A3-6Xot*lS)_nT( z8H6;slq4wS?|AaP*U~~>=Q@w~_E#|~zn)Xa?fAR31OXc8%q||gMuHoR|9Ubj$LxcLL zs3%XJAS3h+4B~q=wzM)>93GwN>Ej3WMP+g6EY6B%VRh)MLMJDXXKQ;0v9@SrRF+7T zW^3o==MU|KG*MbqT<_vC>+j!OUV$9c($+pbJ6lj(y1BiBoPyha{=!ADS#EyenbOj( z+}xqTA+U5_QSsWw=2OiAQK0vl`g&a6-)v+U65s@RYW(ckd-v}Dtf+YN>eWtm_I6O< z&84NkCz5{t_IvN`{kUnmeV^XAadT&P59uGx29R`gcVE18>0>JT`s;7dkUayvb33z- zo<0JrAZXS%HsNavic5mSBhl)U(=)`l+S+jKI&HWk&pdDc;85%^zql9>7gE$xl9ZMf z7$2%vqfZW!=h}(%R6>5?NoYP$}&k z;*yfdVZR)BZIxQAv~l+Ef`>*HNA_J`-?+N6a&B=kC%+)4pb#dZuCWP{EWq2RpUGG? z&@UB>MKXDNXIEHwOkMjYFb3cm6!` z4bmj!IB5FY{pGk>aEE;ZgO{&dy?6iq@aWk2OBZAv z*l0j1b$4|utEdVH33GIEnWNFL=k}?S18r?(l~vGbaBH{;k+E^4+{f$dZ@!sdTpAmn zSXf#vsi<6DUF$j13&k}sJW^Iwja!X?#?Qi%(l>A4g4nmfHq$fh^$k-(;W~?Tad7Z& zwmX{d*6lmQ(v7XH^HtSLet!EsJ-@eV@V`AmaGTPG(@xc$GN?BgSnWrkLpTkMjH1=g zoxgy9e*5F==En)S zGBMeEOSo`M&(#^IP%n2`d{A=8;$Rsr8`dv_N(8}MlF>PFPg%+0lRc7Z)W6lzCj zxNT^#^0+v1)Z;3fogAOQ`<^<UZ@?4|SAN+*6K7ZB}xY-;W+|`u-+B(YEK=EWsJ4y=;=1EwIKg=%6Co4$c#%& zCI-nC7UQR8HTM=se|!;yGRUO0qmx){Br*!Q0O8w{mtSgQQ*$Q{o0DINuU`Sf zZDPD+X;BzVEc9MkRnsY!Z0qXG(5SId(Kr>fC^VH{P;jcP?V7v$LP-g^{LP>KL{!{C zb9AMqZs_anoIEid9)=Y5_T4)Wi40zNJ>kuZEl!Yp9qR3meg_e^yZ2r zNW<0j^+|qs(yezk$fR?f*e=(lC$GXamI+Pda+8qJ2y zRfx>155iheqO>ZpqUACyh4f?Ey2M;RQ!}ngEO3!niFDm!6llGHaCdNa0dIhZ{enWA-96`|2M^9y6p9NMFA~fA78PGzU(d`-ZOv2OT`Qx3Zj^oP_ry)(JT~InQA1YMmNUDj(dx z|Mb~2=)m@t*4fy&?U6hPrdxZR1(>*n}J-2ybd>4(#iB`o4V(%&tgxi) z_MJO}!z0yo_3-*hscE$h4csc;=XIZ(Rhz4v9q1fhNKTlnR_;{d;vE!#2u4J5)gq&6 zBjQPU4MmZsv?{aW1&g!|jRgs!gW^|7Y-lP`l$2e<5%`J?Of9HoH1cT%$6Ihx&)}r4 zffb*@aT6F(td8mE@lpk#HMn`cyBM%JrDrH{Q;OmwT&0M~7t#eh`*=r(0B67z@(FhX zrWpnr?i_e#3q?t(>5a{;MWyAr1%*&tK^dWm?TPI}oiHK9`Vb?dUu(Yg{Nm#F&Td&? 
z(2|AuyhL1*o9E);6&;^&nw)+9=_gD6{*bKxA)&K#^VhCl|Kf`;_!1e|72(*@-f?ku zmK@Esf8@xusmX8O^K(s2$nGGA^}qnw56IM+*}0vT77{l5M?N2DX(ihZ!Z@$62rcsQ z*!S;0;QoK_NQmz=>$iJ5ArruvyL&{3B*A0Pp8Yy~yMMj?E)eo_x3fEEP3z5IU5ZDG zzWVAtT8K%b5-d!@-ziQ$dv>uWMN7}3k!xbhur5x?MaSI zL=iB>iBd_B{FH&=kyAQ!n*)}hmC>x3Y>PvNp{FvA>*$@-3}K}4xF%K<7GKB_NsKKh z@EdkMP9*~+-eCb$76*nQIW2=&PNccTCol+#|H7q9P+9gt&gAu**O*t=5$iOZotj2m z!bt~*M+63iEEyb3Rqd2Y;U$n4=I7=c`~$`_GKz{ya5f0A^A|1xTDV2av$LIY`LL(Q z_Uh{8%a^BS<~m+}6a zH*XNu{X;{&hwG{TSrh%sE33PEdq20KU%Y(Tedf$!R@RP=&Ki&V=y0smuetq8B7~S7 z_3Vq=_iufhbI>*j2%h4ia<`e(1aK zPl$<2$jZ$FTC=pV8)mOILr|)GzgV~A`gLY=GahVZb#-EL%C^D0t-XEIKw>wOndss& zH#tc{H_n1VHn5ABOwY^!5RQ$HbC1V|mXww4cc^M*dAa5N^7LjO?|tjaUzP)DAzKCS z15ck!*spJFG_|w@g@jSSqo!7t#z`(z_ECYu77!J`e*2bBw69-S;7tP~RSoUiPR2l4 z8jm$Ku3r-pm%1n|9n>4o(sn5|&p`2uxK}QtJNDHPhQWE<9pz zXbAd^%hoG#YBf|`xiZI|_``NsUH`LZTOTs+haAHzb93`K`T47>t1yN>_F4Ibg=7~D zrmChkGA52&iFA2o1(=Xq>2EIrn`~mY+M>G5eXU$9UOB)Pf)_w?JV!=GYwPQ0W@meP z`wELo(lfK54FSQS@V;f`6@H9>Yth%WgLUj4JpdblGL@8;!IfIs+AFH5i^<60$KA_Y zUPVn-QRTdV(5dr$@aR;^hLp5~oE&hM_!U{PU~zsw0Tsh5{GkGz>2p^ZNkdtAJEe=_ z!V*@Tl-_f+^D1^~lBTj}g#|8%2r25S3d=~sB=940Meh@nx%~`yC;23Rdo%t7UUj4z(a!@T~ z_0?)XX@GvResXm(DR0Gyj-f?Hg+|a~;uGLaQ`0lcE30DS;s*wYuq*y)QVXk<5gXmn z*FhLzc}1nDlnlHw;>OAI7cYs3fu`K~4=9Za}+hZ@iJGX(ni%ZL(<+yF@IBRm2W|Zc_;vyU> z%zJcf41C?&*WdK6C5ZU5=g)yKp>{cWc}8Y;mY0{CTUhwMK$r^)3k5|*nS-v9yNz);BcF&(C*ub>|lp60TfRSC7ar`{ANLSOT!wwyy)~-qF!f zVQSib?6*cC!HEwIiK-d<-~p!@uY&xml@%B^O#SP$S^JA}P%R{lnMN?lG5Rq_*N2V| zA4SK;b@%q-rLMku1=@<}4>Ah0;Nk59cL<~&pOq-e5IYrlIUc$5Nmjy+6BaH+T;vlLIePLmen79KROF>yWaVv@6x=wqummH7 zfZR@=zi?hiP}YaTd0-oI4)Pq6pk1Ptj+n~*LjMK#Eb~NGOh#5sOItxz9fXIF3T&6R zR;H@9-P8}G(fxyhAGkd94G6j(ah;R9MUz9Egieo)jskjldi&HjHbTFxZSAL~reRTl zTNRa6z+HGap!EP;2p6AFz@Bd z<=+QCS&z zXT{5|3gQZ~)JQJ-M^;NoAVly?#F@)?6#2wN1ZBmKojoUURrX@|MLrszoS}k*kqps( z*>G8eCRmsJEfw{9S~sk2iTg;Z1zovmVyJjU{UYrmH5o!Xp>Rt@T;{T%n1ryTG*rKY zt(|%0`3ek$%dwaJ69YrSY3y}K+4|&AT||MRZfpwef<1^ktkzWrdr$K%D6txg3XY; z65bUDp#Ez+OjEpCUT#!X?f&OK+I~Gc@V*F2}s5dz?JK}hJVkbul}g>zx&j?<1y zI!K?o$fw|~c#eKn#8*^ORZi}XqMW0GqK>+pv7(f%jQAB91QQW|QSvEDk5xn@r7vqM z%h+7jx_eDdMH#F{C+NDiG1dAIF`=io7iyZAoLpE^3?)WLnVy*j)*_=@U|irT&@fjI zFYqgOrNT6_ES z%uG0)0o$Xpvz|YPdBAvHE^|CR>=Os<^wt)qruwq7SVcvnqhnk5!X93~-b^HLpYU$5 z*w^1G;`&A{B)&c#aOf!GSLZAzs)s@1Q;>?usVQFM`up48c~iEQR+hw~ILc>8H^NzP zc)(ilyFk^f*0$OCIlIvNfhB?61KofzFcM=QQ$QezaLMF?)P$!=_MVQi5wiRd{Bn1w zi9C=K5h)q?e(qGdh_A@8Gv|+sUUNz74_?7bMq$IWY`(A3?_zn(ga z;}r~5j?#a{7J(oEaC9&hj#-B5DyeIn;}=xX&@wPFQ?=)0xJZOZ7@3ESz#X$F;9tyKJEd*A-|nIE2E&K zqj&4_0|hQ`F}JY>(DDroGBmvd$__7E-_jr!#aSsCTohcv_7}s$Bk&yvBjEJOG+OJ) z6NHHOBqnkn3t|x{J5l{NZz%Jao}QuBv?V0w=H(+;b@lXg_w*pn0FseGUPNp>vE29F zZW~NZp}Wvv_@m$7zQsrH{vtMBziw;a_vGN}>Z_W%x}m>gPH=bm7A*wwwBhMV#>t-< zKf7hS?tO*L=;Ww-fjgsuF*!Yjz+F~e0Z2gnSv|X3MP+qOaCkTr0*d18>kq|pf9wso zrEhF<%h=e^?2hSOOAVcC%2zaG6;;5b;p2g(l#EpsLKT!Bs-EV%gnuQ~FOvmTP*$Z| zYRAr;Q!&s`HdhmtmQ=P=l@GtH5TT%Aq7JTd~!T!P#!sJ8!a{@@F z6LgxS49?EZ`FMDex>y~*{5()q&HGqpW@f?i!7g(03zptjMZlv-OiIBA;ZZCsEW(Q3 z{_#h4ULJ8q)GUOdA;&^YOvp+EAXS8*%$(fa9TxWDX@9~?$BRa&2QdDoI`4d=7J_Fq z{9+hzXungi@N?8M&$@b|Lr@C<5@9g0YHtkhrFk_HAPmF&WuQB4Vt2;svd+IQ+v%4Et1HUH&Uy!N6 z!LqAYYi{awwX~2eD=RCwjneq|w)FIsUDt?LR#)$`cXyeapD!#b-f2AG>&rK4A)g?? 
z2IUo%4eX{<@2Hveb@yC%$w*1J6gL+)_ujrf5{r8JdW*|S;i3EbpOGyUEfpj~1T;X; zR{yfHs)4NuS@s05bkoe>EbSc8dlN4+n+FaWkF=B=Rg|=@fI`3wsQypzno%_l8%v{rnmO{Nyc*z-P}RbRUFlILvI*a znSqhNefvjOcemE{8<{ydgaAONOUlY%=b_;=Xfxb7a3eh`niS3h5=+a>x_i%NTal>xz<>OnXEdLhy9xP;Waj~hnrMA9-w+2~xNBR3k zwLiD^Q!@j2{e>-sLQ%qeG`@tYgr44B=$Vh7ztw%)vk?@pI72)8@QItv{Rem4Ex5}4 zFu+as{!KwoVKw(FO6phC{5Ye)SHrK0Ny`a{i%UDn%J|7@TV6*TP&QQ;ekdwyq97n9 zuBxj=So;yi5r-g0RZVS$C!8W(x{Mo!-o}KN%g|)*J36bls1qwyW+>Y^IRZR_Zmotv|Fbn^7}dHHgAZS7Ch z1P{U2BtsgYUW}+{a0zydecjR31#2YQkEa3dK^j5vP80%zEiEnK@eT}9D?sb+-zU(k zNmP`Z(@xTIK|k;*vvc!OZaM#Dk^SG6+2J|0pZrd_i0f}(a(J*U1kR7u){Ym?9ggHm zO?M3$+x?Aktf8Y3FSxw2@@mvo&PZ#7a&AEZ)DB48Cxb@t>1 zJ|Hm=bRlsm1xsa#FiAycWk5CME1H^NoY7>X1fzhE5DiCd1BPK)eK~GG-G{R(SS&)! z+08YA5$W#rI5azykA6ueMm9Ptmiq-4mzElvn$t2fT|ysSioA4&amF#)8F~z}0q=ht zOjGm9xaObV(o5+*a1j95`g>J|9>*mlf|IncD3<|}mXMST@QKF={F#}Z1E@AUGKx!$ zj*U~GYj_0Q9UD73y8!3Fi%R0+Tlo1~&zvrIaHy%RL+rxFvI;8xAPRxRmy}nYamXaq zBAw&P{^I+6@!~~ARTUiH+Un}!^z>)!3bJ=Os1~yN>J_W4jd;gx4*;xsobIiWsNos! z6%-l-U{fT;s+C8wqV`$9h==#l!yruXdc!xMv}$0WpSUc0Vm zV0iT8nG@$OAVlCt>9!hwn82kleuRq)g2I>Yy`NbR6NH7Fy(Dnu>NR8YyVS&pzKXn= zl9{!ovbv_QgfzIgmcexiKS?of39n!ucQ0=VIe70BUEbjekO4S_JE`VmuS}#2+&PpOwyouvLbOxMH@Cf`ld84V z+1c6E-QL*=tAK3);$PL(gVN($L+K68?j$@-^7i#BDJus?wSVALQ&*Rhn+J!2XF|0( zSXx57A_G~qb@jkqFR^`M;_VK~`o|ytVdT|cbjp_Z${QU0484v45i?TL8a1x8oj%3% zaQ~Ve42c{(3jw;tyN7M8Y^g2>7%;aux4cSKN(OocC{NAEe8HYqo|&D6a`Zoc7F-y7 zk7gT83j-z1D<~Kre*ykVS_(S4Jn|0ny%D0P>!oM!;)KuvB?dzfm6sL}6<5=|$}b|O zbzN6YTU+1JP}xH5oQR;jnySQONzn)~D}NhtIXQ7>GGqw&vfpJvaY+#= z8D(t^eG_9~E!dUGox64q9Q6#0oF2L2bH!xjZX27LTUlG!*m(Lp@e2%c_4Go-yAgeZ zAU(STdjd8P4WOgWuI|yX@m?8Tz-jmh>vU_X#{i23PHz~LODsgqBNgABR62%|z4;y3 z>MhAmN8G?JQriz^;n zu$95!^wT1b;lbd!%D(W!o%A~L+3gub#y|Uq^}g1J&1&^;cW>gj{E0};1iaRy6m8E zf_Cy!D8H@!$Ti;kOVil#Z zB6xTWfGg5BV}5QfDmn%jhAdIUvucZu;;b^4zr4(o=#ERlik4TXv=pGJnVqNAzl3xZ z7Lb1MEJ4A=ulv#OzGVvm=p3CKonNHh6W$^~0+axuf2Zk@g{1|dNFkz^Hc2+nq1Lu` z@J_JUj*d?F*R+hx*n~v9>EN*NC;kDqBJ?@yMrbPTkG)-7-J{~-BqKPQZ9q4KE$Ig6 zy-&#H*SUGq+Rh#r{ch}Cj`q{|R5Y{zTBPL__3rC)a}X8uRd3oE2!sfnrJYr@Q@MNZ z9!b8Ua^{}}+) zlYl@wM<@3vPffbky;z%)@-jS`y84FAz0N8s3E&(Z9fPsWZP$wf`kh}`gx?6i@xH#z z(o)Pze4Xj(c%;vsKkw`BXZreb=0Jo5xr4k!sLaT!DxCY4T_xyi7Fz(a>P<^+_wX2H zvA$YCYp#5w7DAPauMFpp=2wVU0G9^8XcFqQho#!EDQGAO zdgbL2oPokXyQc;Qnyy}Ll9uX@k1ziaXhDET3B`Z?<_!*>|B6W>B+cxreH^cRqZTqf zJ1rK?88r$l4cOyZW0PZEMP3Qj@oVcG`o6Zl4ph+nz68_i>MNK5u=w)wazR<4CS6lH zRL0ibzLD9)Y;H+O&w#FX_x1vcSlqL@dgG?eeTU5I%v+xN?mk|T(XpTtz*JCrGfOM& z8#itm8UsO@J+M&qRJ-AN)AGSR4LdFE+czG0c-ltWQM5Zu$1f<@%Fh0(zMg5InO(qr zL?O*0QNRr0ryPCf}yXl$;GcyTl(*nZ`48nt8|jC5uTdI%Kfy9^!D|Ejc|tu zL0fb33-B*+7sw18XH;xlSw#gPKCB4N6&wRPgk8?AZa#hia!RUA%`I@2R<`zV%(e~> zGU^>zeDIpmXZ>8oN>L(t*SbOwII- zOyrbQagr`YG#U+e##(Bnh(FoCmtf!k+7sc1a%S6jqrdcen za)y0ztINwP*?D=Tp`oNTE>lT~TWMiy?#9dIK{I2MMbTztrrv){VjcPzNUPm76O@=nV?tF3t9@^?0*!T%3LTe z8GrHTXyeP5FH6eG;s2<4<;f}hTU1h7Rb7*jozvFdZt8PKEk?~G!Zd=x@b(LUX~865 z31qnv^gbyy4b;UZ#zr(uOx5{{=C$j(w{8nbNCA408F&1cMcq+S*ASPLGqbVeqw|ru zLibS5NA8{u&JTx&M|AZK5Sm=v-1&q>%`B{NO_(UI-__GYQrVvm43<`u8&_Ag)oL5*_P*{xHCO@z%d>3jW!pe$71J?O-TzWk=KHlE>0nkFg+1k4UE@OUv zo_plR#s;p;1y|iYy_>q*tgWuL$jFdFz^ybI377V>F=x65p5u5V=W zR^=dR7hod*_lO}~J-vYP>umAzdBTW|Cf#{}^6df~pqfxhi+i@0Rn($m;~%6QDM0uci8f%4Z3Zz5I*h)GCYRf=n93Y-(TS-}&o9E;FlEPCAj)l;3=~R>NvNBV5bCpn4 zlQ7fd^fXmXZ8%_B8sjLtlh#rCuNuZW3aaXX5h3-h4H{RkJqZkix?4NgY1&-XwY^D; zibCAz?du0T0|^19LQHt^V&d@=Kg68q_&B}$`blNUz5#(AkG*5#6R6bmMrKcMFPW5B zSzS?!Rig@KGtRU=Yu)85bCrH^W(Fkt^~T2X$_k<%AQqx1AQN}I3#8)N^XJ=M1nwX$ zGmCp9ne^aR=&P-7;GL7j-p)t{OG3OwQ)6;qfNX^Ml@=7V2L-a6onCbA*eC3O%6DQR 
z)FQ+d7L!LOMyJN4qosak{47h8%`M7jccpp(Ou}7w4TFJ>!X9XmQEq8&YERXi@|>WZ zc;BvGo|i)T3A-@$GQaQS41@;26Ivc3&rmQAvjBtFGQTDoDaJ>?q-lNC=Kcfl8RKY^ zpBO*Eis&T_aPhm=HV#hC>N;1A&CT%(Kgu{tmBY&oxMO9FBja-u z(n~UMKV+n6WMcSaB&SwTRAtmI-x`E!W4q3=hzwvUX&IS-Pt)_$@%8bH{78JYZ7%|h z6z%TO5fwH4e%aIL=olAwk#Tn3*cMxiE~e}6-$ zEi~25+fyJ)fIW8fzh=I7+#h?BqMA58oJLLUJ@)l8OXD5)df<^Ik{+&$n;M_}1?TnBY7bQ5yft9%zoL-rm@rRabYg zo2Iu&Nv^(U9d-2$3-7C1@s4)vBrFF9PD4W>!PPA}atB%ts)b}XXCG&rz>7DFH3NPC z?A~<1*wmOrw8T%0BU0g#i7Cn4ammiXPTiy%IVrS(^oe z3r18+NIlr0Rv*WdU$zccpPIKJHa^V7UvFjE*Zut=y)aiijSW^j4eAiFC`;A zB7q*4oS2tifGr4g_zhkT6Ur|v%r7X!7V@vFrxzGJ97JJBNpVSOX<0dyVMS#VR)TAR z&2vYP?epxnolt70(};4nojA@d`w3)4s-Hk+W@o#)Khm*avbUG1snIMh)|s06^(Xbd zs2oTO!8@?ex93c3)A@E9OBtUY_l@;uvYNOq5WZC}@8|$suBff@3HPkX-x4*^-Rf1_wf$^c#DXP z)G^X)YHuRV2C8am+S0s9vOMZ z9^iV!$aIJ;uj?GxMf*3_)?TrDRP5{izkNB776Qp=${-CA?9y$wO+Z>$SO7PenE0T{ z3y!>%a*J4qNty|FdwnAlfF2Cwp_>OZgVok<=iu1W+lO7nrDcuGCP4GF!gQ%9>67$R zH^TKuZ?t#pb+9-kF7atbR+g=UqrR~T&_4)wTYE=vSh%H=jZvhDHSM0)lP7>`((FD5 z?DUXzSl#b$-?p^2`UVDuB!|W4Cx9owAU|3sVUxf+Y~l&L@`hc`XLxeBj#tsGPffv@U%)>YqL|9U&B3O%c= z+Ga5-EF>f?s-KmU3#BK!09qOvsa4ms8X7UJ z-{{BkD+;S?sZNs>RaO1ZpJ@bY3e!Yx`x;u@vl)Wl9vQ(E3{1?lb@h@`QoFl*IyyVk zvoo*QY8m($_6<;m2PlPj)y!hS0=v4q@u2`kRBmO}{?@PJIi%ziw)771uA1T%#6TRg z`s!8LcB8$!3-DE2M@L{r4apb&)i!ftdYg2w?^_P4h3q8j8yirv=L6diTqp_@7v7!l zTJCljl3|L$zu1k`9w&NFO-(m8HG!Ovy#d$`C-J4_<%lp;skMS=GTJLemr6)Z3ZZ)W z75D`P`DOZZN}144yLh=JBqcvhPD#(q0)mQvngl`uTYC0mWIKq_N){>wV^e=M`^$Cho&r~95g1AThO&S3w9`EP`g zf#FekkL(ln8P4$)fBTBRuP@(~g#h#9vCBhkyBGeRtM|OJyu2*P5F}b|nQlpSTW7WQ zKci+3UahSmhCsIlhlc7J8Xg6>1!M#w2xRBvws&^EeEG7ug_Xt3Bw4)|8GJ4t?)ROX zL7+3TvSB~C!jphN*bJPps-GJ92no9Q*!UPQPf%zWj>Hv+W)F{ydU*TfmlU#kTVAd0 zRh7-d@`-~NaL%5c-zJq1)`&ZQ$Hrp>8SYB}J@MRdk=51J?~h~bMZOaYdGqFX5MMBd zo!%nJ-Pq}Lk9=x;=@Ns_i{=AWV3(Gn(xQNuYU>*YhljDHzOm8A&!3E<#>OY~_4hY6 zHRCYAyy&Xv+c5^-F;7CnX)veB$th2tCzaLJRW-Fyv9U&G=E_&=qj*cnmY2X{U==FF`$!hClYi@O^xY{^Q+eG@AuAy!E5Ns7dGFI{W#wcGY{uKy|FN$h;s$sQ;1f6Vlw7`n-Jqppc@HQgC$qfQ0Z~eBj=h$O9$4sT@pzK_qMKpO-W04eE29mGYf!cn9Vxad6wf< zHPpN{`Ta$ugFUW5{stdg+kdS$Ahf?yN%7#yfw2%62BU%@6DwnpW|3Q%KQuhD(~;rj z;n3-SGk(%#+!z=f{OFZ|uR6QB$=_fRTvpmY)K7}O;U}djnfDZ0WE31f{4t;x_;GP* zSw&TqRfZJ+(s9P|;`U5_HHDeH(_v7@I~kmuLMevS?(}1BVXp0Y zTS8J2cS}WeHJ8wDyxzD?H{fUR-%PqWJOvlK>*}&PCSL)_p?B zP&`Zo%pYw^vBumN(3VX&oa)2m51$e*zn}Aa=h{>#wn{5 zE21u7w7ko^fxDQ^&5VTT+4(BuwCbwRq&i%Dt?BQ%E89dyZc`IYf2sCISF z2WVt=ZdM>tkgPvRtxqMVG&C|1Nt#&{B|y~b`0~pt+S)s~`3Imw-95dZaFOvB6FaRl z`CjF~SO~mwF{@ZVRX?COVD||%_{);A((#G$-DkLB8)Asd2OfJiO!dtm9eKD1JRcYw z85xa^i|0Knu`ZF@)48jk!YV*$x%mZyLqpKN)}Ge#>dhrWAQhbzhN(vGneH1rO_-`{ zYeDgGw}>j0)nA;7z#zHf1n=105k74Ou%M`DtiOM5{3DO|yO9H9A$t_Ow!Ff^ zC}tM_#`uj|;1Ma^#G;m7H8y=Pny zPbBKtz-g2n6?h8JkO^!+}M}AdGmW}Mke>)!J%Q2H?S|E$_NDnbo_jz z?f9{-yzTqqerNK%SqRYf)b#X651XEzsc&S0ggmW(Ix#yjJu{P$os*UGCwO~4_S`?s zKQb|9>$w0_Q!{^7S*~wvtmeUHTg`w7SwO#OBh2!VyXVO0XijO~=)~xk_R0Wt+;)Ma zjQoDDu=eT|OI(ao{{6;{&wYd6qI_2t(%0X=7nDqT`E5!HW*2^L025tWS{fJ|5RazX zpYuiXjZJ<~Gm2g1_D@6v@(K&#E-!hZqs%X5eOqz@zxvy)A418Q%A?fcjUyl#2Eb(llO7O!W5kasTbL4nPc)s?p1w$zODF`hvi{ISUZib_g> z=6SRFDt2B1)!EgxH79CfVzRZpEvG)GuA}ZNOf`V>fP-=WWjB8-|I#0Npyj|@2qdX4 zNg`U}YU0)5iS2t0Z)|LYmWG%f2W z;xVq&?l3_7g=ACE!{48f6941K_h2C(6-E`Ez_4?X=ci^rSPTT%lfSp}%!`YQ{=p#~ zom~VAAuiR{*S~$cxp#PJ`8G3#MBu9H8@HMF_zIbeyn;}+X`rH^k;$!&bts$Uhp>?O zg*oF?W2tBsua`|Zdwyx zwsRUH5Kd9aHd7Gg6_sz^e7*Haa0;7NBU4#hUw%^mfXWYHA@A6XV$&))Y|)P8G&Q%* zw*Kf9+B-T)B0WwtNV!c;R0vVnH0ZfYqAp?!sj9e1c-L%w9RU^D^fa1Tti2>6{DAVK zSjbj^zUn7%YG*m-Iezqb?lL``Xr9UGUv)j^`liERA=?f~;jEgj8r`THv{D*xcGf%g zBn%kNr+$xp%li6;b5wz&yGBH20{LmKij_um_GO66z-MwiT3TIK-E0vpP@d04AE$;Sw{5clzxi 
zHhO=@hEmZr(TWL*)>+nbOPfcWTie=qU+nc6<>m5c5VHn|+ zLkrnWCZ;Bi(T|ZO=6)5voA&ai^m3)Oz5Nq%0jc@N9Tt{+Wr3}3&FuV<<h6gT8#OO$-4`XY1=Zmon75~S;DndxTDE`uTZN4s%yR)Df;&1&_X^UFIQex zcU35a zW(5b_+}_M=Yx;yLw>`a^o0tDx^0Y4TB;(|7jAKz1yY(6ULFCXvK2dmATyZ78IqwAH z#IKCs!Yjh}TO{tq#6(xmN7@B`1%YjAKjkFj$j8YahLJ-H`9x`9ZC-Q^Wim=KB)X*( zqqMxbzci=yjg9=G;*THqg#}^IyU6DV*KY#_Aaox$ePHX7)QX5?`7QkVOyeogK}sg`;+jBaMJl!GD>of%{cccYwK%;t%X@l zSxd`@OUAz;Ikb@diGPVdsr@b#DZI3_Z#sS`2Tl$xWPdU>J7t4IdsF&DJ@7hNOw2V-92;;-JQRK&*%HTe+;Z) zEzW&*U;Eno4w07?M@Jz+fq{WR|0W@#00RU6@*zTo1O6f#B&ZJjg0)u=7lNJdr}qNB zyta{0vxk8}#eMm}!X%{-!61#Kd=nA;;gWKYij?w0w63K=l*s_CsWPU~p#`*$7y9rj62<3fWh)#%16RooWlU zlcgc)OGGZ}xjjbt$R}~y-AIO`B66W z7-ybZRS%CCe-v5X&7b@X1zHL=64#Q7w~#i$O_6CSJa=$ zq!e!_7*Q4WZgirOVC9|j-TBimrZ*l*XZCEH@;cGeFz`RU{ZYEO8`rpG^{lnNp=g)K z(5F?+Y~Q~=DK&A%xh*IN(;ZfG3b)H^NlQb52)5X3@g*nD|^BJZ9X?lKH4xIxq>=|0K#{a&dz zvkP}Km(}Mk+%BIk8VcOjORmuxpKo7#-KO7K^YcLy7hX-@%dC?-JG4RzDA1FotsC2$ z`jI@v?=2Z!v7y-n%zl*l|H&Z+9MO%fafc^7OO=9ZG83e7x%~U2sr`CoY-VHkpny9= zvZiZ*v%1j6r0(um8hk!Doz9_o6J+78rBR^qCs{+K?0)HR)WOrL$45>3WLJ~wwE*EC z{p9p9{;(@*Kvfd+K9HcUcJIJ0btWwGhmW*`VAkD@yn6wKGK(5+KdaV)?md&!_KJC=R{JjdEubTC>BhM|<_N0L+-4wQ|A`}pnCp<- zPpTJBq%wjvr^G?RJ>{Cj7TAb#R;ddB>5!idk*8WWWTrv`m(x(dE`LYITe+9ao7vVH zmtUoELNSWlBI6+PF^rZR;+m5EiYxLx(q zk_BFin*D4TZ*y{wSf(;B&o-Ns9XRvB6v0usy;CM;g{#Lzyr_M3pKVx)CDPFa%hb;diJ_o z?I{dT?yHY}jWoHb(R^~#+St|VpDY<*KeR4fT-$EqS$UMKBfJ|qZe;M%f8i9=m zZo~w9L!1AZVCE$lEWcMUkv3OhGI=B6t21Q&%!}!rlzMA_(Rh@^KV7=3aG7+) zsdOM;AU2y!THTcQ{T{RFa{qF0^Rx-$;B&!Y%34?ANHFYdb7&sl;PVY*8f1>?S2HA}(1bv#dRM~2O zx?CZ9oQ-(X$YwU7-*-YuE0g*s$?5AP-r!uTGc*F+boSti5RC5cEh^fJ@x31th-<*u zuz)dwNOFa~e)O%v8>%!?Oa!XpUH8}KYp9p z771>+uf+I9s*Fa$eAjdgxgh1T?Ei`Xs!r@z5#f+3zy0;8mLBps0cdF3qQwffZCxNr ztK5Le>yy2X--~<_{oX(*3V%%CFn#6!Q(|Vhxr8qA?YlfC_2k6LgPjTgkEtu5=OcIW z<$LC}&TH<6mfZ|%F$N-jo10bgh3kuN2iLnQIvz_|{jZ-Bsr(~J^JIT1Zp6!-2znzu zIEM2$IfIIQzMG@Rgd-J^d8_ zzq3ESoV{K%`h_0u73`obe=21otTItY&}gol8rx-4guugSkhN?Qzw5|^>!gbG^XJ=M z6-evZTpNXZ&Ti-RQ&D^G1_|aocaFHiX!x>BfP8GJF#>fl3b?vkSwMfk^mg9H_OR3R zzgk8F)H2?PD{PcpqkaF~h!nMm!+G^N6l|(j3ET%RWN?@Mej9C1GMa@p^6?j22J zvzo3qL&|I6-uZpE@|b&s>A1&Mc6s5Qw?c6(TQ=O%nts3Zm*G)>^4e@LVAyN^^8Djv z&c|5UH=pFrbZbe^vriZ9^M{^w=Q;^rJWLW}ihq;LG$)2JWQVZkR+i^l*@PQ*PgPeQ zlae=j`Afw;ZW)K=mrTdWTN!P#Yv^8T;g93*HOgfqHQY0$E3KZ%B_%6~cQb<{rck1zh2c~eth+Z4M#V0iTe&8Eyz-= z>lgj+I*zY>@|C`b!*{hV2m324v%AZ*v;x{-8zb%I2ERP9S&gIP>(!O_EilLY?RO9HX(8u~-GfBEf=3pl%GIweEu&>H6_r#Hxykd#W78;m=nM>)t& z4g|K@OJ|xTf0xd#Fpt^WN0RD%b1~C7dcyMmk;Y{CvAF)KyZb@@9uN;J$58N4_$ThX zfw&PzK>g^sReQ8RGM4l7?-fqa__SYyTQCKH{9D`=~Wly8YItu;*}Xv)oMU57vY z`LfPl-GlA)oJW0Gt>jWB4I^#AP1&mnN=n?0CVuaWHPMbdkAm&fX^(pI`G2me2MD*G zlBYw+C!#tw-b$>o-LiZfes^e9ws@Jl@2;&;A~*dC{%F6J)s~jboV8$qP&lcDHGkZ? zwc&BO^}3nmiK$aD$hE!58|PK`IKLk)s$%E+{}p@)kVXmDy>R)14`A55t+=aNE)Qzk zj?cP=8H$w5RCXh*oF7`Diple7F;QOiCzg47Ep3#L?cdh-&_&OG$z$ z{v4u8kFy3oyEWcJgJ@Gqb|vQK)difzT4sxJ6rBMKi)+2}xq-ud=L7y6q~im8!Mao

    BPwn3^X*DS zuA=y;%lX3yZk;8h8MF{rj$PTV-u9W@XqshmDwKiPz@Fn@=8^&2fhY@)l3mMvgGAiL z1Vg`9-x^9eH7;a6r<=Z_Md&aoq2226{M6FWFz=}|{&TCJ#~I|Pt)!aJ<~BW$y-S>1 zIN!YA+g&{>^dA`b^#V4qL$0vrJsBWTGPR}#wlWJebyHf0_!gbIXG7Xh-S-F{yx-qg zejrG&P*5abND~LqVV%e%3s^Y!z(OAq6HR7{rTcjt2^_!nUnn8ht ze(OJ(6*$W6uRFy!^U|tBmBc2OkmeD!g8@ih_yVMKZSQZh>Y#NNbnjIO>CfOY%#wJ8 znRiy|Z69Y!G|q*ec*_Hs(&DsZ&EWXl&$<>v6a{ge2w^^Yx%J_hl>g8nU`8hjc^93c zGRC+z2F;Z;UjItJx!qsDn1==e2%y9Ot79n{=# zo+}NP%pH?XLKBB5Es^%8;41^OW7M}+zbQ;UJX(5+27Rc$2@RU>5t1;H{6_@gh!Y=d4gOU{F1`Tb_D?)LKJXpfUq z;1L5dHX(Yw50;-{Sx|qPrMvg&%<>~NOkph-C+$klent1@EYSru-IhErZtUi{@ju;l zsc2K2ub+3OgF!=s*5NJ|HUoX+uJ`!zXk*e+Yt$fh%K*+?u=Lx(jWi&!OX2}9Q(caD z)*362mX|4b7kFsDEiC)WI1$8Q)aBH@2w5o}PmZQjKR(i_T_=l2AICz+rUJ=$QVWMN z^}~fv)|al%ciRo39#M3!Ui)b7h3{wqh5QF5@H!O0(|!b_q}%tRUw*;O(HHXe{Z>bz zxxiI1&ePTmwph^M8!jyv|6@G7UB|m}J!%0if?wqnaT!(C3~l@*v9Q0gO|N<70Rr0{dUhX&_cKXEril5<>Ey7^M(i}j0B!d-El$kj@lt( z{rB1LMN&qm`G-zKv2q-Mo>LN$VgWZHmw0Kp7s_lB$ksty9J8cCyF7kNEw^v3gtM=_ z=O%UNRFfuovoy!h2^S?wWs0qImz7WT2K6RdLfv-r2mKRc_#CybCU;5&;WR(u6x;`O zH>X|J*U!@#lh)RUn%RU;e)m(mWO;#LuvX!9YL+_oYr_(~5`XH%iG%B27_-(231{t6Cj znPU00{RA^@@3hyAd{1lT;Z8F3(=@SsopBq4-EGz_|Jk6-$=%@Ts9O-38v>n0MOGBl zoS2J-#J^O$*iZT3zCZqOZ@AI71;=r$_jo4T8j9{GFPolPoz7pOCoQd*$XtUmWBIGp zAFFen7YcMZ6K_t#E>3By(csz8f9QF3l{>h}ev1#jng<^Bjp_P#+BdEmJ-X zu!22R+BzAS!d;x!dvtyl3-vy9uo<3J))E}US*k<3gbcl~c|7rn1SQk+udq9M(^U7^ zBQ)|%C=(XIW@V9H4-Qj470J52S-$vSg}@=+>K!gVa{qFapBRP17WJxV^n)Cull(t{ zZcf4sXcRPsQu>GNws{0j%;dfZ+g}y*Iw4Ld%36_CnmWXPTv|E4Ty7FxX>2p_BW-QF zn#pAMoXk6#&2C$cuaSeo3Jl}eVgv5~@Yk@P|GngO2st`JK8hGjM;?{^*6B1V=6k}g zb6+@8=aQR2AY?Q|Hh8pY88y3ZM(z6qvc-RvgL_f8KVe?YS9HGuI3j{+>2kDIHZfT` zS5^+D8#|XtxA5co5~G$Ru(>G39yZSpo7JLU4f2*{rtTqV^r3C3=;mGnbP{lc7WHw>=eAmXAX zmWy@XR1zV+FUoNF!VkDYB5|t5c*@T+OHA?u7@`oVz~|O(R!qmo7HE1Jx8J^!+4q|M zIqSN#el-$zw;5nIO{qG>}{)(5LShmSQUZi@e&RsW`zsZKeq zoR;FuPugPKXk2pfG5^6q`ZgM9*MTwt{A|dP*Kb;wCWlsi!1UPRkPAfUJ4qYPNK6km zzmq5uc-U^b!@O)bmJ9LP;mzLo`1k|41NJ0H?eDhxeQ@bVt9(pA0a7&6G@^>T^+(=9 zNbM&D%jE8LP$&V{%JkXLW+GiMc7x_;5q7)KW29K4{zH9;xClJv3lx@1CJ@Q`7U*;X^5QRA`| zfV)^2bHJyM(%73ge=lVnAQ_tHY_{kIYEv8z1A}L-(3vl{ zl8dw@64D*`?}Z?}^y8^mFIxD80BFGhYt6&1)gUVYb#7!c-xrkBg$2ea7CK3<8HrG1 z+3WV=Ww&v=K7Pga4U+CK!G(?UX9TWmUpil3!QbT^7g1B9qQeteVSvj|=1n1U+s$yU zCIngWHPe@Zo9Ty5qfpTfMzVQZ!~5J+^|96GcU(*gunu~jM??{A4%p9*!H-@qX%1jG z>~?jbKMG*>u968$-&-6f>f)4lye)rg^c1wxJZo+m+T{w)wN6e$Ol*iE$3aYS9g5^h zOW<)_bElch5=JI4xTUeQfl;SuUv1g>rBvw#$`QptQ)CS=ssHSI!2=`E7(u9 zUWgllVln-35+Mq$F%(`A#>!y>vi|_0Kh}I>yov$AjYsQd^`?r~&B8r+#e(~2!MKf$ zw9;-&cBPi;2CN*NZ-d>IJdh}Xj|=F|U!+>Dbcg^|0M~pjdWOYf_Rsb<9t(Q&ypIx{ zB9ZFUwmZHEopxvgZ7^uVz9usBmKs^%48TTPzNYgUSSTNkZ`x-q6((1lJ(~6610x?! 
z`tl3}$jckO%&#C+2$^xaF9XvMWvd z8RtLfzQ!@AOFGD~#YPMAFIUM!^y&M8uwUvvI*=+B_0;0fp74F@uH%-S>)Q6O;_6nW z_exE0(_ITjbryz9laT^Ym{u+4yHYJa{vp7;SzG4c{Douo>d=!KlVI?3*87U)fYzy4 zHi{8e-CN(cf5v*-OmJ=u`KLxd^Zj$@ms6To=d>Tq>%!oP-#c_z;s}iE!xC=d2xq5qtXq!CjBW+ok{z>}N-ChdR*$$X zk<(6gJ}Ay#Sl7<%gD{O7MkELl918<%ivrDy{t!EvM?Sal_B}hy(8`5B3a84}Edlsn zsjc|zN2Gw~!#QoT(1HinjOZt%8Uu(4(En}Xbcif7^3}DIgeV!=_CkB6&>KuDQ-d(9 z=2Ke@1XjYRdP{8h{n93*I7{)qV3__7Gc{w$Y%|L!T3yn4cSIwJd(^M`xpOaqRf>4h z4xYNwaY#JbiccSeLuEhYO-pFdnKx{k%psV3+ni{pDeVm^9TG-zl?bY$S6RZKDym^#v7|zH#zt@w2vH_&44Z{I4g)QkDyw)Q$XyBhzp8nL z3Ok4z;2H&<<0}7S>Syo%lUxo`Q4db%VBMBoX|cyNZPrDdPJL$WA$?4;dSne0#L2nP zi+*MCV{;Sk^G&;WrqwA5ed9oS7FM@73W0T!;@^?&n4#C9J%OT;X5_(ewdW7adM{N- zlLRQzXJ|$q@adUC`Cmt0pQ{hAO$N{^m&zW^e9;>ijfptohAUirFPfTni*Nnoem#Vd z@{PndAp-~$?FTNs-^$O4=&b6f%+rbjKWv!ihZa_a5&AE%yNY$Pi=>bkq+4y|54yWB z)C(nWxbdA*|9}BHaHgHKgSw+V=ccx6){RGK?&$GY@nJ0X#&k2*P8`7*E3@VDWyu^{ zh@L3U-#=U_ps3h&6%S|D^F@N;VV%=6K3h4sCt5`H@}L>&edR zc=PK`e_~XgjaryJKgoFOI(rH*ZjeL(f`iN;^nGiR>AFRTf=SmeTRf6V7&_X#cbJM9CoV1!#Qpg9ODIrd#u4ap=-B@gh-5{2_u{1pzB!t7A334qO*)w0RSH%vcA}J%%F9|{B6ur=mE#W(N&}qIxl#}k(@z3;(QszmF$T;T+!1*Jhh6;e z0*?oSh*I%b-*<*8={uRT!ojf4H`ISb4Ao{LOeyUVB8YXrQ)F>a?OcAuqVUpPXMOYR z0qWi71M{WnQH)~A{qa$<{+53_lEdk_%-4aW_WtI!2svEYwbqrI2ugMJ{PZ56wVqzw z>?{;HYtDbvOPMGZ%U|ipBXEkpb;80X^E{w~-0f}i<7rdqEYp|lbbf*nE|R`Jh0#E2 z)1Nz3$4eT`qaS&Wxvt0Um&F~*_OVm=R-1j!OsZ~dUi*@VDN~rR!3{i*dT=C|6$L6Hql9FDj&q;X@&B6 zmnKssWfs<&d)somn6GZa%7HKLbF12JjK@ttzgqWyP4TnSzJpm7s`?vYmGV=j=%Lg! z|F6pABCjM5qdUtdFNGTkHj}C_e$YDg^zl`8Rs8d zs_0#`Gc673#~{xr#KQ1pGP~}O3-pjLGYan&TfFYd5Z5L-^H_&a@c3#xuG&wHErG2Y zlzV9)v?wLfs1`bSx55B4w2w}fb5)HD0spD+*YxW?{PstigkRIaP2=fltw?N?kK0;z zAj-diZ)O`bgw%!!z&qT;V#Z;)l%Q1`02tuC00XABQgGO3XwxU12Xz0xZG86Un7GMW z&$sN3ZZoa7_l3?JWzREzIp15760VYCJ3QI(5FElgBPJdvMT*{=dLW{=j=jeWer6o$ zln7HLOK5=Itv_KNy;}8~A~r^ef{7wKksD5$aka?nnIvsXxLH|JzP>Z-WwID4J@;FQ z{rESm2fb}a9$*1Ketz+)!7|fy1CO}w>qFv<`vW2Idk!+WM+b?GyTkq21NA23B8_8n z*U|K*W@gJn=5$m4Zo^=vTq_aj={WM#lFYdt{t6Ac##XXxGK3JpY{%c-s^JT7_afNL zRn7mR1pGbo`Hf-1L%wsflYd`S*U~V-sq{1k-Br13TLuX_Gr2gnj#!+^rypXBpVuYO zAp1T*Uy5-&s;N#&BX$Q=9Z-|pCGzpm;?*P5%F4o4`+i%QLfvDkb4~O0?=DUPGL0Vy z%Tx%eY_a--1(0B+sqlN(RmLlY0x%7MX^7tn?FTOjUaNCWXSLe;s|wRwKE!G&B3CR; z64l3p6agg)<@U*L#gCa23?){s)#nD3;g-#G^@GHJr3oHF7mU6A&;_G#UI?JJ^7^bA ztNJ4$h{?%?_PgGplnRq=3xO6#@w|J=!>djBVM=$eRq^EJ1$Znrmufk4%HQnT{wEDh`+7^CCi(bPs=cqRZLQO z{7Osluyf11$tLNffvvHRE*m`YAn_2`<^rvHQ|0m&8b7cJ zG!JjZ7TMuLNtqR9(S);`6Vma0ZK!Xc$C948gQzuPQ>)YW+%8rWnk~j1pf)p4l?Bn% zO6t*7LTWbtedKFkaI8S`Vloml&(bTwV$U?t_>S{y%={CNWh?~yj8!Z{E^$~Fl_QPm zT$6poepE6+`nXbR<(0+2Ji9D>XziOdzKV3r#lzcZVl8F&b~B<7+qA1_uJbDT<@9*( z%rnvDldxe_ikrJk3y4c8>0Sf>sheDp#hz3B#%Kr=BX<=W<SlzIToHC)n`(us=z1pHXxNlI@`z%%%hJT&}q{mH3s`UUGLmZadi`^ad z;4=-x83y&qJg|Px(S}%X|L(l&jfhpN#J(;!7A1nyt#jFc+Hd7pw_q}QxH4FOTGOOx z4;^Z++=!OSD6aK0uJt0Y{YG|HUzGe8TsG_=40055iZ(|N0O>4CxHN4s9rA?l+d_t3 zv;5ZSp0lNE=4B3lK^FxtG=^X>&XAi`@NuNG$gqi^w}gQPhlv_=Mhmslqfp=e&>&+j z0}i461I)@VMX4tG!wC=Vs@$s9-IhWV^>%Ingobr&tMCWc_gB(iNBlyjHF=c(zAFTi z>4%;%c2z5(I`lr^wLY??VDoZ-FN-r-#n97u-HvxV^%SRYXaT~#M^qay5D;6NXSAge zf%_&obFbVB&qb?q5T)Fljvcq9a|=&fz$ALq%=Ya(mJ9jUJ#21PCso%D`Oos#?gA-i zo(ln>D}-zFjPoxlzlZCxV!rr_WLgzaM%DPQa~Gaweo*jBu(z1|0ln;QkJZ3tIj^D} zN#Zs*U1V@~wiz}kRvk6n!!X2UVgqus8IYR?)42{O9Ti7nbIk_Wq_h<2vqFqGS*({+ zluOh%x3<2>$+HW?f{qdcywWc`mu4I-obtc_5$^t3@`;!GB(n_qZ2wf@+Q1f>KH%W2 zIp5_F5Ve7%8&YvneCj`Y)*qH88RY8{QnI@9_hQa~=9&!QWG2Q3s) z>f~iKom73#Oke{{)?PSGMpDM4Wo(s{?r1i0(*!eaxq}+GI00!m4%1Jslblp1{mu!8 z_Qv@#^Ow zRf6N5ITT*U`qM@n`Ks3<{Q-k$ouxfXKZs&&+eFRo-K;{`N5$L+UeBQ+L{<@85^OoA 
zG^*i9yg|H|VYT3kvI2GJwuP3MQ~DQ{NvX$_w8Oh6+L_zOf@yM<(lP`kTsmUEclKP+ zu~ZR~C*D5|waxA+RSD%;6kW9SPY&D5_(IplFvm+X}jj$e++PJ#=GD~HbPMMi;qG_L7Wh51kNi^j(w?n-w zuP1$PD7U<0{`qBR>K!`3M~GtKGHKHfZ-zvPY~f$K!1OTq7oD@~>Spq8bmectl0&QS z7Ldzrf7aIH;gW{JV*XUAoc2W1D>+g9 zb$;O%xdG&0L;dO3SBST*dcuKq7Ly!!7rl$89x@{TV^-;cB?QAI{zh7)lOD2e64t%} z-9{~VZFm!kH*22W7b@K=Uprh($rgikR_?VaP9-PJWFk8%w(gaS=bBkEUS-qHumxBz z>o=-yZgupIydKr0f&_&&OMFfl&m`pyU2Ld9CTBI4j(PtqDJP)mf!5G!VsOOeOZ5i^ zFbZP({#AqH6mpq|m{B%5zh6?;uUE{J{YXk~SF^ zBS=9=kt~x8gcmAp)l<3IjdXn!Lt>)dJf$O#IzQg}x6Mvu8QC|*?*rX8(Mgj+Y(Cf9 zi3suqChg`Z>$kxrc!~=pD4m#O5&-2*59kITA;x{}BQrlZY4L$3AWqaCqZbaWTa0Sy z1)dW)GQDpid5vDWtG+OqYgba+wwp3j0VC_bW(k1=kp7A{q!^m3vsDo^7Uaqql)3U` z-*Hk5E-JgSptI#Nc+-tt^BCOm1Im$)h>=HWI@>zk^?JiwwC=2b3ab}2q<=wr<2@{p z^j^qA6kkzAswMCAeoU1KG!&G`D`KS8i`hvHfr-m`4G3HQ)K(hl!gSHn=2cmbXQ>c@ zHzEDu*MY{c@TRLAD><2O=SV5vNPg&_MP-UE)mj@DXTqhlVWVCfQxkUBa)lS><}VZ{ z=%u%4NjGPCXl^BIy2z5tXHpR(``O)oac1qdSyv5ZmXr=I~_7O%6QCC`N*l`Y2yaPNIRjs zJ1Nzua@}rzHwk6pjI;Pgr9=`D)yigPpN{!- zGPPaIR^^TM!5R`bzl~ZTdNs%nOjA(+#HNC0+s*XBKHL6n?als`^w#t}(amcYzo;_d zxmVq_zrR{7Z&LQwfR?XR?>_pS)hxmo6xCWLaJb(M;D3|!YiHNQmeFM5xC7dy#g;3_sQ~ya@yd6coq*)j2_dl5gICi za#BjB`gUB|eu(s9s7`8x-+)f%UasgmZ zv1kfuwYKJY%;&`#gA(*7kG_6?GNv1NEDWDq-!S4LXk(BI z5Uc*o36*~b7K;m$;3P4J+h3dO$aNMbATI-ZWOiGB*Wb87O)0}?`LW)ypp~PgDHZHW ze@`XMPiiO4XE9y+vk#R?q8R13I2-U%NA_U-_5M9TCFyNNc1{LjEb+2^bllrYFBe^s z=ppX_cLn45XgbZBcYZ5`wvq2@ce5wrfjny$ah2U$o&XwKyQnEa^T=znEJo8uUEn*M zj{)Nca;n!k12V8mV~UH{?$g+N1>As0N}t-}~# zHGqgHb+F3Xm6d!b-OC$r?I4($;84W`w>{w=o3z4VwgGZ17%mESV#8(;zO>?#fgpGj@dHaPXh8nuCN_#*M=Q} zRo4bVp>q)3Ca=%;EK)3FpF;G#F}tag()okUoQrumOgAXRdjH3va*JIrsTD}^ahg(x zqQ+qABiA4+FWw;g5HK$EcvSuTwaPY?Fl1d5GvPrQcBw&^s1`gYd`MZml6h1@Ze1{6 z3jr#jS>wZkp_d&8<_&QdoR#PQZ#~iC0zQ1=CXcP@?TR{mh>6&YZ{mhO2hscVh*awU zOY9U@Gqx`YMyH87WQyq}Sfg)NaFh)jiC`)SZx9vHCVky`@WL;73>-84Ut zijY0*BZtA(T|Ca5P1(!NjwIDh&~Bg68tp}MrUQH)Bs9{ZXMkM)mui@5kq35J&cs8H zN8zPlpTBX7lY6I&BiOxfaRPn=9} z+<^P@Uh~&5N!JG~cn%3(Jj?YkZaU<(rGYdbi&xD&7fU8hyytnca&>A`#_hYe4N)q6 z=l=A*w`73pyo@HNQA17BiokDYEIEsB@sY`@uWHoC_R?zGN+6AQ>?LJIV{3uEVtf}P zVY5*DAx)=GX8nQP5A-x&CD5sIL8^sj1w#|Arv&h*HdNhuN7Dh_`51wi5AHQ!`w3{+ zBHQ|cP7*zT4Hs@@3X&hzI{ZY3-3o9r@5{-ds+)f*+yM>n< z-TI7RF3*&Xq-q^KfQp(H-q%P5U1;{{B*j&J1{G~MIr(he(L_Tx@@Tk^VS^?)aQt8= z7jq~(kP--1|4@cjQaj(p5eLNzkHn8;vMvpy_e#HXFC;)A-+MPvH*4n%=>%7 zCi*F!&s?g`RE^6~^2XmB6bb8?-5gSSYVtu)*Q8teKDQ)J5~MA<2bx&{RNM zG0a3+~5T4&1A%_Y+N%Q=|@PFyu{!LwnMJ(<^ zuyi!vXvI{*yyls-;n=k7lSyv27kX9-5de|Yn9lmT7Mx=kQKBj?BqpJrcSu3s2H2t8 z^mGhg_I^_S|! 
zS~($EAxklypFRTbDY7;^@2lM>AQM3#R0}I1jn8qr+**)mrWl=QnS3&*sB0vGqpI9nfI~@>Z+S>7E%3*A{qC2FM~YkaB?Y0 zsy^1=v87!2WCh1>6o+>SE~!%DCZo`A=Z2pvkCmJ6ns*od+>4aHNTB8z|4fSv%uNvi z9G9e zWtiz|PZ%oD2)1x?bK;Ko%qGUlOgGfX7>)YKSX{k}Tw&jS><`~_DDV=_OHSQr7mQ@r zS@cjbMfJF`{KSwqQ=K-s-tQ@=wEul}vgTKwK{pT5 z@^yQc*g2*&{E6S6&}nXjHD>Kqaog(1lXio0i4rPTG9D+(1rDgWPeR5(ULw}Zn%qCi zr&RyOK`J0|d=B0geNjJ?-`R(dUW#!PTMX}&9_8kRPO^(v5R$dIM(5;FZXtM2*<9)T zFxt9|yP-ONHQ~cHFkYw^$pZfzt{KJY_FEkB!3bFPjh&_=req8BAo*7sZGq}ll&lKB zqOVhNQ)Up_9Bs|a?C{&W;H%B;t`RU$yFcPUH?JwU)?Lp=hb&+B2lt4_3VS@qg?}NJ z;H}h+!fVYx|E{q}64y#&)KEz0Pcj82sdWYN^~;Q*)as|jifG#)O==|(*Lh`g8Bs2V zGJ)YJAMgD!{IW~&ExcT}N`O zQXr0|EBMJy#9S6{PRr{ZsMU)la=OV5{g&LX$oXBmW(u0hVE$ppiI?bOCk!q;aX*EK zZ=@R}ABlj7ba5K4EJ*1M{bmDwr<0*fQmNVHHgQ7p-7PC{N&pKu}q+o{H3kYph|X8cfdDK*e}Dps(#)7wm>Z6c>dlL@)HypyIFE zgfB&@4aoxdVTCTX#%npuV@2oxxO!hOIbfBK{zG`_2gdXn?ZymR2XT^rW<; zWUhZAxAk~?c`vX<`qG2@#=;a9a@VcdsD6*V+@EFAsFj|q?naHj*k`AbC;Hjt=#nCr zT{v)G!j0RsOXja$JG{OC`$mN}R-yMnK|#T{or+q7Utz`?MKFpE7*Qj9)T^wjiiwH2 zK3dxUuo@-k*BkyQsuG^UIv+7T&;7Zhk#sKfc)(oIfxm0KbvT(bo!3>tv!4h9`M;(M zShGh#MWtp@;_0Y!IbFx4jc{!V=nTY6;x+Tlb|@4e+dm~BNUf7#>kubajZHLq;Gr_| zC*#O9qu_Qhg^r}2Vgi(TEEN#h3M{H|?~ZpG8QW6$+_#^En;w09t+aSR5?X`kat7+2 z3A8ZDSxiOL*XIpUFNCw z?(1C@qM^|3&9pCW2Vf`Y16hxxu?~F0@8JHx zTNJ;`!A_KGrQO|U`k5Ng)x*O>r6&(C)J+7uL84zb>1d?cG`|Om3KMW~k*#PfH=o`j zV0Qeghwu4xI=`$n`2!ODuu`F~`! zL;-c98`pOH?qG6@*>WscQQ&TC<5dai;`~*Fx{L+5;wE$()oae^_)2gL#ym6vxPhg@ zFmax;FqtP;rqkA1yn^w6f|@hy*>NC%7%ndYx2rVxQ%+2`jz6iG>D0)UHFDO~--f@o zCJzkTxNz{!w=#G&)mlVk=IQF{TBK6OxPR?S)%W}Li&K)|Eta;xs6{=O706H=J?${0 znk!4@Z<5Wjm7JJOKtr3QWQE?#U6e15QQ1w?a0Z$u)jI4dB);_r!lm*>K?Q=YAp|@- zJ7kwaC%t?IE9rObe()5fb9cF)Kt+7KJL8ly)tcCvS}^?3*Yxy4et20v;a++Bx2<<^mZxU)In49C^$L@Bx+v2=@AiPwj*2OHlS^)_ zg=EIM!904fY23=PprF8HB&EF=%Ey?@yHnleVpWxXPjC(TFfYFH|F+{k4hjm)7;-~iJw%@iS(OZ7g>YMKe(>ai?ucYWEp zr6i~GMAb)hndEGoM5tLH3q^AffT|@@4{#4ThDA zCR#x~JXU4m$hk>AM%QLZ)NAWd`cX|W2$U=RL0J5Gu~H?7?DY>S%HC6Nsy;8c7olLs zWi>+ynU-ro3lZczGo@PY?ZiviaElvT9s1#+Anl_bmanY-SMz7zKjB3-`tU(Z+vz); zWw@{vl7v->QyNnR#4)UO=hy(zqg@Yk5|&=VWhI*yn(J&^8^ih0N0}Cz1QFG zZ^M1zrfiZk5;{-=1n1Y$6z&tk@FrMhTJtY>vW~CsRf<)ye>b}vc7FGv>U$=BN!IY2 zLkv*6-gEjvz*m&b&G#ye24&NDKc@SU-jDb+n2~AD(u{~NGN(}!SUEap&QvsE5q;8m zMo(I;8iq;W_WI$Lc}|x6^U-s}6^(|)a=`D~w+*CB@W`CaE5ED{XVEufOn{ZBo=pJx zaxA0lJ*co?J{LeYH|cd+J+_Au_h-vShlhWU+P-&5gs81=$vcjzzNL{6)gwQOSWo#;dK$;@{ZEYZ#Kgqe*;$qv z`8YIoOG5-}lLeSk3J##ndk^kk4&*N%Ej6Vk@_+pJk)3@y@p#L=+(iQ~6CCk}cztI5 zJOB34oV|g|;;ku6>{8E@ktiL(7L!_H%>mb-{9mcBnQNbO$oTmBwMi5i2l@V00n_kknJ^fAV#(TQ!?$!`8rWd+3Oa zlvE6Ads1#c*z=SQtYU%3T#B5<#(w)7y|E;KsyUr(r@pzfsxz$t*d^lx>+H5O%6aha zE_8n>s>;a!do};{_t&W8bQxE5bVr={%c1HUR`0 z%S)Ktc2pL}(8@7tH97#sZE`-2usTPI-dvM@wwuQE$A}wur+=fGa*1!<-tKLlaq~A> zp*(Bc|N3Z0^*GK-=~Hcqi+eB9GTF6jI(jxs`!Rf{g@4MVd?x{IW)^9hCVlQv|6r-r z$`>7OH)xpu)?j>)&;y@%c1OaN-~Z(Uo|G@1K-d71r+zl*d3wCx^85a5f7JI^t{Gb! zUxc&>m<837nEGT2)+3|Dun@LhPQF~lR4B3U-h0hCYi>Im>bGpxRDG<5?AFdrO2599 zgihkhV2i2Dh*B>SeVIspu2E-|I&>TYp~!WhLOx3e#&lF$!8g6!n6FfMie9JShu&3Y@eM>`7FGaT%DUMs7^Ym|`! 
zT(r1wuvjJ0p$%GH4@7J2ZD%>)5eb{8fhMQJfxf0iWf#jKjAhMolhIt7+SePiEb@u_A9@YY(+ zvpDRG^~cakoyR1mVa<-O?fG`!9scZ~v;Ekzj!k|+&toC=w7u(|7pBU?rKeDIF~*4B zOv1E}B(jm%JhVz%(Xjvhd(pi9cvpm7;L5QMpvZIa_|Rq*gJ42n@&a^xf&gZLC|?i}e-m*GJ<^k2d(?;GB~X1@P1+Hh(6VUZGAdzUINDX_@B3K%N@u^*eI@ z){$}ABexM;-=V{4>H*%tn1n1S5?wq5`j81^zO{#?Y;>)GJ927dJ6B+Fi)t&>w1FQQ z5q8RjQg1$8IG!zDrdIWy1OtH@w!1K*t2Lu|?t5BvQEi1Kx~qsMK1rGWI5has<@;fF zceR|OPJ3vzcc!@-YaTih4%~vipkJu=Sor?DEp5fp?{&L8Fosk-Bl;^p|C8(7eVSKf z&R;L~&tiW8_gEWV#AL)&P*l9X+^+*Ia0HvD8>xS#Kcb5c(J}lo*DMqw_?P*%#HbhA z9LHpuV@DBgT|~E#mpH@{@}4~00JSN*b}mYsp{}P=V#CpMyuOf;J~t3U8PcdEj&V@K z_M7QouQ`Y*+(S1S-S^0K;HI-4%RfO)&7=-AZ)Ga-d5_w*GaN8--u%62fx$w9N6pYoQ}|Xohs(Z{>jPty4X`cm^Y5ddGqkFc)8D! z&{)j=!8d08&99{1>~=Ij5mC=NKxx=K`&&t`HJc}wzML%{*%^d+E^b*W=94*nQHv2F@7*Xh8$f4p<{{Ge5J*$s>H!Dtv2vK?@j0Vo$# zBpG%?))P73;E^zR-LHyS7Z&uFV#J)C)uN*!RUZt5A`i}r5I!G<)`*9K6mhlKh^XV% zbRjexrlf@>Nnf+JdktK=of`2M-hcRiJiP;BCrtAM7+afcY}>Y-Y;10vY;4=MZQHhO z+qQne<^4Z*_Z4Qkr>bkZtMMIZ+9HX>I z192D9WHl=npLZkpxNi^dUPDA~_Ax1_E8|4SPz?w9dPmT0 zqtuNtqWSLDC-q(Gtj9SU89?zF% zc#lW(J2GM~Gz1sy#aWyVQmJ(xj;o6{ z{V@<4#h+00THm2^j0uw%DWQbpAz?^LqHW^biDf9(17MIf|GH6}er%YViu5SlXLoe! zw;ShpF^Ne5<5vXA*!uA+K*RZwooYLO#VgRqta`7gxcGXb1(7aCJ|}SGxirh0#c-t> z*S@15(l^|?U-7H;7E?PE7n_XD`dS$2Q0;$(*92(J4EfC{_)eF{3j!94neq5J^BU*; zo$^9>)w9@vhY+Y`i-68sxdPF0u5pzNzs5Cr z*dusQI4=R^pi90K$zr|T0>4nO9{4q01_`;jHyHUv73dn~T_RnO5Ws{eDXB3~RV;8G z3I@04C5^Yt&s}TBr+x+zeqt5;znjXKXOSfi9Ce?bPWcJhtL=iBU|KCvyb|U+ZZTmT zVg`|D@k+0%3NKIA+Dvo&(I(9=@Tlf7TkjxSD46+sRcFolXZlZF*L(neHr{)`gQ2dM z@#~K1ji%>jMj7lJ^hSH+^PT9%Pc5?CxI$}XrJ<^+sriE~M+q~8xw+II@bu~oGsQ#` z8WuSP4fT&vP|wJ^HL>H@4nM_a&;MAl_Am~YXQLAUVPd6(LLW?!5=;+@g;ldr6e?D> zP+x;SPsr3Uo#{<8;yMt&voTM_^aX~v%3U@ zGkS+YzkD)reMHHTzS;{}JS7I7KRZ303a}yN0r88A)W7~6$s3;ReRS=EJ~gqEg#5el z?_1CwN()<4=|MlsR|J>Kz~ujQsl%23yI2gJ0VScp0 zTHt0D!i*K#y-y3tw}|Jl`T?5sn0}8EOkfJEhJzEiV)mc**k*Sxo1X4}#&RArPF>V1 zm-01FgVQ1PF=J+WBo)GqG##TvHMkmJ&dM`4FE(54PlI77#sIBvY>BG^eZ^+yi*_Z^ zX?nayL1>5vW6zl?htddl?`hD=&Z{jEq{7Yr%LT?S-*S16C%?uU@kTTDiq8;Yz7N;I zkpq{g9o<&ukl#oOs6qeq3bUm(dUg!71puy#Xp_A`?d*O_eHtT2#`7>;)EZcZ-^lLKK{t+#80sBIQMigY%DtV`{qbV?n<37A`F8T2 z<>Mh7LFI&Uv2vz#(|%Lq@ph7uf-eiLz?wcRU5d)R5(LvaYNn?Q@Z{5zk_unB=z8qA z{02Uey$Iy|8ER!c8cY|QfTWp_`V#^QOu^I!Bj%12A%oa$sWaV(Cm3u0KmMRSY|9MP zekw2zNzB(-GR3Qsq2g`-dZ5Br^ZFHYGsVqR1LeuIi!K>Yt9Lv?6lHusjlji(4rXjM zm+TYyx&OkOUG*m015~ERo%;APn6xElK@M#rZ22$n#9=kEmAu`x&15YT^iY$b z_lM^HfaV3@(}D7p}3;+y-f=fbH}X%ABFi7v_dLjB;3PfnJLi4qaU zNw-!m)erb+3MO?!mxidA$O4?l*Z4?4Ut|0I#=v(TFK{%y*5Y-0OthG=gp`c;GKmRn(7Sc0kR>4O4p2P8@VZTQ~3~E1YXZxjskR zwjvFxSN3^j74ztNi0Jk8`j%Mp(~A+1YuB`tc4=m?CP+q2@FRL@|A_NoZIO13#i2o} zM@8Knw?UA>+b$csSlK4o>AdBiE2F^<_a^B6?3WUW6w8;^Ak;4FrbyY5QaX&NVHMGB zMr+-k@@#;YE9Mv+1pGC7j;GnZDemUV?N;L~G38^roA!0_FfYNYgtJM7w1o1L&Emqs zpPs9?Pj3yhl1+pAr|aFzf`=VI-CUN{b@lT3aXNI9MN7@g`6_z}UQQOgsAt_qypQsL zfi8Cc|3Sar|1lx8R~WK`x;o?6xo^d`S%9ig~GXs6^LJ}lepW&LwG=MO$$z+v4XI6dcX>neOS z8g0h1Kf1sRW6?$8a!lMbFO$~U4@N85L%O^+Tw0dQ)EewF##uKO-F=4nA#Cv6keRah zyTlyt{&M9Qf36G`|8uU=>h>hv!==Qbq)`-L?6vh~ zEg#m7xql@L?1kZ!qIp3Kwo=$1qzcU6sy*2to%-T%}y>y*{ftT{M?}s<(uErN5@g`pK@bq>M(zb$aLh_08JD&`Zu|Bj*okuUdB!YR(R1T#HEbO#g zriSXl(2iTm@a#~=Q%2@^3u8apM5zY;?YBMaC=0;|Sj~86pwDrljk^9^1+jTZrw!{t zp0@U&xD?R;Qbap0{b+JL{dDFR$sUc5b=?$@cvqeT**lgtnrNM31>d1)>${c zk}~$Ms9hQ&2l!I;UMJ5!n~q=N43<#G86z#n)oCu4?~lYv$=TY?10D>c*ip6*4u1;E z1hMuI1tfe{md|=>~)B5m7ic%0VV*!G=x82n6X; z>7VKcb_voE@+n#9@f}8SeoX8XcY0s=a%EeusGMmXoI%exYHU1}X753WOel33YBg$4 z$jg1!n|}a=cRVUT2cCM$9)@St-fOOt{^A^$!!ThOF>lANC^Y8&9I@Z&Ihj^Fsvho1 zluRan&cj-;iaZXn8nNU+cq>XpEp3w=iOPh>HzKeMCf>_w7x7fb&qI 
z>22gf`__yLzoL^YPD+N`c~6iyrzR2>E44l#@qY@LeH(IAwRtn)#p;-r&Y|RQsxL|A zVw?%CB6ZEfj-qO>W@<$1#vf?Tq0L9_hm=ub7lifmncHr=GHvoSy{!0|2(G?+jR`4? z<`8uFtuZi@91*A)Y$JCQl!a&Wpx=e?MAg`Sw*36`wfJ%Nr(mOUwME%47oyzIKD=0r zStwQJAF6Z{TIkbOE6(46`H0?(+T4i=;xE-U<0cp@o7Oy?q7Ckyx8vRx=i{4Q{`$mVY$^Mvy9i6SdgIEwG(Z9X*fH8mN4C1&ov zkKiv@$A+q-3}{iRVGOn=xCNcLl0*^zs(PnT%pPaYCEH(D5n3=O_^3@sJ}AHasf)3>hHtY?<3wVF&uGOjHg z0Q|$=EI0D;=#FvDeL0nBepMqYO*lJ;e#tM3mOr0+%|cDnOZYb*`VV8@SPj;Pc2A*W z!ej!!iHw8N7Mr(t5KWY{$Ojv(#Ii!1mh^cVkzoXDzx%zvW4a!EWAZ1zzh^yeD4^Ib z-Lu1eiNXzpk1R_}-!v4`C13;5gigDlpys*#NWg@Iu+EZ53WcD~c-OkQI~Gpf_>E1R z15Pm`zooCA0qH9y4Qp9hZYE?i9^FR|UN#ee&l0#(_7BmmRH&?KUyJpnvg} zLJq>HiWdgpmW&irpl&1g(b|l%IVi$V z01WOD91w6mC{_Z+8h8NJy$R%xC4bN!7UFe{C3J}c^2&LuHy=85*{LLDl>5d?)xz}X z85v(+L?$O4)Yjh!%QX#nd?3%4O-4os-QljIX4^E>K`-Y`pmH|991NMc`L~vlr=q;9 z9kgKC*8dJLgv9>o|DwImL{Y!*T$Xp)U%ab7x91k_7=QaBT`iHd|8t+fVhRbZng*nX zcq-v_U(!*$$^7ES_E4IAvF@C{=6?CkC=Hsd1`HZ^cW{*nlseg-=u_oWbCbgB{!_Fa zE+pL-UaPMIimGO0I3I`dUU{8IaWrw@?6bk*eobevL^DSeTCO3i61C%q&l0rV(%6C( zX1>`EoD`r(VKH8Og`Du;KR-+UqpL21bh@oUre>=#M-`tEg%kqDuEq8xRPo^?VOK|V zB)ZnE6w$>j64UyPrIs2w0fiIFV1#}tEFLzTgcAy5yx(!K`FG6|rn?8Wu zWwJ9PUD`=+r}zMIk(oLt)5rZUc z!)ec!CZ30nfb{6>(oY`k-+|mQ2Yv$H&Qu zb)ueUW+%`~R?IKmRA?Sbem*gws&kY83CV&`aMb}lTkoNAW|eRP65;X8?q#2QXY+l1 zu41HIunLt!rdOL-WkhXcXssaTEZVYE6Y$=)0%_CTFp)lerK)I!t}u`Sk8}0;_$;mU zPmtG{Msn3|=%M#d1jPPr0!FcfS}l?H6uaO8-}Yyki*Jj~{Fp`eAOf6IE>SznhyFZ0 z0Qm(+hDSa|O88P8x+pEJP!T<(49%8o3v_{5bs!$Ct@ox&>_%Tu_bDS$r~$k z*624P88xy1i`X{AYKTbSZV*K(L1{^$A2fXt;(7L5RQZj_Qd7x19&Q|q>7VF67zwjax9?E7`RAABQZh}eU2~Cdn8AF;X3TGboi358A z92BgnJn0w!lA#T93TRy^%klGFu*1yw$zBCW8bXzn<5v$)+t)3|i=4DZul>mbW|%&B z+}WSR&6s|pwAer#DW2irzv{Rh`)8pxL!HgO_Yv5A0cg|{SCB5Nj#!6J=iVLI!Z!)U zEHVHOfJxIJVmd)G`^D>`yn+fSdgE(wn=B=!f6QM0uA;SZ1JM4diKs}C!YKSOs3rKS z;6rtgqCZp5+!9mClsB6 zf8!UWtPl1=P`e}s#{sdm&D9)H>-CeA06ox?z-4SKrB+#rD#u;CdQXTUn@Z0b4(&Ze zs%wcEwx>0Eh06M-hnU)WR!hYOY#ui!Te7Rc`s*>{_X$E;h1~w2igy6;G=8r|Q#|S{ z%T^+GL9%seQ)qnrPXY=;6Qx6APz#DOQr!2TAt_@ZtKgwH^InR4*n!Rp|K6}QzuL&y z=`?-5C430k>@s-eJ|Xu?dpzBuPCIN?(N}{*9akDU+yCN>&IJELEpy* zOnR=uApjr0L73D(Y|r>@=JhcW+~5*GjN9uCgHz;-Px_|B#a~WiCNWmT?q`bt^+xei zW4B58wh#(RwvnDq+O3Z2AbDCkqM&0|Ea%ESaEwMIJTN@h@r-~HLrUS~!r;jfgC^6N zh<#QaMqeB2luerAb?|)+h1N>wk5HB&tP$k{kYs_y`^g1)f!vL=tQ|IT7}%fNrmSe! zyg>n(Ru(ESzuo*y+v@A>2ugCZO8z}Ip$`aQa#y_0q+>sag_9!&-774y=_z{? 
z6S78=IPP~loI;Om823iB_<4yOh*3rmu0o2}`-3*+D>4!Tw__yYRmgpm-aj(b))23X z`x%CQJVKVPFI-uU85Y*J3A95MdQNhst7n08I%|cx!05ut?mjph?)c$34eUW`AWVb-4uf62gpG#|kF;hKT-IF;xsH>Q{x^mD$`Tt_R=j;j) zwE`^h=TCH4SXoVt&3ivW+98Aap19yFmeZ{T4e}oNoN9BPo+~S>EW}!nMhS{sW);Wh z;l4Ov>Sk-Xfc8i8pT_R|JL(WJ68&}7#?2ezthL@TRsZv7I~ON6pS5;Mue|XFPj!7K z=;)<@;ue^w3i>wyZ5T_RL-O~yj11>38d%Jp1G`LP-_GBgwun|Y`&w|%QZrNI`S$we zj>p5aZsiBq=&qKe2U2m6NI2QzJSD$J#PwAk3IvV4DtPtZyOv-z8I;&0t64CLD9;ZosVlV_b9a;|mVi^wTGGUqzeA;o~&YJfRaOL~<@ zOHMF^dTAbmdiFo?h@5BG6Ml7ZE2RuJBR%568oxWXJhde_fzFv&4qFMBJv$7R2=scQQU70&(f#bs1{|6o6dd3S@g1ER1dbrsPqtJf%|%(w zrwtywq3OH9^3-F=#dL7-ZcE!0 z`+xLqqS*)5!y=uNX}`C2g_5)-R5%rry-jh;oGZqrgpo*!zpc}s%K_58n*pe zgDv;s^4f(4WBz5s?|c2hjk2^Hk_^^%);B2PS1N6dyCvtiZD* z_4s*%oy0OmLJ3?Ynix`H?t>qhA+MRq_HP`_Lxmn4Kd~~8attRC5p@kG=wqJ6d$1ny z-sy>9JaI|$x1;=xked;O3l*Wp$d)>BcWS}4r zy~%>*u1=L9KJIvl!XPnc_%}qXLS9!?s8y`1LkFE0CG8nXy)v^4{WU3Iw{-JB2PP=E zKV$UezG~q?f=4IL%@8s8Rkf|RAe+=pj_$0Yh14~>urd)o;}F@SDGdMRyh0b;gc-=EYM zVl>z4u7>Hlnh`PX7iPC3Je+eDiXJkjKa&wodu(^JvV{S17yOqY9RMGu+o2-*pbTPU zOX#Ta0{E8Sj(7-eImWzu?i*&+l ztgEh}{rW`X?~cPs5ap&PTjLWFMEg6C{>F*Rqv;&H`nZ1jhen8D+XgEJ(H>kozow2M zEanR{0PC94Np++nsV8D-W@Bccqbqvx!V+lUB^r*FK6TLWJ*qobyG5Pcr2cz?8034s zu-b$j`x9pGSvE!&FDA8K+Iz=h?r(6i`hXr_HVg$hZd3*~Zp$eWEtYtazjPwxYkmL! zKd)$()k^(0^dR!Jav3v~s?;PshlfU0h*V%W+9$B( zAD~6d(bjf>iWRThPwLtV&|=P+noL7Pc`0+~s+OIwygdhNKL-Fc*Gq;9DTEb}@GqEL z-Z#>Q%aPuO2iOHCQEr!j5iw7{1}-H%qCu~oS|~j#QbxzI&~U5#yXR% zFcD_x=QXQB&|2jxdiWSbBO}9x`#--f51^BwiTq5qC(p~zk@xrQ`g43II2P^G#O#6j z#L$4j(Qu`#La27hM-MlQigapn3-#BnSj)@_&r;}*P6|tghEb0;XF0aVNb6zuTPXhQ z)oneGsNB%dCmgR}JwumQWQgnhCVcNR9V&8@q)fqLu^l+LVOY?N;F-S?9_})=g-jnl zwr-!YYWa@~OpRf2SduJhk+;5ITDy0+R)|Y>4_z1y#gmx7c_O_sF*DQav@aTA)0?l7 zhnBY>vPLgRMH+(>UQ%FDr1}X%%COin_%?$$xLb`M*G9Scc+KQ$2)9pOELoekR965x z(e9QjPINt-PZnNiv7XPbQ$}ih?2H$~@t^-}pmo1(ROxkgTvz?ogYWiP);FZY1q-&`Le=3#ZjJ6JFO%k zLmC^h&O{w<_Gxh~8Kro+bPHxo`gj?A@7fYY?ve(2feawGQ0myAn(Zl?kqo<#EJdkK zOi0NFGdF%;$R=UYK>JvLkEdflD_eWEZHKF7vHvKAiDC7rZwV|9W5c0XPLlZ_lURy6npmIE{8E{} zItp(XKAEb^p@G}-tTI#JsK@eSJ__t zSDf8epO`f%YB4x0Ez&QdSKL!JI~OC{^f#_n(?M^7$GHbbA?RQ;TEFW)G6I;JM=n`` zCX(VWe^PoEN&@*;;ej1Ts)_Fqh{eN%Qo%vM*efhinba?ucyJ-YkPhP8Tk;aBg<7d@ zk_kgoY|70=EE9r}j6yEUKnTF5>wU)&3nm_x2@6)!z~SPB-5oZkhYiKeytXZmyfq0} z=EjLU(?_FYBZLMBT?mZFO+2!pQalRE0g57uqT|vNJXYXlBm9&M_b{<-KXWb7cpyGUh55xn%SW|Q9qn`aT8YbCs~=AD{8L7`uotAr97%B_%DzyJS(;T{@RPMmZSKs(*;% zgp;NYmG{8r_Iktl$0dVrFp4(XwKtzb{=)}{`rz*u*2@FnDMCgyyqA3W>tMIMyc5Kd zqJ-*>p0;&P1jDW=;eM6mTmG^F0{6NZ=xtJ-EZsF zDfl|Cy{AQT_+#Aa6gyC%JW541QsXi^?N%o%_?{07=d{$SZr1!A29x0-5VRLEw1w?e z7wgt0Cw>k$?<-;7T297G9^z!rS#uX-Rx=2CAEO)G{Mwj5o+-+KBw#5kiNHLVcD|-R zD>2f+*??EeD`4~c>DS?Kpy7%6fOw+PKl0^S^VN0Z3Ix3w5uzBh!*Cf22SO4)ICvZ+ z*KvlaEw>9Xeq&>6X==hOf}YEWB>3P1)T3ln_|O;i#_p;4r)tcYSos+FxX8%v6%8!4 zy_}*3?|OjgrlyVR9|Bcf9dCE1SAYk){e|iAT$^sjx!36~IrQIpUt(m|(R!Alq(tUC zVact9k5N?amFi-K=(tqp`ke3c1NS>#7yV{I$LJ5WYnLnma)@I&Od3^dE@SkwW~e&z zP@$wJ0%PN%{mAzTd$cI?1SyH4&P;TYagK&NdPdfl4jCKiabr@Y683F#{c6Dk(TOw0HR30wfT)1Fs8G4f7YY?0rEHN&}ZH4SsmpmCkg2bOQqGf z4pa@cmfh~;vOe0^pD`ulcO$TnD=Xrlq|7TU{XNadSSI;k>hbl^_0p4iKj+`Iz|YB6 zx3m9;cg4B5SP8Tzs3DyCxw@B(9kvooY$QmchPj+*M+UeLuGQJY6cpL zGay$6nY5`_gJ>*7jE|V8iK0UrEmTXwKz~_Z$XXF&e00xap=l zcVocT4sRoOgLw{-WPm8v7Q_7!H^VZNmH(78}5fjJU7iqw8l*Pj9lIyu3}=r&(r)pNs^#k^He)Z(XMOSWKR$!Lc(FMi29MxxdfTw)&@DQak8FMgcy|Jhrfhr; zxUVu6b`a7TYdcKo&#}+bo@PEy`3o?9V;JeUJ8N1y;}I~n1g}Cm5`26V-PcxILf|gOYT-JhybPG~@)+qwVo+#-Ga$BQtMrIXX8jw8E1W zAnmwy9b#X%jy*H)a1ZST8~M{!yjo7r=gS^@xp}bSGMj+D5%Hdn)7g?698a6m{J#Om zn|H^{>|U^cmi?th8O6Hx{C#R078Qp-MksidmEz~Cjg0Gggwyi~`SD|zpRZsz`V2)t 
zxl~e`cUSxGCUm~fRs%oOg}iT}d2mb`Fq!mz#Qo7AeSfd%!cR0` zFPixXBOD5IkfAqBEcN-zGipOMVNW0ZzJr?YN5mKhX+uXzlik?}RqpbKpxzL)NzcKh zmw(5^1y}LVyEt<3@Z%Nm?7r307?@}WwPCdQ0N?Zlivf1*at;&|uFaV0c<>^e+FaF0 z1bHpQeH7r%6RHa3^tru;38H^fry`bl|B2*u!F6)I7A+Q{)m$$9=EF1@ zAMw+kEk}SB)y}8)_Lc6(PkW!QZmD8LVm8{E?-yBa;O31bwy2Pac!J1yh# zr^S=dRQ3&Y$HE2Srv=%zVIbQy)lhJ;RWx+`a>bQdWnEDLB--1smVPS%+j@Y$Tgm|l z8jM+M&5)23-uLBWpG#?8Be9~sSXESmWv0Wga1NWFfs3D+yQSOeTyJ7|r8}?`Q9FTc zkRfZ-I7iOAMjn2_3Q}a_v&}XKU|`^rnVgn2OZJ))WBnXlYA&h#Knr&`fgDO>n?I{x zTYZ?~uzyI?<}ZAq?X!gseNdO2w#ZjzIsJNq;i?GYg!XR5AS7wW?G+Rxg*?M=zxS%o zzXK>c%!MOpIO-6$l=`CU70oEGF!+b4mRc)!2B#5k&G0D&f^=eUV5izt_ZvzX6(^z&d{(CO;_-uA3 z7i&*xEV9)2YHv3#`CUCbeklY5o`F1e&zBkzL-UT@F474|n`3ZPr^F}_1!x27bNE(p zo7!U_r^xM-h7BjC)IAt2_Witgh%&3}cJVs^xf{H-f9CWSTuW9~EzuQRiV>Pl8uL%n zOD;~C^G^MH?C2)64`bR7OM7EsGUsQ%d-~srT7}xzAy)KK6@H^<16vm-$qB-0Z!@z&&FvT3A$c9K++b*&Y_p z$|3B*1XuCxtEpc2ZMPQnIt75w&4mUd&x!Pw#6f+}R7! zi90LjmXxRcG&x|7n)FA|7`$cfMn~XcHnmne0DWqDFVd|vz5ljGvA`0)M=Xe|s+EAQ zH@ka=H&;*CT3FE+imxCLFhm3;{?ZYQ+ypq?@bTTmb)au8oqLpLr~zvF`MELLay*lJ z@k6fIx;2SKuMU2Q4LZGU;6r<}iGPwT?uW{WvRV&xb&2nRejFxoZC~1}qSfWM)%SZ| zSqwskgWO9RFM#HX-{3`f;CXXlxC6e)*P@{wwJGtuu~=BEW_b~({3Jdb(bE0240k4a zi|Y%Juu$^J!(2ynCNv4Iqk>B)Jq8wyf48I|!`7?i3u>Up8}A~9<&Cs1MI|jf5TNh^ zt8s`MnXeq_6pZ35dVJmR8=`d719Axdv7TxmKz9s; znAmTT<#wzC6_l&w$wFr-jp}lV@;vEoY{LbWnTaxuT|XqJ{BUKL#%kEOh9EWF4}%#~ViQE{-MNkg@eFV5b|25Ztpn(hNHO9!iUEeevBv5SpUp;! zykQ@sI)B1HbHSP=>XVy~qs8ZM(3ImkWkd(=+9ATiI!v7f?&v$@ zdb&~b7?SKFiBd}sz}ww|{7QC^ybZw5tK%1&#TZW06Qu|-mq#NyXjHkCEZqGbp0o5mT+v@JN}9jbwbAOKWn_9TCW1fw9XK!%RyvWZ<0Sfzf(F^N6J#tC z1wgU{Y|>^ngC_NDKMbKc#Q0N za?o@tlDH?%P7&n1p=a_RT}_azjwX5pS!T(b z>Wv-K)^*%IP>A^j>V3-TDp}*vvdwun8#w9@bB zAmkyNdAlOe)YrB<$gL0FiJ%T6aCTn!B!h?G6}8GOb*iGk zFP}-iuIi+K@>w(}e_|>>S)T?~eZF8UIq}jZCcE}@81K75!`E&-xJ@1pM@|&0HvKp8 zbOIuL0k0a!-pS-Q7Rhv@-QoRHWk;Kx+Okx{<_Op?D@cYQU?`@s>@e*_nO7{6185Mn z9kV8i{V?5Vz_iOKS|85|CRY~6ylzi_ge3tkaz77=L)$H}^|Qi&z;hsH;9KWtCwD;R zFP2a*Hegg)>b8I3VpyC&oit1!=kFEo17N7GI7B(w2}gCVeIk$oYY0cy(wC~+?~^Yd-pv1@5|YLx;NcQ0N222Ubm;!d8-Fu;^_o)-9kZ2W&61(3sYS_ zSQZuN^}qZmrz4>Vzt2fgI!g6(0(WDZa%UUzFcN?JeP|P2xqkfRzE^KO1Yrxkl^R!L zaoX@JEG+Er7xKRZ1MD7ax`xu_!>gOg&j=wTKDIK~!#?ko>+uLLsHe@K0>GNs;mY@n zf&W2P{+d;(uZBpSI#mk;zfbz;#B?wT64f&Z70GK>o5BP5p6Bl`%G0iyOP4(jCKoKJ zjoXsp#l!>sI^_ft84i@dkvf3yYXZ;NzgkRH@3M6h48~`X?hRI67WE_wEw$v}WBXoj zg2BNo(ef-Rzf5oIEgzx|EY;vvis^s`?EqDeB-rJ^;AgU#G?a;HK06RgnoQHs(}j36 zVG6%(9weidy&|zXvIa!_|B{*#8QPpar@%rfYG59kaxn&q(0BmdGQa+H#Ey&=MNB#A`0KM6SW``TPu zz!d8lT|VjGjdmG19TR|QRiw*h`9;-mP3fQJj_8Q>XWjL@1<&#;WD(gi|Dp{rzB+8%8EJ7&zcmAPF}1Ra%Q))Un=KVA~Qf0K}q zw8D|kNA$5nv%*PLQ0)nX`35Z@EtzlZDpFEb47J5t-7WXuxztij%_-~mV5wrbb6lMU zkIL$zt658KV5(zk(qit8*h^b`z8pkGq!85#VDBD5U?spaDP?RNR=Lh*NxBn#@v<}C z98MoZOEC*ry~LWd{!;&GF+y0x5~#vl)KajbkD7s;B!!3NZUmsI^vUm%rti`E2Yy!C z+G;3w8ZrBZMu1$)LhW6K3D zY%ouY-6lYbpF__z8Y0=(p+wE2Mkf4EbE;Xx5T5Oxkg*r-n(6j*TvXruoPCs_3*Wgv z=$nVSf;8KRQpTusYYv4RLDC*h*q!7XC-`_HDMeSY@*Lk68T2q2Hjl>gnPXmFo$9y0jf55X-8CEPB3eAHIYG9!dsjG8m$z0Cv)+| zV|r&Vwq=jYP)X8ZIJ%kNl%{ad2d6@B~+^5-#*4+`m4 z7F{Bh1NAE`l0b4iz+0`p*fbS)ZiL`kQ2Xtbn)A)%x>xbOOu1_tj`F#$;IGp@(T*Oz z;?a%qgiAh>cM1ufzkIF*1PxFmPN)Ru`Oysp=x%X!oQ|~ZMt#s7;g+%Q<#3m5ke-F+ z)a17u|MIC04K&?QDXM|Mizps|f?P-+5&v|d&P%nEr0l<-@o-G7{YQJL9Z+=+>OlCc zuc+#9Mkr9&V2!YoIqY^_k!OCEiDmQqbT>Bs)|cvt+U(Zy4*bDHki4$YP^ed0A9qNd z9)UELTR8~@Te{piubx0yE*t^L{F1nrIMFUf$B}zPZ(B>n7*d_GqSd_}G25RGFV}ch*II0zhv6rD>rX@P5g9&hwd^GX*wNcT3xa;+ z)5nL0s46L>I?_=Sa;0$=dy$sl?xP;{?YydfT}U2vo7yIzXo3p+Njl8!iN6NIQQHk1 zv~A1LI1XJE8EIyb$P^;+!tN;&XA8v^Sa>LI<2VdB_2(ko=H37=;GeAv-x;{3d^;Z2 
zQkr)Pz=@$8o6A4|Bc~5&-2$tCK#PD$HE5V;Qk!r0?-C!a*V|oA(Ds?D?d<}?9@zjr zp2ze3f{Cckf?W>ZN~R2z3mM2?$?vePgClEgsC%QMb;=ShKOlp3CXbI0hI~_J>B6m z7zs1nQwzauR3ztYPOnDi8M(BlCXNjhy$)I&Kq4Ot3A->SFhesViZGj{ z*Q-3Ju3^uwBQKAHge0%5Tv%1w2S)Qb%u?AMLUIEd=^jgLmz+Z~KJ7|3z;NrPs;a84 zu8wkDrK%O4Fopv%Cq=kw3f3Dr7Gt!P2cgj%fr{SE517K8fXoH!R;khiG+rIf6`fq- z8NNh%J`I+BRg^wUm!0_ExRF~Nmg>AjZQL%pgmlBw!9PfIdhGX0^7HfSr${-#{FITp zk=?d?_VU^~ADIIVDLGorNAtt0>+-~<4HHsbRnxgMV>8{EX$I)#3fMlyd6&VJA~41% zP#idwhXCp7HN(-0!lx56doX`UQaZZo;^JTi3hHyFi9hsfcS7!4{Y-A=-GZ#VX*`cOn^iKult`9Jh5dd${E6bXB;xSP+h?LbD+i-B>hQilYcV;t&Jg=( zaUWN0;nPN`W0y%U=5P!SVg#1sb3AQ} z1IXdAC#mHNs_UZXp$!Y6MEsIejuk8(nda36!i^5QK0qh4=?N%e<)OPAdUn} zcH$uWtf@h~;f(GHq@gBi6;c7uMo#83Ifz->Cgf%oTVDcT|KFOcLGQc5iL!1{SH58% zBhHvK8oh0QK0h_JbaT^kh*fm^z&33ioHPjv{7J;g%720@Ymcf56m|MrjhaV>Oi4@( z;_fZ3H=pdrKI2Z$2`|BBJTO5UMB+S{(HDUhKHdE|1E7X@(fPBe1g=gZL{!Iv?rdYn zLx4VK8dp_e5xM+@>wL-qQ4S&fwl7%W3lp+`-v3eC%HDlHJI6UV6Tz2Rd3LQ}+>==1Tn<1G+d$%q}SaDPm z=uRPQYD`zy*^oP>|>=V?@y9cKX(rI)37@0Rzj!r@VX!CMRn>-fOy z(5%B(LF8Z$DBSKa`Wv?ay~xpTGQ7%8Zu?BUE$5iA(vsn_GtI|a+V=ZM5MIaqq~l)? zP4U0I5to`Gkju)lK%<2E!dxT!p*Y^XJ9*W~-E!o2JX{nIF3qk?Khh_Q}%4fT1m8lKB&O zJY>u|bP^$E>qbxx%zLEKQIj43pY!}iG94~chhr<7nf@@hjj4!Of%l9(@lmL8Rj&3h zIgo`rtip^5*t@%aDNvJBuMt@5Jx&Ea;S)w*S_weBXTWW=aRX$zD01kn2a2yY9(1Rw zPeBZce4In{cS@T;b^!lWv!-4TFB_UJ)@^hnTt#~!Seuzi@( zHXTDD9LgU)V`I`WN$3=!fjw9xa8yg7EHlh)no>pB^N>3*N~ZYz%QyJT1Y?a1wp|iU z6_uof1mQq`Q3aA-!E{Gf1Lro{0D!`e+e?(z!}9iBK*|$lXAys3=bwpoXZ87_4!M^< z6Ln?{h)Z?|toB~?GYvQ1XS{4BJja%`HCZJQ5z@@@OwU#TPGYv^Mw$tJV;}vWEiC9e z8y;u9+q$1OS{~nlI)=sgpIkG}@RTujAn(WB7!6zI6353ZWdNX;m2 zlm&Ml3EH81ibsFrEF$UA3Q0%{wob`)fU0n{+NV7|*l89eghy zE>@@K$XdlllAJwOQs>p+xlFJaPe7)k`_D}fXp7?lF|~^AUYDR)&t>}*@xL746x$CR z(k{dw+M2h?&#`QobOjO97D>249G_~|GE0`Ei}cahOrZ;{L9_D~Z~ z9_P=LzD|e(9&^#I0e$;dGgV%#-;-=z$&LSDZAg|Ig>Fx_s+vrDOnuxQHX0rXo;7R@ zp?@yV*m7M(XJFHtyX1R*Nx1x*)ZpE!#I*-!wYGS@))QcbpessD zIzs(vNTM7IV`WvTM+QZf_}Nc*M6x%ptRXJ|940s+Qgj%jkKcO;Nk~sbL_|p$)@i_& z5mqa$#D4qQlt*FI%}1I0*!yzA_hYhZ0Et*WB~<=tU%o_Cej!y}1^NFF^$pyCt-+SD zZ6_Vuw$&Zmwr$(CZQHhO+qNe2?wd7#;jCJ9c70n*9=t4uoKH{J3?&CCKD=*lrLDQO zf8|JyoX7!;fyKP0BMbS~=5$yusUS)#C7bAPxtwqeF$^5n+5 zdYP2jXOb|&UqtZe0G!k`?KnzJ5Zu{g+nL6f(JLdRhFd4;BezSTyBz2Ktg3~@Y2ha4 zC%iG=m?6V}lSTc-#z~_9C@7&8^b(#Ry45xfunciX#Euqiii%fY4 z9*D@{2~YsBd!d||3RKr(2x$FAYK!y_pRjeg-Sqct_1wm#wDf!)H>W6zO^-LcEuF*9 z4rQxE$4v7*W@a+WOmcu-76FL8v;UeyGS^{E6Rm6P(+bDU_S9+S@Ne+BgN@{mt`~K9 zw|?GEqYSPeuRkv0(#_ z5d35YRtHfkJS7io5Z)Jz3n+1v{sRcp!T)(LEAr`a*}Kht;BtAiT+s6lxrTm!&T?2G ziV*YmI1QotmyHWcaAUdqCqbC4Qj?GzPVv>sndsLv<((?OP~>IZ0_U55iMy_}bz8xce)$v>@xiF zqG}pCsPaCcem~Xu#Gkh@*b>oP;+_YoXs=sDYb#yd-1U?o7F8(etOuA-H@fF{4HG0h z4m4Iq(gA2+Ns_g{e?0cv;w zbbCPfg_oWwWN>-hFA14!YuNeh-a2s(iSf~R zdp#N9<#ql_p_Cm3Bzf8|&S>E?j)!*XpsgMwyRCG-1v+o?lgB3xAz(ZtrlY93f9VMw zgs=Mt+nr`^Ov3w9@gMh9gmw4KK?Mb>2lOvIEUwRrK6^#C1>0o2ZH-MhGvvv zz1eyvdmHCAlHA$)wDqCPx8l6M+u@b{$!L1t-x~z83mOut#67uYxty6hG|-iOee)n( zQv}b~ZPzJt`3`iK4q%BC&!8NPm9i*)%da^$7^M!U=5pZ+uyb`GSteh%^niVoJ8is7gqsht&I5@~3wo)&KH z62QmXW@_O5WMlng=e%WL%yaL2;7diohq@3sXKX#_Q|<6)!~|ckW*MQ~VqCEy96`sg zQN)0h&5^Qg(P>>Z0yDK-V5ytRBS-C1G3KV@S#z~13Vdn1!KsF1>-~GxYW{lV+31?y z*BndLs#A(IE!-8DU7BA`%Qq}}L1353&mNe(0&M8+vq(vT*Pv5ui|b&!j%q~aCBvni zuoX!)+?ThZ+wy1+mh9o#I#TO(`M$1(##8T@PJu0J!?uVtiv+SE9%~y^7!mAk5i>*y zakx$|IGK1Ij5cs_CR>HuTX4DcE(qUCyUQX+wulUlxw`HIc=Jz1KpqEIXJAz?h9`^e z{Wcxm9nPG!`GxgFyQAh>-qgG@F3-khTjvhA$u_rq{RYV6IQS33vsvcLMJH$3A5ilV z$d^{+G0dSbq_ST9t40oD;=|lpypt1rwzE>$DdasYreN*mIR0!nw}H@ zF*`duQ&ZF5%t$No;mv5o(%oHZ{py7AWX)8r@7_~ZUe!i7BFY<#`r4CGeEd(np_7E3 zx7{EzJnbICyO@SzH&>d-naNRF>f<}^p^LWuOwSmjhh#j}yDPnzu@iK!L$0raf`k&B 
z95i{9Nx(5{W5Y$)71#aqF*|IXo0A7wuAB~aI_!D7?W`O8S7vpWFi=+JqYcz8WN-_y zRzfHir=_|(2DA({=s4(U&?H~xyp|Xk_O#K1{r%sx*dz(u9?O$)H(3O{kB04Zz&|ib z8{%vwHb|EZ2l6mOHfQq!(!Ii_K_RL&JqVaewq3-}`Y^Vq0#dcKdl~BvF6+|@%6#Lt z#Ra^dUIyngp=C*UO-PW0JJLt>%DsIS8Rd?!d$GLDQsCz?+C0qOUlG72PZtff8Xcfj z>TdF}_U$v5?A8mXS!NpL{AIYP8o8O^gwX!Tm*Q&V7HH=V=L=sB($)|a{tgQi$UEc>maJMJG~;xRA(i_g_s(NePvJGdC|YESt1sS7vNl5~-Ph=gMeVeMYZR zy=s|#FLGOGMiD*X_*9SUA9Wq$F^Cy}NRY=j`D<;5(GBCD57?4-l@DIs@b9=Tw8dPh zs>o0Ce56I|$Ts;9%0oXTIoT3+S8N5~@_-3{<98|iw(~PCbhz`LLYJSVuXoocPusVy z9Q+UV!go!YbE$qLP~>cpXmUh0#Nndi=F$tqzy4i1I~-@$GvL|3_412%48W|g*PZ1K zBdE9@m8WxF-Y+3t`0pdl5`T`+&-t|^w9FLcK_Uu|%jt;mY7D|T6e<0FXKlSgj>y z`{ydeoPRrpUdFR=RhoYVCA&7HOB)>%B_`i$r>)Y`Fe;ZHSK7Sse@Sz8-=k>r0Iqn* zI}rOd1M*@)`}An(+OSaVw#}9m>>RiG+sVOGPRjZ>$DZ<2T+b;9 z_e~O%iT(?x#XxREH#TR`=dHSr3TIoT1Wmg{3qn z)^`dFo>upgfR(z8vFBZK5cTk@z$%w}45dxceI*9F>1~aSmFI4>T#T3FdCsrG!zmYS zYf?P3Hy%c%$T%oj)?tsb-SY44F_}u_^#x1kQ|I+p%eK=Fy=dTpRjSZaOIGPwj#87&&mVo)s*ZWR!4(`2?DME~xaO8(In% z`(-aoO?_>vX%=ie{I*E6iKVgP-5-Xi0p3erwJ!}{Y}M~A8Z{0GwaTsVcuM~C(SM|bG6jMit5lP zn6a4&ey_Yb#aMC;(`Gyt-#9augQ#QO|9J{luN_m@&pEy#f;8bTMX7ipx$&_08q{+L z7P`FHU#t7sdTOs6JMuUs;^{x3%m7PP38gl$D#EVLd8XiY11p1bJ|B7u7J7yTt3h=e0$+Pee-| zN-5jWoYA-K2K~`369p3GL5G=ox~vM^-I2;q4PygCs2AMZoRwyWM~q{z*q^Sw6~MXaeNHtr$R>H4xEl$(fELw!`+z8k z!arAn-KDla-q<>LpSU?6(m8@a-N9?=M{yw>`HLk2K%$SXREbt#;`?bQ;6{^8!w0u{ z^UJE5vCRlC9j2~+FGNH{ITe)zaO5rQCL&B?BGRAlV2h)8~EMsS)Gw3EyS;6}J6a)C38)`_&s$QMZRpxn6ClTF%q zoG~icbQTlVzt(05k7P;O(N&V1aYrs|$z?(>^l+foR^~5)a(9wyaT&4Iy5TCB58W#t z-_8-7PS0m^>lMIFl`MnZ$`%_0TQ;LU5v9*maTv1bHOs&&j$IxTzA7&<8)#iz+++;g zG}(XLPx8wr7IN>k+`GdPuiuQaZY~QtZeFlBZbV8oZfC zBMkFFni$*r^!#1qs0aEK{??{OQCq>^KB42T9#JPS2pp0TCK>;xcBhB^FD(1z?=<5r z9hI|CRiT3V586h2Zol;Uk0FttI~hrCKfD-vUW0Q28{#GCZN>L<>KM^VCT@>een<;V z=}UO(Hm*|WbfTl9Nu_z`_o~WVl23fYfH?(fb0tKQcRd}1bYkV>)8*WT;8y$h^H)b- z*X!5K6pFWhR(T4BqdmXg*mB% z(1zT_%ZXSxz&M?epgN|G=zUaHK5qV+;*|HS{_d%FiR&hyAC2B-Zs?uEdkO+$0tc6|GDYbMm?0z8-p9N*Tp_fWwm<6~?e313b(J1sHPVZo zD2G-Z)v!%iD`0N~P?il9N-0NMe5>C}U^`!DNuY&XjpP)y3-tFA>)m;TrbJdv2lA;^ zJ&K;MX5R-g+HS`;M?N0J27Q^FT2X=uNERf#^q%ZkVP3-Ynj#Lbdlsl9duI%BZF7h! 
zL@~oSVbr0cB{qdBCW57n`RP@nQ9TJdHr400TY8?&u^w0%qjApx$|1Pczn|WAuZge_ z;FlPR_k|wp2l%L~7Y&FM1`e%WeAK~_5~oPoFl zbrv%DamD+)b!6XTy$ajc_PH)C$= zA=F?uAECR2sHGi}sHE7RFYVEQYkY|IdsEC~IFq__GzDm9$d0$_jut9^W_<5&Y5 z!?T%4deZOU?CoJa+xcUT>>|J!8yA&HqpRP6wl}YupLWD&ktNBC_W_E@OAD&nn~Dn} zbjHp4Q9{o;Fj_3F=?leUP>ldggG{0k7n>rp2rKC4wImGH6pP#!&TB3&OXvqj3ObpU zxk-$j#mzyYCOIJ$pq12`pW+69(}Yo*A5C)lW`RD!a=K_-mfu8sG21<~;?sKbm=7ep zc);Iq^mvfXNdMnBqwOk!&%AP58>qLRX8Bv=-Jqiz*K2j1z6``?38X$=Kb2}G0j}&q zrkc=a^liYwV!xVVZMW;7^Jgmu4Y?p-ca$D_&X;|+#8WatLllg${ycMc4rdHZ-7om-<=vgPSEM}O@cNit~&A2Oe ztLrm?rTj7@@JFNLW6mqaU%ontc)9m=Y`z}O@(hOg7|ycwWccn6SI-{MMTkk~;5tGL z-&QTFZ#{lM2b6iYoZkSLcWesPt$Y={L!_bI;WO8N3Se#UA-0AoXoTf58LsT*9UbX` z9aALzyLoPxN@?h}dp_@cF5g2&%GAt`nW1ab*r7959nloTHA_LABv~+I3s?Pttm-9c zq^>Rr3@jSffPjJR;xP^J!6OF!<%Fv0@B3e0Rpkz~)@hh6>yGlz_v5!pj+O-O5A1CY zL4tE&qv7-Bq?gnIrDs~^A@}Z_u_p-P_}0ULcGx@1S*v4h;JFmj|4a);digWDXNG&F zn%X;9YmT*I;U}1v*0G#!Oi*3(ANt3K(m_>+4>U=%wIN5fAy2g-OI7M;5#sZ?aJVGqtSSeAof0k%5yc^mu%tXZ z&O0vsh8+NR$)$S0up=$Uu*K?aLAGkNLazHQ)L{h1H*b# z-s1`hlG*ZI&lY9ZO3XVBTPI`fsDjk;CGx+ouj%LLahWZ1(j{3SC_}n{5#e^MVZjfj zNexKwI>yGuIXO8?mJ3Q3S_$ooOY9j9U&}2*y9gix4-bGaZntxWj79oPSW!})1LoiP z)LbBkegm-euFMhPoPZ{+?@}*cerHQZud>Uen__yE%-+~&c~q^?VWCy zlr=-&1ZFyW<6VgIsGrl<#|{-<4%;g&q2_1&`@cKO*z1itTXip))dnfN94{wlOaF?r zcF!-WN(K9dhe$-VDP%O=-LH4h`8-F$|73Bub2nNJrjpEj#(z72lCuFZ84^&{;==Hv zsG#OdoGkj98;FtUPg$n&7Jj=Y`c!Kha#inthywwWNW8`w;1b&4giQQ2S#fB`v?pY8R~D% zom}R>tK}846hk@c0m|G)6{nY`RW9DAi=2~DPRGnnlyq)HL{5}f9IU7%pPI>!y(A7D zs;ex&nw){I9Zt~=C+p$Xg$T@>OSd=X>x1re#o4%H`j=BuTdYcP>9L0Xjp$f$*o;n5 z$=o=_Mh<0uGelcT*sZtpLpDlD$18$UA6z2F@FE&0ydg`GQ?zWE{73o#|ag~LcAeS6r+z<8x6JTD%@PbR9H~I|ES^HFAv?H`kgZf`p-8P zR#~=Cr9gkab55^hJtaIb<)n%-!1P-K-y}WvyX$Yj`Pj|BaCW=MCV@}3T|_aey(Ue0 zHD?1IrvFsBG)tPbG7i%e(3x*pPI&q055(;tCPuRAnrOQo%c>^%u+JHdo7&F(SoL}2RYO43%<_f31Plf@nG#9&K$c-E5^>_WaoFv^!AlVRwD+)ysB?dN(`# z(tg=w@_9{&!1s9^0pHPn8GgTQDnat1Lofa#fj{Q=71KGt0+u5XX%Q6}O2xT*?U38Z zuigH5-t#@wV&+q%(6Uc9vMlq-c6alyWfS2o}FiT#a5 zHA3^$WZGEvOxwhZsF5YivyQul(}k#)H9+N6(6k6>1F$lk7GVEZhVu^so3-IEHj^C4 zolcLkp_4rIaYNPWZWVv+7>1@Km^5~m*N+b)(KF_h=}%2M@92lvWk5H$q{d~*(@ic9 zhZYvfN%y7(_PVy~2b&_>cTmes%+BJa28Dk=3rMj~@}iUWerW8-0mGq+s;YSW;lz>N zPu|%hiLB~GSu$`D3OW7qgFu~WFImy#KRQs16}#pRZ7h5gxAptLHTDDwhvBFkqvx@3 zH-@}#*_IfEWCDJBYiVfS<0VjZo$*S=8bfnY7%`{Ij7*cTQ2HkVvfE0tQWqc#X_ z;GV0_n;@;2=B}&HbA%CZud7%FXM-NM`eD|-V%)}1fEu)l#;3;je*2@2V+TIY4%>}` zDK@Ch@jCg$3%QERF`N7!0$F`oxxA~B;5wd1#aJzR*8MlKInRHvX&HrKkC~C{?O+%? 
zi@~-GZRsQ2If}#yd{jI{VubMxCVmSj z8-zemBcn?E(*#5f-29Nq6^^vh$6c`fv@i5G58c)~YgSt)cDMiXzXU6SJUU-oS^0RM z_fNY!JY;wn_}mZpHH`w1LiZgE)Z4Kk4r)kU4TYT zYYV{1JYih`vm%*`{O&o4#V4j^X2#*6;NaoltZgjxwg1UcCaF(~o#_?C?!QVNv;r1Ni|cs`wR?mgbTp`gyQzpm$?40#IPj%kXSA8XY&@yJ*8pM1vP9 zgU9Rp$qdHlq<0lLS8onyb8%l^8(Fl$_>U$^@63%R`m98*nlKl9@mY3J zF??S_2E1hiu~_|6&IlI^h;E)9d@XEDbWHaW)$yKa_FCwrNY1y*FR9>}g+#Q-WeO&E z1jx&%eTwF9)5JqZPl=7qg%`F*-1E`d`+MPU-q;+fbP?a{QT#V?QFKR^_D&K?{!8D* za+BFQf9>*3VPpoU&gBR0Uzsmme6I;nwix3NiSNGK<5ibRkIskbgyTeuik7z3-Dly3 zL&_{f58fRz4!Oj;TKf-Rj?jb+9TUfzOW{N6T(gbmi|0!H9?3!Qt*hLI zP+FP+%8pzBoZ_T_KHp}R%=8I^0>#4mY?YkMBpBRwBv8@R zL_Z<(xf)nU8Z^}sOa~*1X)fxeHg|6A*O8)<+dv@xvt@Yp(dkhe^XpmMrfCl0hOv}J zW!PK%jK2GT*Np3lo^^UG`qDO-N(d|BdXCiM+xoO)~!0xy=o-ok<;){ksmT4>5^Y^6ny@0mqF!Jj zqC>-Grs)tSP&eapu_7Iw`qT+$2SmFjSlr$28;Rhf+B{y_m#4HPaTInZtC_K46Tqie zq1mj?*XzXWHj5i_kyh9JUbf550v0o#5n<0&81zji)9IiIh14z=Xt`~z;Ak~cgwD-m zJ-E%CbqKvgm>f94h{3%Ym}B)@L-ScsGHS}?N_WZSD(TunRMPz9;%}8#hLWS6dnQA@ zM48roMjy*ugNs@P^J-gL*@=k*7nF>He9V$0MX;sK+&vc=@j|>NMdNbOpD}~IGSbuS z9URoG5U)n;V+zRM>9l;?2WH9hPQv&Zf+fu2ks^XDRb%7h%*lhHJ1YA0MLQ#87bOq; z3r?ndJmxDIn6@)IcNg49l0;Hr42+Yb`WnYPNd5j{Brr1nGEV#dRYfzLR7}-b#+6 z>u^?9^knf(61vIa;Ay9%ki^RTR4d9#V4)ZTg3WY&xG@1s*w82*fgwI-#shP?uqPa+ zIeiFTN5cpr#N4vmIt2l#=Rnunb)#4(qJpJa8m zw6_xGo^GmZ{3h07KUG`shXH6$;;E}D#lSwg|Kc!1187PUz}+B3`um-t zNoDv*?V{aCJydsbZF^oeCMfJQWSL-^?de?f%((qL*!Da}SM%r0KCVWd#-^@@vd8h0 zr~E}WIM5X(fNfMm?t+u)cq%_HYNcjvy?BpDyAjh1qd1zxSO&Tq>vfpW;=^s8aST6L z?G}r@I~lRL+ezMvD^hlV;|m`gDh(IY<>Ap?T^{yCazwH;(2@5@(|yMCEALoWFCZk$ z;d_CFgS)ysL~O^Pyu=*9`b1_LeO$8cf!{qBHdG}bGNb6cOhmDLA%;Y($jABobeIss zXOqfY41=N0Y^Zg8uzu?f(49b|ck>^#ltuOrjKMs6x!5kx{MWoIq@lCZKx$A^0Qb@8 zS+*?V$6-VySulr3Q!QX8D6(LlNu26Zp4x+q*;&qAheAp9Jba_IL_RskE{K!sJ7hTzqj(ZG#mC`XeU&-o(##&r!LWLQeYUf{Z1;0qP#Jic zI=ecU3KAziySAnsn=p|yC1{=3cO#DnQzrvG<2a+Go~Vs=jK8x;t}JL|^Ohu>qEwA9 z&1HVyp0!q}K;AZ=n9e%IV(#e51A)jQ9G*XC0HPvcz6^9 z3etJ&o{fPBI}*kWv4O-HgZFhmML*F%W)mNE(|}FY3oF#Ft-T0-lbrfobGoDA`*?<9 zsBECG+-ivFdO@?tb#r^%rNMQxm1w0AAsFDRIykJ`P%zf|bPc1S$ zOeKQVsJ33WbKJtvFg4s`)NFiWBG_3ZAd81_qA^nwHX}1$Ig!$@tl}D$KgGkfdtflp z5J$dgcy&_FSyc_97LQYOdB>($%`2qj0S4c{knC* z+AypU>2b7r7}Si9^TZjynobUE@2?Oz*Op2iiS12ELeo~3mqx{Mg!jkHe+?-fiHozn zv4P7YjTwQq>5r_|a8kE@g44|G=EvG9k+cgNi%xTk%I!K}R6;Fg3fc!&I&2;M0{GK6 z&+}9JdSdn`KxLusUm3@}gZB1d-`?H8M&?LThUL3SU3|z9WTS>1-M^X~mPcg2Ena@O z-y3)~9s}P`eq`@3PRzLGR*YlLpA}@qHSMoUW_%r6Cf-kQcU4tpofppp9E>D1ygW1v zN=5Q0DTgadi|33ayDS9~{t7AN1V99FZxHNa;nEiH#ktJk_-)>YQm}&@8fs^-TzF^K!p2xcNkW z_6PtKer7K6yq#2NvUWJgPr9m^p;`S#{%IAlGNwI6tDDko?9-=Zr1j(TFd@UfqWgbn z1f$FMe2JrT>fh1DCJ9qBhm@pC_m*Zv+*<@^o9w%Pmy zy1dPmm60BP%NegS9#8gjMyQg4Stx28%IDhev#d+b4Kv4Cc3iwqn!F$Vk!H^c#cNX8 z)=X}1T_Pq-Gs#rgsR5_*REfT(vd!0=4cKXsFgom~i6?P?>!zK(n_CWUP{PIwXZyec z`5kD^`bwsCqWCzi$IloZtS3K@@9G!y$X?n$HP^MnfFywy2_>~R)PBS2hG#`Y>>+!~ zq-N*SF_ESwty5JA$&Bu<%E$z-o`Z4J&zi>xFI}T4yRe6SD5e)o5Li}N?Tt=c!#~Dt z?CdiqWoM1(=3I*sxEt*BRH|MdMV%)_3{$I!(>T@1v@|)szk(4L7L~4jyK-I*ke{Li z?&-pii{y3yH@Y)(fA`sJC~i;?O~YJRz;oF?wup*q#AG_!&d0pCaF0K(T0CToIMi%b z3f6#c?|CB*ageN;sWFQTj)*|IZ3T}E77b$^_B=y9A+_HPGB>K1LArhu@-m_+Fttz> z19(!HN}_kMf9cQIytHJ&)`y*m+ZgUDL0!8$>vU}?X@aG}mLKGDa-ZAv)!z1LGn@#tl{{PT!I0xOjh5&+t#P z(|x1Q*&QMi$-X+Mir>Jj zuCh2Hy-zM)j*Mis8lswVE6MGs?k?saYw*<<@_tt(sq&!0c0Q)euxyr2o2H>|-VUnmJ3i%@iJ1k5SX@xs`0!Q1@82Q4ZnZ z?kWg40LlptbPriZTR}NOk;S5>WW+IAh6WoE-Vglt{@$9KZ*OAF^*xaeyxG@$n1Jn>2utu2%L=ibnU~&|CDXHcaf-;2Wf@ns-B1vUP zM9jqmzrr9ePw%@s(4FngUojpBhqLC79Ty0wIO*1Cur$&)%0g>YJ2n907l`8qcx_jQ zy~5%@fh}^J=J@l0yh5jip>3p-8_WU19!YmX8nB`rc51R7`8Yr-4Gt0CaoDh);2gyr 
z)La~T{l<*AiCFz1M8E#?zt4Ok2BRsR+P6Nk^w=ifO|;8K_mMPMum1c4#*{iMaNSh$wb0l*+0K3$zT=w8_WQRX!xKSj6{tXR z*?v=|%5?&dO+6S*az#7HIr$`E4e6<+?ovPD%739ULu#jr3B+p|imOq&A4U3IwK5tk zKz{^DuWj2L+lxAIg6zYPs#k1{m8BgNIs{g$*`6_}VZP^3Sut|>=%LCs;4NGI-ao*| zN(#-$OEZ9M1Zf|jxAPLgOcVLXfVoG6M~>K1U^%TBN8JeaLkZAhztIs6+3%NwTBJpD zCKV5p?<(N4x%XbEXGeH?f9z}p#gPKMj)IExy9sD0FzvGiELC(8zDEX%n)+0o0H+jx2Te=r5+pxg&Ve$O_lfFEqJUueLbs=`p7VQPG|%ohrj z99xzNA$CD82kgCZ5hMDX531H1(QAzU<*$MQvXJ-E(h@%z%O`x~axgJwsd+J0i`Lus zRDuH-K0UUxyP*ztg5HT(WCy+V=9*jr&2XoK@v`4;&VGmg?j@muGb^*pUyvdH^&D%Q z_ujjQZ}1T!G-DyUfBbOI8OC?(1^pqD;EZ&_B!nCE4@TM07{}QA!H+xc>%#Olgkgo>bLH-dR{i$zlU4U<1-nJ%m`N> zgo<;EWGe%auMWmdZNss%cSC#Oysg8RLK28YQ_OQ{JBu$$xC?+TkZ(%1df znvs%c!QAAmX4Bj{rPfa$Z!OyjWnqK`3FLuTfXUL<)M%utz~Y3x-azkeYGr0P)?2=5 zgC)-l-I-;(q)Aplx*?<_Bo;s$^CJGr^fCwHIbol&BikKgzt~`0VjnYNIBPcx1Z!b1 z-^%VBt)zfE1w!~%win#J+GQZ@oQkb*=WIVsOHWeJk=Ia{nO577Ptxs53*@x)ZY>2X z-S%*K2bb@Co5dz)4(Q@LpwH$e^zov?3RBuHOVk>wJy)hdX0SoW;3)sR5;5otd+-Uf z*2d_M_P)35jJLT9(a74auOteIg>POLhW}!jZ8qKVhgw+NqH^=Hjtk|z% z6RqA*t$+Fm$9E}KToK$HW@dyzKTnM*c{nUsuVZJDLbh5%mb0 zBOu}Bpk-|rn)R>uQZcesAkXuq&jgTA9*r!dz@e}0Muw|4f+tUt2vr8I!$Et{H|OXDGB@DusMkyX;wok}_%{uQZ_MLa_0rO3Mb}=3#!3e5 z6Zi1A8_^X}-NQGmze?qt&uUq{Qz8ywgpWMWNt<%74cQW9>7>m^P_w={(x1v))B{XGAfJ%H zOYzoqp+HK(k__GjV|fG()6-e^XJC{%Bdvq`x!AGT1v!2=d3xYOnk1-%ulr&0=`*Rm zKK$3T944$NFF$Cx>37C)<3poJj!_+G0>F*;IvgAVtS%UU274IeU*+R9JwlZ_#OH4+2#h3Ljt=(yMaiTz?|seqId3{{a1$i9jhI^1N|s2D zwKkBF#5R>Iguzv0BGm}!;X1Q1`nGg^AL~)N{E0M&pmX=0hhT1=G-dQf5l(U*0yOlLD z8{lshS)7sH_FVRBUd&OiI)402ZDXRNqaz{;pu${^_@qdZq_r5Y-PpK^4v|7IDTY?P zNT2-M08{t%dN~i-(^Y@Jta9L!eW+~56fj*5xa7y-J>4=bd8lPM0Rol6kU&5}IEZ?V zm0|mV8DcC|m&Ur=;+IfKHtFKYrinoe#*I`;pGq;2DUBBdRO2zzbi01|ana1Z?!|K9{7*xW>r@_)rJ!F^gQTc>m?nr{#PNZ$6q^&P zhbBa-8soKOzs@zXeMjw%rV&5bSx{d&ci#G?@Nxpb#iLoHgP5DYJ4k@|CsI1b`jABc zrij8TMqs+;^<-V>l8N~eAYg20k!EECQ^E{}V=h+-IMGVb_SALco!s6^C6uHGPZ| zh=Sc%-Har>qHW~~y4cargXej#@|&pwVxXFSS1P#wkXzXucUcc1)t}}_5A_~GeiZ^~ zpyC{B!(U&}vtOuy3Cai~G}7AKdLD9o_WiXx9og+#k5~R;^5X8fj+Y}n2|~XcA+=yc z0%$-kNVH!!2gQbv-^9*JUeVsSrDrC>SFsMjku@OMe0B)}MPC{;vjTe0+4sy=VQ!vom}^XPk6PbA_ys(PNTLpqwZcihKkC~K(;CDlioi&uPhkSsF-Q}lE6 z-fC5;X(=cq#Km>J=lCj0c!UJ{cJSdV$R{sD``~yHNFNqz3bpz69qt_u+ZO&j_q1Ng zSd7I22c^qub-BKs4V1v>pSv$3n#)&%>}&L;ux-?AUvyi&wZVJeZ*zP6P>le=i9qEO zrsvu(Mc{ErjE1`W*UQFJ>lktymAfikY^fo?Dwa zs@^K>fXmvXDFfjsjU9G>|H%ZP-DpEOJT?>-eghr!2vP)DBWul!bhef3Jk&Z~nC6F$ z%})f2PLIDlOZcq>t~3A>HLy#Mr&0_H8?!xLK4lSf&y`$Z1|?Kcaq*CpjQ37YiL0fn zQmMY_BNIc*Qg8ph52hMZS>R;ioTV*-Dw5NJi9=mH%8klkvxA)?B$6vI>q>QfsL0qD zPMyhiG@oWuVcQ7dJi2{IF1TESIf|qECpDn zz3RO3?kLYsAK*n=a>ChrN3bW1z-laPupIi9;p6a4L8IkrB`jjy zVq*^N3r(+2PB<>9+3E7Q%f29?Tai~ATS907((9H9uE9xW>*c~((hZiKYoN~au=zg# zw?Ih0Y)u2hB8}CZZs#91R5rlQ$e;xS7*7Ex?5T`emMxYqZOT zUiyLDPh1;IHNZX+=FdsGWZkgf8Hf1T(KKX@`+*hEbBzk)2dqP?G8y zaM?HN3A6hSu_!z`P9l}Vz8Z#QBKp9zJ`vLZ``C@*mNgWu6+_gCK*6Ps5|=U*nv#_e zpAwgnnfm0Jmr70aDv*XBh-zzuITY49{GjvFh@8?LOLbNJ0wSHJn@_J-fQj$9A9R;Q@x0G-xRwzE#Z zIrz94V&2E}#DFErIuEel%%F* zar2Z&t!Njwy-4zQ4sl+4ebYNvm%nv+(L(#hKi%CGTpQX#wPRc`R6kE^#r*W?ak}$m zC-sDgWF;TY$CQFJ#6G$Q`yeo>)#~W5DDEkOZBm@vmbU-N;k7r`y>jrCH&4E`&-d56 znRj{6yJ;Xa$4vyruamAqMLXz_rmMgzFvU02Ww!oMWU+*A6n_SQ#+q>_#OHZzD zwfTPE-Cy?H-}UXqwFf+ZJK=q@L|LYUqA)OxK||n_jx~CcVt8qe^#Bbmp^+B+#li0KtA=&sc34}0Ers# zq$(LkKt>~VW1_-gH#UH&qz#q(w{FaeiH2@E3dn?tp0XZH=kMfHwsDO%jgQv)4cU)U zuqgzfwgue2D>!_l?zi9S&b!Fc^PmGzNBu09QJAt@loMZDnA3FU!R6qDr$u!oP`5HU zKCdK8>ti#l1*Q$gq;~~2$9$}?j#5QQwIY=?kTPp5r3NK%uJcM(USW86Vs&+UNJv6w zrx;#!LdXmryf;Z*-7eitF%Q!E1tuJGBli6xGh(07jApc#7!5Vym>*TL%0r$9=UTt@ zisi(S$41PwowfMf3b&M-^;j#)iCXAmIxqj~Wynt-ECV{o%gV~qlOOXe2Zev3=cjEP 
z%uenYU{FrUOw1}c0kTsfsfvh-JbK_*L0X}(Q4-)4>J<=NFW|BD5*og!-Ouk)W@=fWzFeXoeB4HZ_F`&d%D%@5YZ)tzj6DKlk?vO9_pvkGNHQ_uD&LE*D)-?1)x_PE6Gfa9Oy&AHTDUphyL! zQUb-pGW-)!pfk7h^k!_F`}FZNY?43 zaFr9Rz#-@m1BZ(x8`hV6yrBHuxz+RDuUz?6+0{E_Z9UgX4J9cNpyfH~DakQ;5nwaw7!5tf?+!B>+!HEdv~VTKP1MFp1Or{23bcbLTx&nZySJvLz z@QT%|$Pol_MUZpItovh+rtEg{J*z~;#id1|S>eZnEtcO|HQ9d7CwIQsdUNBmjN2l0 z6*EiK)D)%Xq)JGI47LRm-OaxH@wt!YT%C*9^g+&;jI|p7)~Pu^-QQWDE|deMP%hR_ zV7*ErXx;qH&22cSud2&V%VJ$>80&R1m~8dbLBm-+J3hO!z5~EOvqk_Uzk2Tf?&)`f zkC_g!8aUEs=xer9zrDOVN03cG*%qy%k<>N##q@JDL=RF;ii`L;d+zU=W%&kj*tD;? z*=siMZo9wxS&5H~p%Fq67ZR!$Y+*r*LiMdt|1I|YzNABYK!k==p@5+Rp&Sa}iRy_u z+j}-0>UJ3=LD|A*2`rXXVt`e(p+I_k)~6qSo)DJE9G*-jVIU}@IQ2=u!=1lu-~8RC zq_Cv)i0p#Y;_}+c4M#Wr^5o|^HnXSLjU9L#F|!^#!EVg(BWB;;`qm@%nM5JiJ1${c zJVUScAFz*sJdvXNs@tuUgnYe>9P?g=bet}S-n!rD;YqGr8)AE^G9?xS`L%ZQd*01( zEwc(L$Za6hfKeVRsd^> zr0hjxDV~Jt&A3{Nee4NvE&Vucm%>W0aclgnSF=W$RgD5wV->clNYm{o3 z6o>MKe+;v$NeLsl!`7YacO2wDW2 z^TLaYvw?2~bcfT1D<8HU$Un~|{m+^a`;2BZqrJpfFA#%$byV%eT<3SM&KZ1qV81hc zCtjMk_}=mh`B$sxCM754g6q=AUlN|xmq4N@%4zrkomY|FvJ=$bsaFude8>%)xO>ScgozCUi@N~<69Hl#tgqOVDZEE zJgRS%RTe~eMldIPw>^= zD(x0@#>PicYz&X2&>o>b{p?C5ulg{OTF|B(@-m2ilp(g@7l6 zRnt<2{L0EocK@*Zy7NtDz)%6o4V6&P{m}jH&%1W6UGa_YUB6I|@R-op{Mv%G7Hi(K ze0ze;NaQ5!;enh+hMphH5dViRAG!Frc8LUB9YW7YbJ+Jg=3{}7`EzOHHJx=l-`*<0 z)!M%Zu|$!OmU83a1A!9eIy5xO^D~>*&0LTGVttj=+@;>$O`1U)y7TA7jm2x!`bY}zNfw1Ntzs0?~D@;RVFrUn-u&CHP z2=-F21-3NNQ%FH;%WlJB={0Nf%}fgsq`5C5>4S(6q-*x;s+B96i;JOeQU&A%#T7xp z>Cb)Aot%?4ZAkuPVfOGr5sMbq9sEt0Uj+Mw5V}sgq*n18c0eZCq83t{!pB{IZczwj z1)06qw17(&`F1DCtzUPXJwdxTHU|3FW#yJI@8SzO0|Wg9-F#S90#Aj{7B*?sAelWX zDN+MP1E6pW-NSims8mS&ofcqXk-4XpE0|-y5bX}bMiA-?J)&U)q=qcBn7E$ zS*sPLBiYuZI5z>)bWiF!Gn=8XKLM^*unjI|J!n) zj?YdkDk%i)+cAK9XZio;dF037-LJXMLJlEj7YC0yGyL_lGp$qXQb6vnA?BYyTr>O9 ztWWN}|Mk_Sm1s5~siNros-pUkoVevK-;6vnqp#Cs#B$h_Q&TtJUVS^sNdOu$l^N!K zRv%h_{_1bDM zN0)WQu`{It>S-v1;V}_Hu>e4ZW-A#vfxTXZuJA&1)ODt%WXe#51jq$Il1mpJjWA#3 z@#WjDZ_II>wZUhNcdKU?Xom$I6c>fm5|qQPMzEVI;B9QQj24Q!t{+k^!TWa6oY>^2UCW=A_J(RSw$Kp19n-W^dlYzH(Xo) z<++usELNO&a4<`l+znc#fDavklt7M&j~&`?b@VtUPy(H#i$&2^qdo;%c%t zm2&>(p2BMOdJ!aA*HCTmaGqwboCbw-Qdw1h_12}RM1PX4q7{nj;$s67Bf~oz>mh^D zG8~mMAA%|%z1+Vl_~PUEFFveZ_-@{jV?4WSWd$Okd$zk%gw0BaMWYT(p6Kx2>({@w z7KXS1-cd!%hT6*gV(D?Q@CJ@11JK0Ae092l1p5a?adi;g0CciHnR_TG1{^pjm@uZH zA0i!O0tO5O{Y@#PZ{5^MnQ>9D!h^36dw2zV`(>QJ7<2GY)~4@-lg8BcMXHANulo5X zU}4cEQv-c5l5WH4o>shsZcuP`JwtKsJ}C;^ZmMR_h+FZYbo`KlcW0HaS(zCUfHMaw zm3PI&1XYz6u|S8ZusRKBQv*y%!7w@3l}@8u?r5=()}>U^_`k>{{m+^a`;2BZqrKcX zRV{!EDM5B~re})h?TA|s;~w$>|5>DWa9o%Yig&T?+H?^XL%ajvPeh^hMzSk-4cqoZ zLrCK?0wVLC1(iIy8g0uUR9J;)H(iCnc?uJWr5MnE?-N>?7}{XYp_o}#qaoC+djN*~ z+KCp+r>EYsdgJ}e3#K1>^H+~!_oF;~Lj7P7Fcb+BrL{&L4f<`J*Sb%=zgX`Bqnd{F$X{8?N}v=j&#{K7QK^w?B*>~fu}PzQf($QY*jkuR_8 z{xf4)mc71&9wv2AMQBJ2sM|q{dEn7yx0a4OH_qf_|8a*+Kfm<8Wzc~}P_BefrDjn0 z&ZVorZvBA}i6BXlO15bQPC#xW?)g6Ya?9!)cdt7*+8sN6%+CGX=@_SJ7H^uJ9@p2Z z-|%w-`3QlVzXk|uCM^xW^BU$Cbf6EfFVjR_c9(lBF9gPI1g zZFAl9$;D$vv?2W}&8FtRw=Us+QN!Ud+r_SPCRh#ZciIfGpD@XL<$KwuSCf?sP{}KN z;uRr1ThjfVy5k{G*0EQECZSIZhC;^~N3HIzQ{CE)$>DP!L z62z212DFcyRP*VQf|N)glYpSm7;oQDI|q-1B(USh`UUSv2KC_~2;a=4dms|Mb#qy3 zCuBHWR|~G~zeck;q6BO(4Rwx4g1{uK4l`(x0KaHpd6rteRx|G%FtC3+f|L&Em%VP4 z`kakYE&@@ZLAAw2+N%z~7BIHSGG&03y(8+h!EFQnWmlO0rWvu%Xht*IOHIR?VW%}t zU4<*cC@n30^5jWpXD47)_n4=8+Lh8Y^yj^t`G7VR4Ne|xNK=tf7jfk7&K-}|FTMD| zH@8>p_S_s=_mmH6HLy|-zEYs1SRlcBuurEA24`Lh_9S3t3o)DiRjaDp_hiomhtXrN zj~Qq_>;so&yKe8l;(uMF>CgavOr`hvB+C_dKc0SZ`ec_GA3XecN7#-Z{C=2e@%lWM zg)=YCHM5=E@A$|WF7K?k_2srZo6mTjIB2syBR4E4%F{Q&^JkZRE3ChrW$~e@&D5c1 zrVQON^ryQUaung|ilFV!*1cjoVT98N#0nXCX2{a}AMW<~DHTogEss3xV)N+vv&7`2 
zoZ=kjI8mTh0i~3dU{GlH_Y@1Xk|+%D$uu;>79cadq_m`o=298~uF!}SIFD2M;HEw1 zeLP&4#mhT#@7R@?D;}B8a$57bG=EwRghFvlR&+DgEQPXgYEE>G#Xhp9l0!?A0F^1& zej&IVC`16317D81@uD`+A@r<`->Injkb1P5A=k+02vSA$BnE0Y4<+{i&T13>bL^uD zwkTNZfF!wLffPwIh+HJg-*{*Jc+2sK1%jMKCOMl;wI2S%^KXNMzIT#uKTq&}{4qIwpngoe015Xr{;{bor+kQ zjJ`H*hTDV*)`MO-GVBA}H>`1J0T~P`le3zN+6J;14VzN50lEXw z3T@IHh6g`-RG3=KoQf2ciZH2HU|4lSTVhg-sJl}uZwJ)ZZlBGmNYOxl8HQ7kN97tI zRDkd(F#p|b1W92#l1Y%^HE*ph`SEu6n%lpNWtLG~n*t;0yP5WXu1tO1Aj zwa=eZHhfgw&>!Q2ej^{C7>9&6uM;ZihDuPs) zm|}hUVf{@d{Y@K|d_o=AD^85-EXdBFWC|`tSc3u;fnq@OKjq(z_AktceMU2y(f-O9 zbTcfo(32cjnNeF?8x$18HCtmIQLELzQ{6M{&-RCpX2sRyFq=&UBmvcaADsSZr1ez9 ze8@n%2}4dy*yz4BLz=3BWJ8c__7EE_`V;JgbC7&O2p*Urkr6Jpc8SGjsacjz&%mK+Ffc@3vsujm_yziM;N%t4@~_<5KIJ+d|`F zt6Ce`OgN%jBjL&*^e#LOqS%~%9AoH^R&&0M%8I(=%uMFN8dZHnW8|a4hwer9!&^hD z)a42wCjcP@ir|J&yHvZSmlrR({L$to8!n`tFI7~coGu3qa*Fbrg{>k~0?AAWxd0ZK z0+Q&dw$w-J2@Py0A}f#;yr_F|%-{T@(~Cd7vU1b&4YN+nSnTkzMaZedt~lQ`A2}_D zysa-f)&|-*F*f3#P1`ikHh$q^g1?J9!(=k7T2*Q8f9!4hw+CCB4Rah}a(v+YJMYfF z{r(3xKK%UNH+y{dFE#({Nt%}usAYs)!Q5Oxk=m7U%HR6ufCEchKKsFA_d5sPv-P)3 zmc%T#T=d4VH-?#yGd(*7IX4KgL}uTZI?Z~dP0E3v?r!|fdEJAg2Q*M(A_1ph2YX-O zFkcSp9n#F%wg5VrQoO*#zO*6%;7x#(kka;R&JO%G0ZZ+&7h#V|t4hQYMM_dCDkGo| z3??DAUvjgqZYd-f?pI?fCC(O$0EryLMu9Juln?Jy+#l&4*hgk!R{hbs>I1hj4tU-E z#CG-6W3NrHpVIf#pvhO?T9$Njo2>d{0XjnnrU<|ro$4PX5+7XIs&)AiG_8dFZ2^|n zs=nU>kXN1|Ly`xMEk>q?uW@RPQGqHH1mDfQbG+cEtu^m0E_{1&@lji4X*u9`fzY7z zhYvj5ZaUSplrop#p=a~5rMZ2P`0>NKW=`hq-JyPTpAd8dHM`tk{hMF}YNDI<_!}NYTy{I1ZQ(u|U(9jSS6;)DF!o7xhvA%$V zK|tpBztZ7j6@yVsDg#27WY_mEEkkT3Aa)~=bL0EiO?mhHBCopVGB}Z7@@_HfN&0i_ zE9Y>AjIh zMvk=|J;Zi!-{XCz*iU}{;{4rSJ5PEY?|>h!gq=oc%E0-m4cET3#^+`CgH}G8;sj+d zU7VL@!{8{X>ZIEIs(pX*`ssDYIYW+)m~&;`!Uv0%1b#8pVGOblndvr?(Q(aqt zKnPSyKoklpY{DGpJAN|n$tS}c#!o)`>WF=#Ha=eS{nPbt+s>M3F@A*eIK-w;9~V>P zFf#4@xH-1d64BttId^`!xbNblORzETKoD!Ah11Z0!a+aV-4UG~&LINSpHsqBFrR?oQ5*Uz5!SQ2_Qzw)nNOeCLDHb3D z)QBmC3<5}B{@BUgE54|XN>p)fU5!-5Rv$xAK$2Acv-`(rf6t8AXEdW3?WM+Wo1xx{ z-Z%w5=2^~@*V57w5fK4(k`0XGzrTv}(rj`cL21+?1rXfKz5U6}uMxYch|_q)X_D!M z*S~nOIYJO60v#|*3e~VgLiEBu+CUf<`ZzeTGPMdQ(rS}`Jilp~%iJ+G1I#WC?R#+$ za;^{JGGu_0*$~IkGp@fe#dY=&yD39XPc}U^X58@!pI!NQ^{tOzwH!3s3K`+lkHJ3V z{6NIA&tUtBldWg3bXt|dPmRtE4G8fhVaF|{K;7}c=cemj$L9f$yLfG(!4XeA{c2j; zD1DS{gT*s8dRZuBVgJQ#BHBc~-_ z-`_C*>eBgMOAyDw$i;sBt_*wi*4)j$yK>P=DInwk1Qo__zO&2p&`9Q4#0eR6(F{3) zOmP@H-g?+Ow`cV|3F~%|OMMYXWRS~{v6g1*?|c!}>ffNQK4yR7h?Qk}ei6Ie!QhZ^ zPGxv47!WK3O)i>*MN*%!e6NY(kfrKeJEA?hq$u{OuNQO6=Vw&q#23&KK#M?Ld`*27 zY|uwh1d8D?ana=!HF41e+H_1BXmFKWA*R{XxM+WR&&HB(K5qDSdB?8p_1Et5M1c7N zK%b_D?sYll8hiUrmV2BarVa>_fck7x%PBE9BBWhVJOto82&4+2kpYbm&^!$g2?$w* z99&Na$i!#_DMtF%Av4mpxzo2x`43~lThoghLkc@BCY(cED|7fvwK2;@k&uhSPk1C$n*vUVZP;ve&MDJj!y`E7osqe7?;o z+v;fO56dsVKIKIJ;m48D_GXi>%^2=5d4|)wORjvgVBeC*anIU#E!QqxsxB;IfrhB7 zFL!c2lad;nl$M&Flb@QE7aSeiBSxG9f3awXi0`FDA@4 z@2FS>P1t^qpu`8edE4r$myYo zeSgHs1hE`A!|qMje782xT7y-d2st_X+#9Cn`ZH(R=N7{JTpjkckIb)>u4TxtO4(@T zZhtBI>M1|#2Sty4%NSS`au|(c8qV9tVIOky^a6+ZG15pr=u|>a52p;wN+v@Q-v0ya(~biHE1lV-eoE)^gk8LVvW{#h zBucMm--#3_S*JS8z5W66D>CT*aO60G9P9hV{nh7!Tmw^M60>uz``zE@_Wg)cqmlCn z;*KCL{f4>>GqvqwcDDcYJLCI1L$@w+#{{|D7qLRd95%BEJH!W#7*O1}bARtI<~Ke3 z6x#45EE3lcY$s(P1Ii>&bc}v64S4hiPsd}v%f&J0y7J?0-F+OLQ7u(~XHS9%5ec2y zVi50_iYwazXeWTQxS}wpAfLyRCPWv}tXQ$WSb?CRt}>wnwT0m78D3f>$V!HeYc}Gq zj4z5$tIlaq3Tvh9Dj*R8K?6`;%t(j@AOwI+02<-QsR1bgDlv>D0~rPv4@3h1t_fh{ zWq`b2fy}Q!mPDPa1DA5^vOAg+LL(*Bt+a|;H7RI-$d#U|(O$O+VbSjl9CMG5?FR!DcL!Zybe1c(NZ|_g%^2rDomV&bs z?^*VJn;7ezFMin{c6js6O%C3c1@#GVmcVZY(J4C`vDn z4Jh{B@qFjh3$slwjYiHPh%GYIVZZdrJioY8af4Cb(QrmBK)>EZJd5` 
z5-b5>j{Fp2;xw4~X=;lwoY$XqULuZ2pUW`F`|jyCExeDQY>{$hapR!_XYH?eFtNz_a@3|h&jKRov@G!R&hnjGTep9wnl>(B z#p0^HKeq*h!)hQH=+^L9UA<0bBgs|;6mlZOG{~(6k9~lXEAV^{D(Yc8oRv|e5;Iyi zd>1--{F7IvM6CQmG=HIF@(jsVX85pC&HegijT=_|;hT-N=91cSt{=}o zOcF5K-!mij8O>-$`ya-&*3c6k_h0?*zeB@YCW5hGuKmW#OsP~ulw&4w zQV9qF4R{UeqV$evW|M>Vtb&k>j>p6;b}Uv23@dLg zC1FV_q=ZL>&%S)LZq|djCKrbwHpnom!HaJ%cCEjW1d0OGN&CaCwmkmvO6kQ=>GOC+ z=$Y_S>s`M!4>aHBaqyt~(fjcpN`U9*6>t80%cdopye>VvY<)Q|C70P@u# zx9h2QcY7RMe0t>}?<2oDZ}$qn)6rcUofyuXQY$UJ6nHnVIO1OTt>o&+tok_no2Od% zRT@Gn?d*#44+BCJPLEtGRpwwxoZZ;wTqN6=fYh=EL1;t-|MX*P+en)=57&%4K4Ik1 zQSUgv_x zS&U=|X@Fy&h3@aU#+~m}wGu#$6Kb9Fm1QmnEQfCR`1s)A&SuQeA=b!n zhanT}M$NE&<^9v|{B&=7kvdldx-l-j4;bhO8un@ZyZ4z6`{1=@&YxjEr3SWK!}TrX zIFwXls6wN1c02DI;vJakdpX8=!>u*D!nbb=-0=04&m&udU@0jDMT_NfzFUy`w!BXLMhR3Vl~Vst15^b7vkAU@c*&*-EmQz?b;*K1qA`Ica26()YvuA zXw;}N&BPXa?_!~MkS-{Qfb=Fv?{(?D_ulIQ%l6)8pKqR7BzaHXobP?l$vNNm$KJn< zf!*2J*_mf}uIIk*>$?2EBE38vb^Ycq+bowY^w@o%=F4M+??MJ}Fhv&7TIhpa#aRhr zv{?drT6$`;i%O*3ilxP6{e9&2J`fm5@(m!hb>fv)II_88;~fP#1ZicT&xY8kyT2s2 z%-TMzleYUgU*bkkjPL1x|(!6);s2WnOjfC%qdA!J-(BQ&EBoW0t%=F_^>Ydde;0uLS!we`c1Ti;Xt z(#yx~RcTH(@blxOr2{iduxod}ymHM95jk-yeaaL>kl5zObMJja6|59}PF+qyX_G_G@QZ z(R&Zij@m}p?<0mOfQiX*37pc!Qcejp?!*xW1~H+Bu!s|r7~Ry_2(f}>ssZ0G_q}WD z+qJJgUHE#www1Dmp4?9R4aNn}-P?UGM&4O#{KZ=H&-T6DeZuERSZ`P#7_d)qbkFu^ z;kMp3zvbj^5BJXnBNcs@4xc~1``E+#k4zjadIZCw^~lg?qNUDQPwD!h-i(v8g_H(++u@77HaqqjY&+-p zi$|Jk8>N{In38Fg>5rM$|B8&7WXfm+7i@-7!#Ed}Ed*RK6odmpmPp$F*j&?+5S{4d z@rF7<1W+<$Jik9XxH=%ZH8Q0wKCvVU>it4e2ll1-RLNjUXT<%eOZ#1S9trqviNWf9 zulMY8-?rXv`665G8AkF-R&!JhG~2N0dJxsV?Rz=5?u|9I;YC|Otgp^* zs3`z|L}3r5<&;rbQ`PSRLZ_Qc$@M!8dk$3W+uM5g7U#7qo5h5;Lv2HDUrz&vL6vGK zVyl3h+>DOyDxBqj10gmQh^QGc0YcUUyl4VEH^%!&3laeq7sUo1HYR$hpc;Vll+hKm zjzxek$6?LlNIn%xws+M$9cyGZfl%=MzsJ2lF)s=PC~) zNRKMQn1M{FD-Rq#%!rByZGBVvgg4~!K(y@L`qoZrz z@FZ}2DUhpCN_gL=9ayA`KGbE7v?>99BHZ#{pbY2Vz8l?1zuCeo+spHH#Rpg zc?`CI1t&-<7;dIywUyXcxo-B{qHU`>>lvc+6j}UGX`lV3!=8sW+HTc&wm{ubOUHWd zQj5iU2}VtzC$c{7ZMruTFd8~4jr1Ne`bIDi1V-7ld8I%6c;fr(KYnp>_ua_nSHo`E zWtlpZo9++UgBT$y?kb2mhOsn$aXM^Ze?K8U8k>^aiNld~Nk2IMyhd-yH$J=0#{Im{ zW!JAhr`#*tL#x9o$19_XquZ!$bU>p43KNRVLGJ?r26dQHB|H`o({XcE2#WY9D6Ok) z=xrWg^cA;de<1i}RK-%E&ne{ma&dkD8D+WGcy4+5`5bFyB~xUckPpKCtVnXA80>%qbsQFmUYJM#c-f;41d zVw#wkIXFyz`pnMjO<;Fj1(e5;z*b9XY7N#6^(PQvd=41!^4eUQ~RIOsfDOze$``1`0}nq0z{uO~qPT z6a*p4&0xwQ6B^1a1!PQDO`<-NH%E!TY%$oj6@32#`0j@$Gs~W$5{O43qk{w0Y+75v z!w3Au3wyD*tIm{YC~%f&H|y(n8kmN>k4x+s?t#OK5BOZTJP7oUYHMdDr;qxEF#W>8 z=R3w`&E?7|0YpwTM;qwq0G$P0Pi;Es_!pAJ-ySm9Ckt7~;%|#Tw)71Q3=|d?*4EZa zy?3Rr{*e@nYX!r6+(#Ry-^G5R-4eFc7l@^1p2-R6c5l3jnwq4m|7q=*X#^?#TnuGq zW@ZWm0-k^i(IL*3$WARPpJ=!hcmET&HS>l0LG~5?Nt3bFs0~JY#iD3r!^(b;Nom z@&cJ-qrTticw}d48|X9eHtJ_}@g(E`PggffGr;3vFjNBA9}2^EKiV_@=92lQD^@zJ zU#7qKaQK1UNjqk|L=f!p3|aV0>5R)EeVZGB-tOZ=#J-8ayWV=An{AtKyFk%OY5uE4 zt2{r|GFbGP-Ok$ydQMfIQDdo&CGJ6eQT`p_@xy7&pofZMxG5z(30oo}Nks7bVkd(m z08OnuwT(@X>A@%W_1Bk`XGvi41<$_VGQuQ4jU~n&4?MEMVWIpz3)tG{-Q#p?AhKKar2WI|gM2hccxA%%Tn-c&jbdId`WMW_C*>{(pgzArhQHFqoeUtX~)+_gK zr&s0M1Uf!(x#Je}w5mOdBy1&v$q76c4KvIVHV}|1qEpQu#-2Lcfy{12<_P6xbCDS> z%ht9pLvs7Yi((lm%l%qEd>x(nn#fnXoeft?L<^3`$*<0=YF6}g@@%gXcf zAX_~-Gq!)Q1ukOnli1n9EXc2g-gFWS&Y^4p0iIB?%bZF8(~1#)ViA1@1w?Mve|`Jw4L*ayXpnlCiXiJU%`i9UV=h z(WcEp7-S~4m(-NZCSk{|gnR2=tyVBW=Dt8Qt&jz_h=wU5Z;HrS$w79i1%lWiT27iL zea@th7ZU-+I@r3TIfpNvY_2JCFf+(WiV^b36foM1)*p2}I^S@Kyos8kwYrA+TwR9+ zi@X=9yQ?DRh?ceD7gozJxg1Z-@-N7UPfLs~s?R%Qctrh{wub9`$TPK;Q*xZEWTuVW zRN3uy0957dfm*B)GM}&%@@f-j>BgCwt zToEkCvKZ`PLT&<=Lx<*gA5S0)gE*kUwZ-vt)DPS3HlO!7W1D1KIaW;ttR_xtaBb+d zch}B)o;Qd!3Tq4-5ccvQWtb&3850R`TMen%2Hsv+S=X3fRmK5mPD4pjSz1Ka`zXTu 
z;)Z;-n1KNiUoY?K!pf!={OZ{Pwo)xY*>%vPSHvxb!Il z)~u8i2A3v)9bhow#G^qzgR~E@y|?#$QY06CH8u;H8jI>1S{j-=u%oK%Xl&@I8|Mvp zCA$7%{QXx?4<67z`nEV|5R5S;bm=$2794Gq-bZf@F21|C<;j{Yx3q719tp2;Yv$LE z0pTPZoI-eM_>-kcITiW*vzHpQKkq~44QMXz+PJOv>tFhfeJ2cKg6@Vsd=&gJxht)W zIvAOp4v8)(fIe(u;PHw-e{QydLO+7gEt+c|9IqoRk=kVwg`vgH4!t1za*!afKshoCAz)dv@+`P?$B0%pIKP3g!}+ z%Z6F1gCnz;VjUXzv>#k<20z?qe!Y7{XK@dr*rL6Vxoj1fF>_=FGK3&4T56+P*E7%j z49=b5?%vmW^aT5p^^_UNgr+K(EiclYJ^0g)Jtajr{$)Z_4Hh4m+q8Xqym_8x!wTK8 zU7wG9d!Xvp)s(Idcwj;SU~^#WKvUIVT6{WAL1_V?1H-6g<>1t>4YOxAAlT>1Qy{$; z;Vxb-I)4)l`XIHhk;UH}GT0{zS;*q=45>>S_{aigGMNMdAwNH#OeRY=z!00X@+-zM zA5Z!$Dz#z2d#3;x6bWG-K?t~PffPH?M1uFJNzV)oIy*bDU#ByOEEY>jF~KmO)GUNa zXFbNN&hKK?N_$+Zb65APy))zYkCJp*?bX(|8mL;{gMNNvqRErGRFAh z*azk05hB7P|Kuq*UF1doUHum9gQOEZokoGpZ4wcQIGUA~US3j7r|~4Xtg*ChkWHs$ z6Vg}`IvbY7@#k^+ummSYuz@JGC@sGs4}*PNJR4eF(b(NSiY-Q(TDuzRYwH?HZC@Je zy}0|pRnIKYHmAL8j7O1r{IOY7KHyFOhJKRyYP+>+rV6tkBkN4GFNOU4p*OM@wy|M2 zhYNN1D7|^H(QG2{@#r~yp>6Hw*@wO>)wAx^ds}t>?W?UXwyrW&U-x|Bsn;jyd$ zIlulGK0XS+0)qkzyCN|Uw*1BwK|vzGU^B71n9XZ+E_7vNN14CmF>)>c$hV1M_gU*hq2LJ7*l zC3&Z_eYolboW$r-M|ptPtyp}K0P2e{Bs?E~KN}kxDOp5n`zzgBadB~OZZ43D1xf|p zgs|Z(J3g5>F)#th(e$L1cAp}8h?)aT%gwY!W;!5_L1y%LUCKcFsqXTh(2i!Hv=vl@%t8esy^d|He#WoYO8@fR-SF3 zqocogsm1EmFV`<{TBK~LGW&&^ww31Ams_p#Y&y^$o)i{CV_Zn+z%5Eeqrx#lU#`Ii z!yCcZY>F-8CZpd~`*d(RF@S~AU?U?`mxHEj3_2i}4(G?0M-j>i(lbtFGB|+G1nj)V zypXuCmga70c2Fo5@dSLRNGCzj-(eNt@{Afno$Wg$ z8O3&0-`im8Y|CWQ|3Nn!cc1v3ZT&BYw7ySbvazFqC1x5eKPxLYHDkDE7z1;#%#NQE z3WJH~B>8?p?i@g4V=DrwstMj-2y;sqpb&E>1*E9-@cOPQ>?@)oNpnS8Z_^OA!Eoh{p`qxarsb%mV?1xG z1BN}IHh%L0(is6o5 zdslIo!X^5^Mc#Zix#Hqtk!k`}AR@l6Tua|nliM_kTH{Lt+st&{(&m|}uNyf7BNHXNs^AL~2H&<01A4?Eip(^io zjq`Ek=pw^>y-F0>^T)VW4y`_JXa{Y6o$giL9W`=j0f-Lco)`f_NJwY_d)L!PFw>Db z@N+np2B7S8;}zqmK!|qJzn{zq+exHT%{?kM*6D3_4rKsspg`#C?Ckb$ZCVCAw5~Ro z^wXT^=u(=8H(D^1`CZ}TQnJ?3BMYhwYohL?XeA0FHR&sZQkF#_%-PgdDAW-|9Labp;3FS<)hIhB9-<{@wNX7Le zpNZ4HNt6`6S7q!}EhKE~mFz_xCLIa@;4Wc7k-Y?hgbRTe z`c}Ocibq*nIXRor+vo=yeCq^psZOL#1-0`@GM~>sqwpWjRQUZ^+2Y#T>WN6`D_e^Z zgnHIH-f-8Z0kRG%7C@FS^2m7{*|D;@{&axeb?RY+#CU3-4xj;q6wkZnT9)VFgrQ`G z%$iczAzGoeMwjN7(y=iu%#5DTyP{}>b7xgj_S(Xps(9M^(h>II`iQ7;e*}Kqd{0m& z(Fu>S?uVsLPHogZ4|?gB$?zRBL`MQrfm4zH6{m;fZPQsVy12z89m7Wi-4Zpk!(-} z2aQ4oL=7GZod7pdWvn43shU$%-FI;Bz2da=x|+-X86b`i#1*GJr$`}`;+a^yD#mte z+Xs~sRR14PDGC9ae<^(0TH9IBJO{DXgK&Rq8S33F;{CKu=9*@4zA8(K;Q(uo`{p!nTR{?SS2QO{H%QAhEYx!@^l)9B#9=+zn~t;Vjn_`<_~rAz z%lylz^4{KVv-hkQym+^pip^Hai?N10XP`*>Us%Te4csr?C78ze&TZHkeuo7lk%;f| zt-6{HCMPK+MXJHsFdX8AYe>ehWTVfXt|T?IBto4#^{}rQ>28pJ5TTAkkdCdZt;^%+ z{+(27@HZS24_`uB76S_-^WGX+&VUzCOI=HKLv4rbkRi=KL#+CB>}0;=xg#liI?I9= zyG~Wq+KkARDy%A`ey+3V-Aazj+YC>?_96qDR)zHEKI2JmV znud#$fi68vQ4mV-@ozTwbI;q<06p7J=Ophe%s`~;jrM~Vwo$v(%Fes#)K}JySC&Wm zYc15_GK5n$_!gWUgyWP8qcqcN-_hYgDLCJsoL013EpFD&690i6uxlODvZ60-kGTd; z@*O-VoBK47fm%P9U}0FmZ7Z@s9%YsV`vRNejbrYGbc6gteO4>2*J}KJzuU6=DDA+?Yw8PD-o<-%`Xt-SiR41;ZK3{7Q2|Ip%1Fes5fLkHQi2~PwCM!Wh!@eyBhK( z0Mf^H$Nv@=QP(e>Bvz?!l~>;l57wtm2Xz!J%$A>njijK}N%$&X>#I9sM1#JA@#pe! 
zIF);TB%iL-to=QRbNH*1z8vD}R1wN4g2>H~rl;Y4QLUi$r_)~WjJ2;R94Q|;V?$EJ zUsU7`h5*ONt<%>^Cr%*=DJ>`K_fab=mKMwd$ov2;3*a7VZ^n4Ir|OwD(vR`KHrRvk z&+qCEoe>srV}0Gx(Gd$9+kqxbvT%&18bB6)9{i&q9sf5lJ4hDPetqObxmPrJ)jPD0 zW9HUP_C}WtzFCVo`)C3JA$XA@KaKu!sNdaqAf0T0Qy?Euw>ZbbZ*!OKBG*m7_4+{+m zlc-ZGawv&x=M@H!EhQExU#RZFa-_;pbv>+#$S|#)BZ)6jeZ@&2u2y_1k2aUN}ueBeY^c&cFtMEMJ25K`)|`;il)m;eqI z9Sv=t*Fu{~a^B19#s7c}*M}5U$T<0|l9WxW2j1hgorW;4ywxced4%9~tmJ@hAkL#V zQT#iPQ;H}})f(h=2~{@bOoV7|a8TG#QBuR>==K!i5bVo9Tf%+{@667fQInfdf~JNd zu)Bjo%hD1kZDbWa(AkU*o8e8qJy^_8E?GI{FbO3|RaHj*baf5|1&z<6((UXs?J-gP zQm2L+2O<~~^yrN^ff%FO@LnC`v#9=z)Wzi3clZU_@9X}lnzSXROy?b~tqaVf(LG@4 zn)uo+yR}_S1vcqqiJ4p^%jfd7(Q$Q}qCW<3mL#F?$QFI`3DtFjpaMA_v?!{U7Qd#nEJwJgJT+>3uUYI}0DYQdt8Mk?cDOd!1veh#G@r9-$w*s;E$5o!RYbmTB)QFttitXgFN+9c1D}P5QI-3XY?4NTU!kqVqezktidve& zB8J8W=g@X9!h*vX9ys8-`Dfmk@U>Q}PqFi`&U%$%G$&MT=3|)Sc6C}t@!s^sqh|Jv zHj*IVGfDMnIB@6@lSyi7BEsJ0)mG_LG5gGnySc${B8YR01RON zQoa=U<@fso2J7}hk7jzud`ul9BO_9hlKBM%Vxpp_&hIRAgT4rR0bdMVU5gkY!bB>G zDS~i+)v2tbqmj2+E)N8LOfO;X>-J{(c+=CfuF!FY5&PL1XW{X?d7Wp+=CY>YfuFH<(>i zaoX7=r)uV;0W3I@ssNkF0Pj=ABK9INd4a;V=wvpwfDuy#hs_h@>#|wJZxd6W;7PK= z-$qLIZFT5xf8wE%5{0}aPnl87jC=2aopLDs`9mrzeg+7{w7W1ruQ-gU49Cy7It{q` z9xQhRXL>{yXCcp#P95k5k-@iwLm0T2>`-hE5rK0rU?Nf#s#aQ|7a(R|ozd~tWHmxN zmF@EMSJYr*B z@xIng5mWdwZeu;87CI{~4gnD{;7dzGgLCTf-_Vcdf4B4BEEpeTu(LzjFAr<$%Hm=& z_LN>@g;nly2v6rQ`&&T5QIfwvF5JQx*L`LzkSNA|xa=S{{mm^$*>b$xZJ6FC*bjCs z+8_NKvr!<>T7s*KF@N!tX$JlE6-7K@%M6gD;le-K`~#SE-f$p`LKo86kbb&O!eNsy zTH38O?yQGGHn6+BJohiRoXqI5?qGbpPCtlK=ydrW7VLI%J~`R`O!xGzF!iz6FqQpN z69q*9(>Fai;%aL*C=HBKEcL%s+H;zHdPQn*spw)SCWBh9IVO@r5OEToQgj)cXgYDa zPEX`=3#cRP6E<|czwl>Q=7*<&X#ct1PDmBw>v`)V5aO#P&c$_>6>Kx5%Ol#E8{niR zuPQI|_FtR<)5g+wWp1Y;pHL)Eh?bJ+f#G;en`vH~VMXK4awFe2v{sV;lGiX21I{8& zHEobh83_pqseNlIF4ycwm+eD>iG!S7Utcdxg}2eknzt#TV3q&*3lADs)NN0Zo{B*a z0CZ_w50F7bm=T;_!~P~c;u+t0B+c27mQEJw3&VlDfijd5m1qWX`(B>g$|3|{(1`(M zHvqGh)A>>8xwT+zix;~xI~$jr9D+OVM*#ebe#|>vl>LVJ%}<>eA_@RWszwr~xP%X` z$~Rm^?hPF@9!vbI=cE%k5t%|m6*_d>(euRRdMPZaSRXWl{;+x@1SA#lfQ?lVolmYR?|Ks#e|^E5Isow?**k3)x=O&kfcoa6qh$!bZsI{jw=A=BQ3 zlhYg!eK3Gl^3#QYPGS07Y*mN&sX z3N0Tj_hYh-DAy326EF%lHyIdfpmQq&p0l|Hqs5t$08jRlUe3>tV^s6S38sLI;$j(x zi{oAcpYssYQ-x6B!vQo15bM`^FQ!5ZM*~+(?V$4?^f3Nxbrj^psZRDg1tkK7* zsI6IB;K9Jcx7T8aBSGgZ{%J z91||<{`2i)Z*TAI?d|0Bpx5p)vG2$9-#F=qbgIYweZJ&}x6ssS5dxktMng$CJ~m^0 zW48yu23#A`ryiPXhUEIh;ND7#PIWvmPZ$E|(A>u%=}7I|`M&pA5|UHt-RwEE&~$-s z*M07@z&Pv?w@$pvr)NsfXX~`8awXP*wGGXBVsSNAp4p>-%+~j9RV6j^}o0uH#n~>T!I&PbeYo zY>Bs~j!hB(?PH`uOhZCX!s7u69p&zu-gk8gj3Dr^4$4C^rstjU+Qm%ZkXo4xiVLiH zexoCfEO&UDsGp;3Cm?Jgd959JvyrlzkJ?>CR#{o|uHn1i`p@)XJFw$8UVaF`gEIvA zt0`!qC7?-_PD<2FU+TM+xD>CEX#T316V=bAgKx2y_M2D6%T?Qoshuv4#C6<_iK|nr z8=6u&Gf7IuO3F5Fm<m;1v|ym~mLp;hHp)AppwjLh%JhEq^n{UPfc*U0thBf+^%Ok` z9IVkR2M=>WeRIgPsewD4?)>@dD z(owR-X1AvjR77>8ZkC>4M}uJGj+nY6(oj41OOB-64np6r>B9@pvfw3R9L7+3v^$a9 zLL81Bu#T4`zYmp{ZhFa19uXttTPSB(mLxS&{Rtxf`^fu>^$ZEaPh-d32vLf-Tf=3~(?LkL z7ys#L1qNA=wYevH+jemE5YQe^bevP$i@QS0QvU$256KQ8Awz1iQ+XsU7z`pva&q<@ zXsd4yE8##~u+vjqhPwLO`-f_~N((!?7tf27cvXfW8bm<4MQmf|$3(JXu-evvI0;%n zlzI{tlB1-Il9UuRLqo$EF+f>=D~2z(Z^5zs9oYZ2DLEiezV!ItZ-k#CBYI5iC8UXf zyum_P^dSHMn%WA=@+o>u>ght04^}YtlwO$ZNGrhY7_+a5(NL}Pt}fPS=zxuSn{MRC z{o%FrOZb#yJ4%^Wt@>3@$qU~pbR0VOF^>3nR{%)e{!)9us*XyD6W*9T;7@a0V8%>U6mZM6bgPLM}j(3(XxH};~&WwBeD>ZJ{eh)+J@p|JIGs1X8 z=P~Q!a!YQlm#yfY4atiApd_cmGRbKVZ`)=A~isv0$$*A4g1 zW(yb(x(!sWk9A}Jh+?<-@376{XUOP_2iKHv+b}|qBJ^$IvXgMKP%x$f2PF`J;nVj* zkqDAy9ABUP1`+{fb<2Ndea047V_+G@lCqFu`_;DYdA+#gjfA$35-Q|C)Q+b|(yeX3 z&fQI_4%nCh`!!8BMcG1Cn@fZKbKtkFl7Kcs`~a91Fc8;wObWFEl&tyoqqv_xa|Q}P 
zz{ls~;Goz7&;pYt?mvUsd3^psc}`MEe2`ZrNE^4y=NK}OJQ8`FtBfLyZ^teHB`@?5 ze|~~|HQb1SkFQgdUsd0hPfgZK)Q!W-I6k;uSeqX*bjZbLOo&gL-^$vAkq>N0AO(gNRDfI?ai`Lkz%?8{R4*9yaswyuwgV>ivL zFu*H&tK+<%iz}YWB-37`)y44zgQ1Sr$?2?~jh2p5Ut*~4Psbn2bE>*2va!`S${|;h z{r&@#e(7p^V0L_%qNJvW2!4Xc z58%}uAMZDPE&Qj^-;_lo;2Md1R2>z1L)`0-oem7AH}~G~93Vdy33ki9C8JZ=h-8Q& z+eZv;#HYaOKX+y?Z??*wEJ8#2%QOcb4^gT*;cX{cxaOyiBwSqfwFQV7&Yb?HNVd;K zQ{k{N9gCID$Lbb#PMZ(h%Z(oO*ct9c&rqv0N5_b>Y)n`w8AAK@(BtDcmzg=-zP`ts z7Dm4~7{8p^!P>&*LL1MF6K=)jT7~2mK?GpqrmuQnUA^pR?vBg3f4{+PzqbQUtjDXq z-U6uDrbAfg_!3RY85gxb)|+QdHy5))QaGwpEE$m)l;y3o@(zpXH|_Ri3D>4GM-8xu zCXs0knWh3(2ug39XzdSAXxp%D@pEY8Kqz?#<%Z5rZl;L{WB{xt3~vhiTPnv^rr6Wj zCdFJ2boCRiRdQ`lBHN|bakM74lf6qpt#&2I>w+l8J9 z06sWH_x@q<8`T8gw~ww9V^V-fWO4QeVwsKIbi=(t1ofmx&b?1dWLj+NHaoUGAL&0w ziO(2#spi&nPVPo#=N1lq$1S;3KdK57{-q`DY#Ng#%l1*Yk@_)e$ zQR*7Ap>GRsdVBd*R8*9em8GPhb7oSAI)A)BB{T?ri_jtW@nEE$?R{TguWZg1URX<>&Ja8u*KtqFY8v#i~%x*xKIOLR3Oi8EaQu6g*5pfpMB%XZzLt z80AQYp-u$^qkz$fNitfGe8++0(>k@~q-EDiH2#_}O=YXmUJjTO1pE7lKk-+aFs_IKS2w4uc|0qDE4(?DUeLi2PXm>?8}{FVCJ^yQ}CH1V?ZrbEA>XFtGhyvN zu09p7?|h*)zO)xkl5oe4ydwJG2}Gzp*i~(K9NcuF{2&RD+t#Bs5^Uf)-5Mvgx<_^6 zm*AE=9=KjR2;;yotM?;)4^Dz88GggPaXw{zW3%+=Y3G(1w5euomI)In&rNpk0E`7B zYcmffi8=5l`8A>2?-ZwJXcb=*IBdkKmiNtA(37gV5M4=&NkU*igxSdXi~_r%g{PO7 zy@ena?^W1$=UZ@A1;wm z=w*DWT3@cgBLf(Dol+AFL^ZtH8u)mBvFtsTlKmjOAUn=h> zH4Q%bKM)_qq2zv&YapmXq{rv$P=dC{cLyWU(J~bk=Xo>WLa)3OLx&A}whH1&CJk%cfGA59Hh?ud0TVB;UT`CRZgjmDJ`yG$5Ygnp zPKNG*9t;y8G}UD^)sv(Pp5s!A>E{u3 zDO6PHTwr8pNO2mHr)16jpip4Dcij1L!<1ykP#(kpv$?%_09EJsZ5CJ+b&p;7q6wMj zLFv=I<2Gf@x*dGXmXbJ_>sY5D?2B+q8Dk#`Z&Rj<)&BW_d zPRp*RzT4doUd8?OSi%jc8jQS752t-%ng1A5T|?ZJV6Yf{U4n8`pca}j?2yj-_)^i)?{ zrpoRbLibn{WLZSoXnfuT{r9(d@k)-e#kb=9(@@=A_VQ^!my>)O*IV`*g=gJ)=pplQ z%uUOTYhpy&WL7Tb4Yx+7;lQv;E@*)KhH`nJ1W4&a(_j3$7T*HClJ3_$bTpeKy-)5hxEQV&%{Mt^TU-0h`%REJi*vXEZH*@N zL;qUwugL=}dg&?kga+{_v+zNAr$@K%KZY}jk_??_(Gz<76HpyTDQm;_{IDcGvZGwuhx`D>^ofU+~FA&UR+AL_IAgjpSq`bv4E0 z*yJvAn>7+O^B>wWwt0%HGqM+2$>;R2hJTq={Q^HaeN{EB{;An0DLC=mixU0QJ|Z$U zK31fw+d2sfl$I!8K+Kd8!B|^1 zyTzVPKE;fZf&l>)jO8R%Mg-;8;uC(j=pF{fzsM0b?EgF$Q>qH_fkEL43}hxWpK|f% zxa^6#V%GEm3XyexQ)IA9Z@$Hja_##qV;!p9Sy+M0K#lmBm40Kn$pg% z<^kQ;>E#v+j|lQrQHE>}f5o>xXy?cGe~uwI?|$USU87%eRAEylpJ11tK<`n`6}CM| zV0&G02|f%j>sF~zUgR%&Y#0liyW3msnsxFlG^aP|9}L6mh#?RbqH|2Zb;dS(_Ha6g z^H56Ih8JMk-x5*=3bh=a%ZGK9nPUNSqr;)nkBrRj`6aFPb^9r^8TRi}?|M~vHpq~T z)+eD{HyoIkz-e+__$lD1TJkr48bIZMxXeUEr}IlWJ-;r$o4zhS9loQ4m+Q+7Co%4r z>Ulkv!H10k+mV!Q-R^$<;QZ`Q7%kx;Ku)~&6Q==G_I-LUSI;H?b^(UR$>Z*}rfrNW z>Iz1Kp6zQ=c66Vs4!JKzrWelodDoccfMf3g(hVKAZ;ZzS*M+MK+v=Xr=W?lLtv{!@ zfC((UX}gM=n(Gns(2@txR1MHw1KM2^+;pZ}@{Mz%o2JC3qgXAha!fZ(wH46ikY#0O z9zjTX^4z>^i7A+{yT&}zKws|)FR^d&(GL*D5N{qZQc*+Dml<;ocs)AQg*ooZ+64U(Ddy)0$uJvtZA4q3!uNxn;;d-j3Fez1mh}aON%vtyf{Avt9bC1 zF%HL4(0gY`u^VSC&13`5q3isEU6!2hiTym))@95N>Wa42iZ}I%_kt51InKnSkp$rh zx8LXa8CwSYd-zh{z@#W60rHs73;x!2t-$~}_Da_4)kn+OO}6Oi4n&XB;@+S^h7la6 ztyq<;E!%ectnoLDlkgne+HRfu%trKjEbFv4$BFJsGx{Uz^CEY<*VfI{#c}hwnu4ty zP|jHi_s`JD;kp+W^^FC+cdPYS!IRl1y{55xnka^j=Uqm?%>!Jc7l?capbx|>S?)1| zuxY2PsF|*hdsxi@_=Xjv8z1y$%juU{SVMd9-)pcAnVI~j{s_SaDyRF2dk?{6;ZX|7 zSbk1m9ACSh+V8@Ly`p1k1S6F}ZIhURf1vLZHJ#lolo)}P0kO$w11R-#QdUHEQKWed z=FS!di8DtA@|2x#Uu+p!4fr!Ud$p2Z0mEnh%#c3e%KWH#`)<5|GdbsfI5!T_MFAzC z$49FOIc_C1_~!%qrdF3*z4B&kLADuQ?RQf2l@?Oc_0km-6y{b}IT#u5KvC+T*fEH&1z27(xnpBic2f*m5;`-s0J?7 zpjW)1lCJ@Cw1G!`_p`D2bt`O}=tS_&5``$3Rpf;+H^R-)fY3?-n=}B@=BjJkmRcYW z>qE3=O<_jZR|2-IHd4xz5)Va>l2U(N|7nEjc@T-Fj_6Z)mm8Wt?=BijT0P=0>edVr zVApsucH3+WwV;5&ozEUW{68MjhAe4Zki|v8#z)hd*{Y}-DC1X>w#v$jWhco)4&=EeeO+hPOmr>HX|B@!YR|% 
z?u1)3PrQwU+X9~l9UbK$pfV`R&7j-vU+3R*;)77;{SK!3kcWj(<_VDC5?y>gbVtXe zylW)lsjM3|UvxM7_`GMVuc4DKzwUjHb)ruRLlJie9a}FkCqeDcZQu`O^m0CDFQj>t zW0ji>?rZ}T1I&#$ECuurg2y@6j4G;`qeWcPNuM%*41!Ui!$|)wFI~LJ{G&QRY8sZ? zHQcVv$IhThn4Q7f&%W`*7#L)|uZrmuUw0qh*rU2%r+apr9qp&ga5N4D%YAUFroBIzu9 zNjKWFO86al$tGQ^MsQuMb`2kFM@`P{qwQ-KG`(HpT}ZAUJya(T@U?FDzO9u!(hZ6E z!>%%K)!mi^RV&M;`8w0-b>`14^~)CZ)ylbMrS^y6gT>ASB&h@s5077>Sw`~xTZ`EK zh$TG<@UO^>p`oNjkWxn~!_E@sumPb7pqirj1p&=Q*oEx;Od5U=9jLUcQg_>LdyPGR z=IOoYF}7JFpP47TGPUxO)sitaF(-fwcMPXLL@4qWrzikY9p}l*{-ZK9lBeksX*~(G z27Ph{!IV$!6F~`0uD0X-b~^tn766B4XI>zz-iJDX81dCZP(sB)VCbAm*G2j2O$a4N zr$rNFVF8IR9R=Z355<7P!pr#YUxp+o5mCr96$iZ_`Il0F+rPqOKu-X3kX*bU_IM_| z`OGc&(=Y2jEx-9r&S(h1%nGn$b0`#6FvLP>8$yrbVGgDSe1Td$O3C{u&TN=^K7Q2_I5;P#4(U?WySky%hQ^ zeDm9=*IkG0uaN)+2~MC@*>O0gsQoc_eD~y{@M3b zV_iWolMRiwhztPM`b8D*>Xom9xQZS+qiVK6NlYpwt%G{omdDAE8#?ZRZeh}(L0o=O zy|W{N5GW2(E_jo0Ce)vvIiCp}rL3c6>exuWXORRV$dByRa2L$9}F)q8kpRfJ8{{mKG%bH`k`i97SENJ7n_4M z;((E>Wf>zAa!HHY-MH;G-D&4ARw{?V$1oj9THL;)7t|@bRwyEC*3Zu5JANhI~y|$&m zGB3QRcI>>hM`zhy2C>IK`?Nau!0NeXF)LWN-5=XXT**wA+|YOs=+)@B^p&wIiMo^BNNnV zl&Bp?%rpn`Vh$(7x#ddivJFUSR(!W!(tYFPHb&z&jflsj5w?+b=-Va#Fz?|pg9By& z?KgLF28WS_%!pGJoaz<%^JCyyJiyQh`_+Ry*Qfv>Cf2PVE$R6eH{^iL+ ze*-^!xv4r^uZfNFKgGDdfmJ8xwg7)Z%^5K zL`TfLz{Ow@EzT%8CE9*3(H7WG-K(GW7gf7jI~zVP6Bju*gFEYnPj6Tga?5Kr-1}~u z`#-fWX#5VwH$5+XruNVX`|Wj8!!8Hc9r0VI!!`$l_Kwy*L2OF)+zzPMOf0MOSYh(^SzFd+?H>Mk%xq>ZO!H6Xwj05bA{xRDBG49#Oqv+m7@RuRTjg~B`6c2~OC}0~d z@UcCn+!J7T&^>kGCc{;;y6~(*W0hyPR%>8=O+ryVBL?041C#xmXJm{h+W5G%w)SWG zZHA>$3Eo$=);?{%5GtwN3h#aHU^C&%Q@@8NmuN1#aYCwYTh4S6_0R-w&z z3gOZ(@v3gpbWMHXE`7N@!qu2oDXRm&nlJJ(>>|cz1I_(&VdyLwnr!RCAYmnWp(YkA z@zs0Wb4av(?Gq4$50|)m4yu_UNVVxGsqC%^c|BAbro{&=0myeX&GasDc{w&`ve3DV2_Og{EGZGeq;~;qvd+mJtT|Aq>vpAmnFPYF9mGcC z#Cgy9Wj_g+zXF|t0RR#dL%`C(AL=O7-<7UV@Rd8v=~g%+S(Ot27NF7GB3wne zl|Rp9GjZJH;=z{hlp>EGEU1QeWxZo=xyCMyH@h4x1ZI?*Rldv3OivsmyOL`7!YCEt zSx^Ih8w%qa-lwbJ`KKV-aHlkSz4VJ*2tst%*JgZ>gv=J}E&)<(eoz$a_&b(}gMtWE zTs|8D6`~eA5#j7wu{t#tsd8;y5g!+Idww&l<)PnPI^>Ttk|A^8=U)Oj5~I4?A!RB7 zkk;`YbtvTd2QM)7ds`#HWQKDm=hKp)(Z)%EQ5n3^^(Xfk;|7U@&0%1v+)F_)+45f5 zEMu!&D@KR>qsl7!7anoPw|_V~@oK6&is-4|doN<(Y0J1W2jB~28Z%la%@?|1F1A8&F0DChe{gPTdLOg1f}`$u|BEuZ?6@xQUvioPx^t@GK-hiUctzzc^*< zfpmzW^1kO1Sd#Vgl~^1kz%e?I(bRXCD9sK|Pbal0gS**oBqEs#Ug<~rS}$`y4biQ( z*9~UVx8Rh(wgaunu_6+AmZ103SdZ{F9Y#8saZNNx@Pm{vX()cA z=;tBwcuRJw0N2-#rN0VKCIoQr{EUu6E$H{xp3H34KFoHLxe;5EZcg~7Bi9Z@yDd78 zvsg&T%U`%SmeqgwU0x5{vP^B-WZ5MToH>+87Gyy#jM=tr5z@sLH|}j z;xa%f1&=Sv;|Z*KZ4GwuJOBzg&KRH;FxN8r$h<7^a!qkXXyoQLit6=Z%ocQs$VR8R zxL6oa9#!DtThCw={qzHo*PAO&8;Y4K_iW%B)yitQDq!8Xx|oOr7!b43T7ARBY$o>+ zK|IDW*BT;+y)a7Q@`JBWw9al()MoKJK?0(AfFqVH*oVj>1>!1h|0wNCIG;1?^pNCL ztMSEeYPM$V6(kbkr}%yH#K5XLNEAQp^S-RL6$4!pd7rAO28ge)m>hAaM7vXh#}~$3 zsQlk?`{KN!wvEtr>7Ec?a2m4KZ67_$sgyYUx&@Z=gZFLS+H%5)$8_p3qr`x=)J(s5 z{??>}Tk&**rg^f#8rTLWH(gP>qJrKx$@Sg0T1PbgY1|fIE%3W7meZwQEmT9qShUUW z{*TD8*_|Wregg7k$M=Qzw&5uxm|blLzUfZtuBEmEc1SRcAn6VCh8N_L&*4-_Qdm%z z+x3<6>Fy7XD!qNCfq6dkG|*6oF{~L6soB4FcFYe5eOU3AUU>NEo8m2s08-#MiD2(; z7}CwE%=&GbiCgTR(5tk2M;v>Q*2M>*To*NqmxZfh{L6W)b1(PVA0;U*^vjEciNU_& z_Ix-=U0a3IgqCMpJMM9>4a}eqsQ4UrVS>vG$0pxi17?x94YD$?e^;)LLjln7W@cMy zU414QO4%iT8VI=cjEMn?dkLZN3yR1Qp%?7Bn^^IeTiW~m)8DvcbOCu_4A%7wE_!Bn zs5xPJZd@~|b`^Xpw)=VPJ>6B0CkF!u^oAdy_6I-f>Tj~WpaOBf$HK%$Wx#Ao@S

    }BFoews(TeyaT?k&*Y2lq-Rk0Nrp8)s}A2q@8FCz?vlYM%*m(&_}J*kI2jH!?G z1?gybo~`{1(;e}Nlzn2Ty$2F)N}QltAi;}g!7i~-|1QAV`FS>t_W8H*h4Vn$&+XG8 zI}ki!&s|!lkI-&joyxq*`pT4>oY}Q4i2e{Jlp6T>{iSYi#>4$Bu(|EMy{Jnf>x%x% zfIzm?1R!{x(w)mWDvhn=3Q&XP3Zt9JGe<5K4B~& zz+6;+IuU*)B|&H9++?OjdDHl@DP%JE&qkD(EJ;wyq4IT+hT&zKoeht7qns6j3XuD2 z-sU!!d(lR@u`e%HuNZh4T?*C3xKco2_|ZjiZPcRlENZPAJ==XLeHnCbOsJ)(`P+WP zUKIc$R?_!pS_V?o=lQ%1r@NVq@s^t+TJ=q1ef6>$b_o8Q@<0@SLR{A7V$Y+;w7YHk zAa9VD=et&6BeTxPT5Fb;n;#JDp>752CWXH{v>7|>8~hNL7eWT%7{kzU2)sxn$s+j- z(EAoUENwHf;+ld#5Rds)Zy%G5<)4m{&@%jwiT%qN0*E1rjmdiqKfAB3 z^Z|OhPXov38r5$7ba*~pKep#pdb-zIU-nb^9}}*eH%{aB6UQfeyl^NtJ5(Y0QQM0P zdw!={RtCKN@ zpnM6xdpj#ONWllks~YE;p#A)d-35A_g^I4>K;e6~6pKf`<0{*6mhH+{-ya*I_$r8{ zpb1@PN4RqTr4>KRN1%Twq0Flp<`692bMBj~cf`kVj26*qlQ?`DL?4vN2={g$$;&ie z=+1_V3)j;7-f_q7>zv8D;s&+`X|7SO4Z&%PZiGf1#zx<)d)g#;P#w^{rhd>NT7gf; z1j(5*`#XeS(1*YfDIxgDS(`0D=JB;SJ()4p&xQ8 zEamf!ncF$JhS3)S01eW9G*?fr%4aV#WaEUZ99rLor_)rkNna*DUzSrM10Dd%UpR77 zyXl|rSMuU=n&0(#Z=eWp1k_ae`9ONPwn*>7l0U%g@F~>DY17T-(C2}*kP-Nx?W#e$ zbahEseHbp0uH{$DdW@aY$jZsA-3jct=>GG(A53zOV)W0y`1cQU!5_V)gE>Er&>tJN zh+nve9UyXC$m1%9okaUQ`>3Sa< zgy3z*P>}!^eBnuDM35BeY_3g+3k?Z-6Q2<_AZ+X8HFb(xX>hojRej3(%t?zgFQVS$ zv=*>&8D}Ai_YJ`gOHfEJxSMcy=eylwtc>-n#!j%BcEJDeh1XYJCBE@`7{ zR9_{<7j#}@TSIhg6o)}~vA6#m`0k0PV`Rd6?CVr;i_UtV-eA6EwduM`@2_3_e17wr zg*vvluTJHO4saraha_z$ElqQUsSh>ATdOR8J5$3{ag2@5oz7MSQKk{M!^e2-P?g7J_6EB9CRf}2Io;{s+iJYI6b*iO=i%gP^CcyLCqHXanG_lEt%d}L=a%G_)MiB&zITefzx~OY@%F~x zo+8OkTlJ3CKEFK?eW63Re}`Gm@BH2K%y`?0lf5P!_`KtC(EexPf3k*R@`J2lTMDGt zpyIZ`^p`bAbt7z`!vXAq;sV=<2qK6~CdFV}Aj91~-?`;b90KIIctkPA$U-a?5krNJ z;EIe)dA&AvU8NG@H~6T;aFAV7pHtbC%YlL(F@2s|p&GYastDSfJZpPJp?w7Tv7DjV;L->J7 zMp78q5?F~aBBeDaODwcJW>4^2KJC@Yol(bL48Ey_g>2X*!`*#Q$c4=f1HTi8U<}UXzqS3)ooJKdlvTh&Hg-my`v6#zkmzZ_mIc30N^Mp zamVQP&7D;m_#W2Q!O+i=eTTAVFG^Q8$|LD@;97l(kfef9U7zvz%9H10m#@PuyMzEV zD*%p~iUa_Ls)CHHAkS?#GTG0{k4z_ke8jHYj`Gi1Zg@Cty@9{cBDWeVt*@|8B06 z=dSR+`M&>cHi~_tHfsMS`~Sv1rVRgwWMsa^<7YJpyvc|21Tqa7)Pe9vC_MuOi3IlH zFGireq3c&!u76f|$gz(rkH&tTsnL-cSuXqxc}!VHTK`v z^b~5EWs2<Z)4620!fV4;|E-7OwoHGduVkm9?>hxpkae9V z>YM0nf3fP9*Q-{zPM=__r(>f@GAAkBRhsQK{cg(5c0rpIkJ$S#OVkuFxL1Y9VLVVlM)LK;4oX5k1xd@k#R>lZZ%o-VpEFQOo+4d^$)sfdnrddtX zw$lPuK;KJm@`FiB9hduc`&Geer#M%aaCff=Z);zh^4^kuaSv58kenSOmJsU0Vq(B@ zpp=6$ptu$eT`P255_~}0Yo5;I84LYaolm{+y8c-wY!VVdVYzh6r=ZWvmafM54+9h- zA|tAD*aG1Iwi%H-y{LmQI*Mj%)w^+RtFC6flJX!>VBtM86nn@MnTAWLvwvx}i-p*> z27BBq>U0@^C#$PW*urC-5S*_8hMfTL0HER7dpP!C4;Jx^fDD8ab)?JZadvI4IU+nT3m_S=2-OAYnUttf?C_mnH^765HFAWMKMd-XbKYY!`G zVL(X#ZHtPjGm}*{iU24DpdHT3HjpJ2mNC+|@@UU1EGD#M2%`#2^r|iHnYp4v`p@*GgmOsI3!L)xz3F zl+GUMvxnXElops>#4D^7H+JGMh;1%q*hs+M7yX8P68ZEo(wybnv@_D1Px8;X=&$MS zTgvh%_Kn)8joSa+_MhTwe{F(ABVm`3=@Oej^Q7=34;?R(sz9hMLuPVlzd%tu6!btb z9U~q@z7dx~4qqbsp8Wnc&!Qurj>5kpH{W%y?-JBohTquc`?HR<4ln^)_C~9ouYFbf zk_CswQa+Iw1i8a7IxNoO?)~!o92w-6_ZPS(xn796wcKmleCPG&BX9WRK4-uoiHwDN zYoxdv0mlqPAQti3S{vWL^UKZ2N=ix)i3G&+*fJtzvzaGPp0Kbmqf+s%TnHYr#t2Ko z$NSVW%9#}}QeH*XM7z9qy<&B(hu)W$U)bH*XKP{i#>X!sBO|Xqze~`AfmD4@T}(z4 zRXXJM(dpcyQ_F5GTx$K(AMr=FhHV<>JPzDZ(6rT2yRW*`eeRkkOQ+h+FfyOE$aC%P zH^1)n*tX-r>QjEd+Lzy-<}lURb)v#uWuu3NQ*8`yMxPskgMEU2I;7Qds_1Y?1bGR$ z(K*fOUP+JczO}e)ZMJgVp%+h6|GfKb0;Huj zd|q>5)j|6sPt#r|bR?E?a@$~2Iog=UFFPB2YmMFJYk`--Is*dRUL1J2dzS5-DR#3A z?o9mIW4&{RT_qm-?qNWhOQiRS*B7<#YAN0anx^WrtY@q-U6I!GB{J$wV|gBSu0MYY zZlm^6p@;)f4kEzWQj9_gDtFrmMN*asVI$~BDyWQDxiOBDa8W#o4o5cz!XZ*Ni2$n< z_YjG`!Xn|iMdf1#ud$^wi zijIz{mVxP*RgSBFar-r*E2THc%41`zE7#2*^tGDya8TMbXufPLWs;^_kq0d zSs>#;Ne5(}jyK&HegxbbqhzkDcYn-c`}x;CoNhy_MEK@MBHzFim!uv0W6$l|x41k3 zlZSWwLu6b+v`>Uc@vn}-Y;uZZ+m;M9P@n>+7_ljlAy5<%!YeI+oW0|>y7)po7GFMC 
zm{6TTL70{N!Pf<~ffaDib^6q~ECT~bA`2Cj`AQ_NB8d)2-9V*}Oh%NIcv_l0>w6V(11y-;ax_ zHqP{@6j5Fow6GeSXjreVNGB_EwKQcIVrr_fjK>U}IR*Fpnu_fJZ0u zonB<$&2`^LgTFmLvKRfA3d)RP->8k+zskP4*}h@le?dSw^NqCSkPJc?1K}YtG(@2) zlxcGrmDjRc>DAC8?9UpH$OW$Lr zhU284-)}z|c(Rz0gWx%T2+j)}3=u|hceq?WrM3M_L{xTKR$gW4^C$a^S92r-OQ2v2^z3w|-k4;O zXzp6#x!7d&`B#5#zqdWLH72Ds^1`z}_ImB!@Mb%>rlDhHsCip$wbwkmoI4fbidWhG zwkfuDX%=bJ_;PvyRnq4l{N__!nC~0^z*lh_7MwV^$^6^}&&_{cn|g8Ag!`Lk+}?e} z?4h}pUwUi>c3pa9t&Z_Fb8ak{e{aceFMj(oE~xB-HMgCC6>yiV~B?U6`)cf z?s>#u2>0y?I8elf5+1^ph-DbYi4c4vAwmhYUtGC|@ZJ*R2Y2+yU}arnab+bN`xFe% z1Yd^}v5Vj_SY_o!3Ar&WNQ-WX+-kdJ>!U5x?@ibJLvQu1H8-A{6c3lTuqhGgF(SM) z98Jkisio8jp^z@7#%IM0v-<4?^&sbuF;>;M;QxJvS)d(9^o3uXkDVr#sWH zy*kl}Rtq2%k?}Dck)g<^&#&SV-a9&5xqCX}F-a+3gbJ}Q&gbL4D2#bZt61UD{B!48 z7S8R_Q>OtyC8-Fsb%Zl!poaRC3FA_%ELuuSXw>0`B8dvrx)s#=fg&4#0RV~ssc!s4giIPFR9EU1l$q+95CFkgt>K9V zq1UgLx3iF(a zJ~hbo6u*KR(efC<+a+V54+F!QhmMvnUEDTnM)#5hJr^#(fDbU@3$(R{JGW4#Pbi;k zocG)Q)+dkgRx*6yh?*18k>xMQBfw$Z!Q|x9P_dQ-RP}DQn-CW&PRoIpp zUHa0_@6E#(VQ&Mhop1hTzkBMP$-rD4II96WMLqX%he8f~=?D@*8p3AcpDY}S^T7 z-6z7YYzzKn?)erp$6Pjo1}bNbzx?T`0Pv75B;>8uNhC!8`Yd5~6IRPgwLSAQif z);I6+?gs}q-rM!d&GnZqF1>$jruFXC4yR1LU%q>bv2q22MgLqEdc^WBi895Dg9XWZ`g~jzbnMJAL zvFTBXElm_kdpEV0#%Bm5B5biIk_(B3twISGmur*=aPmX33&vs*^2XRjV+uyn`UJA? z^1~1W|38r)Cq!UTZDUD&qg<>eIxN8EJ=nYC#k<|WUg4sn>tOXr7l*)#rasc>K+DsBn}|aH4OzowFjsT z;0`<`zM?8eNu7l^?I6<>mBj$G&-tnN>fNTZCZ8|gPTskrcKJf8fjai$>WSmpPM(Ir zQSi3K;M$cnKTYkPs>hkAgX)kuQz!7|&g#2xkv+hai%1T0r320`LGG@FZqAMF&b^NI zgC-_50q<4@KU*0AreoTS_$pE+2CB}M% z(^;|kG3j?YI?Vr}5i@VxQ7l>Cyc;+oQ$oQzCF z!Y3AHZh33o%iu?+EsvQ5np?yYdr!z z61!4g)xJCS>C~zZD-1oxknSmueK=mjM0cXy^l`RRwatw*&GnV;sj8Z*f=j@_R(G7O zfzf@viI>K0zPjV_2VXu!0)pP(xN_C(x=C_GG=|qK$gLDKFS)n%kMJA!%WOA2+pzE1 zn)QzJjjkC_cb^NafPy`k;xXoQ*ujGCG!cjM*z{3LN=II3)pMt?U-n<#cjfvP%kvu^ zota^~W#gk$QPo9-6-^j&38BC(!~NI5-&IWw!A+oLN!B(8nr1-X87R9dfJaKe4FD%V zvQ{5!YP{8RYrd?wm|qiJoyLMUJQ8F*%&Oaw(#4xh#>DzC~ZDbLF+$W6}9Ny^I3%*iS)%Bn7< zN(KEK21cbajJ0qsk_e#$8!PogyvU3W$0sGXEVAxc=6gJ;NlxwU=@JqH03}Er|A8S$ zNjs&8(tzL9fdVXDIL{V4=Y)hx7TEK}ifMWuTu|S_;NZSd>^UBT z+d6o!K0Q8cTA(=#UepBhw9>dnwK~)q) zsz9m&`t`}>zx_%nE|XKeM2yn@!am8j$qZx7VFbSx0-`kt4v56&^K1J)IUfF(AjH4W zMzL?yM(y8YD6tzB{r3revhT9)@LymPY7^cd3Oy_o8Ka0Qlo~>j5tJB0I8h)PH_V;b z&6+*L{R2f4Aue!6$Lj?Cl7A6>#XiyBKkTmx4~(}sxX^}>1vxB8FJk0BEA+h;Wo{C0 zx5f70Rp0xa{BFX7fS@ul4_lHe%ac=!8f&|;_h(B6N`}%8J=&phmNfg(%t;T&uk%^% zkZD%I%@RR6zQvc}?Rs%{9`<5}Si~X3+^Ayg{qoZD65|uftBRviKlh6|*>E_gJNbF) zqYo`_4X=)yVmIpto4HGDmmc#y^s(wqQe$M@KrtH~U`YpE-aJ@$ZuL5|wQF3~o=7=0 z_sv{j4FHBv9zfMu72E*Rz31szjMctBPV0`*WZNIs`fgb6ymFq&oVgc&IPUrT8RwIC zT128V|1-M-t$&AT8ur?a_JB$IWDsCn$Vt#*eZ7;&%Kp8w(Z%WmJb+U8@_ zz4U)o$L&Qld_)U=(|)$DZ(81y?dK2V-oEXIjiU<$pGf znw{TWR@vEvyJiGmTY3oa)+}sMe{|93QbBPfE9SST6O&wK7&?tz{&KNT|_A0v1Bq1{3^-960&=#l-BmLhQ`3;#K7##k9oO4xw-Krr9~|*<*jWP za6Jx7IeaT*=F(64#z|uh3YY#=dgn&pI6VLBBSd{ z>Tz*!Vua>oyy1WOtoMg{aqhT@8E`~2d-@FS~MiK(}7wto}i-8Rt7mhrJK%z!Yq08U@t40Jnz zIs3uqYgk(8jaBC4dq(i+n>VRrSLb?HFwy|Q*U06b+isHz2l0s<5y0MJ!5@c$~P z3V~v?qIQ(atGh10wCsWG)av~CQ3s;I?i(wJ2G$}9a!7vnKzCjs2JN zKUG#*wALjG=xrnQk0W6@Ho=0!`1Jm*Vt2&PC)aafiXoc``3xK@cznp_KxT`uHD{9rYEHoCY1IRyG1(Pd4J>T$IF}T*X(?~ z{il};)E;Om+*X?GFlnjVqVwS=-SQlZ#rXrUH>B$0X|Iz%o3Awb(_ogz3=Mm=v9I)$ zo&xX)083nK`G*&CcE=pp^ZBqnn9!H=3I@M%?x~(2A?B|Jp z3fUISvYeRS9-Eb$G|<-F+tX$L=E04?Tbq4$l5Xfv^q8+@H$~li+#Hvs>z%hRzq!UM z&HGDxR8U{o>PPF1A52j+Cuz87s5vW>?0}XdNyA22$3{!bRA-XywAq&P%+jnpv%QBQ zGr2swy0LNODt+s{ERz%#XT;}3^}+sBLDs(D!%DZ*$+qf-4!S#DuD%&~ng+Xh0yaVf z0`V#!;zA=t(3U69wMS4qG)t_;VnPhB9z!<>U(g1gQQE?K`XVYCpMB0>dDv5Do;M)J zE2^}sD>3!e+Se{ExqY>;IDZ(IL?JM22tlY22`mr#^TWkc!GHYXyZhq1Gj8#YE&PVo 
z!N!QlFznN7sw>|I2QVN#yD#&k&xyT0dv*ux-uZOL<5>5M@|2Xk+}71vR$VR>igDkN;A?0SmG-iF&zhZ!Dvyk*iAbmqXTq*wlp=;Srl=45 znmkm3fsw~E-)2hZx6zKE%;#@BAGkgB|1h+1?Vz!)P!VwSaDBVx1xuSPTqy;$f|h0~ z_DWx0pPRQ$8BAE$%>47-Rd0WHAufKyXF(Bm6WaL54qS#nvkxeXfPzp-2~kmoO1L&M zAAs&L<4fl(&wTJyHg8q>j|;Ik7wX{2d^`Y#NvdKJ87e5ExC>2ztEwf{Hmdz$$ym2@AA;KEVH#dr#C;hb;k}DUG-37?Jg3akiejZ0-sExtk~50_${<_rCK<&B_zYQ z{*5nQg$~jBWKbYNg)AW&nUMU%_fhX)3nCRrr84Zg<4KkyJz%6?t7S1nW62o@#%VA$D> z#lx^phzf8DWlD)j?5|P|Vw)<(lW|4z~?2`~uZrl-x)DcG=w%k-h7zW{aRJuSSR)Yu$ij1Lf92AkIQoxe}0aRr{ zqn3e{8L6gGfv<~_)7V`M+!cc-@?n!~E5yZYTX0op+1CU=VxGmDEJy{2jE}FE5aHoT zTM{{s5}<%$6L%kj8xj;1`yqziI*130#C$GfeMtEfk(*UP>EsG=qHAfX3X2J#!v2rB zfmdBG^&;(3$mDVdh*D1!I}j!=3HO~cD>AYk3q{;sR(DcWf@`$P9;ZEP9oI~`H)W#L zBz03YRR_Fb2FXDMqdGNHwHYoyoJqRi)#($9BwvcWzU%qERi3NJSCqjmpy#Eo;0O$# zs*iD3RlEhTCr>4udsyj~2P>Y}1sr~I=3$I`cwKaMdye;qCmXh|yK(vQ&1;udZe4WI z_EdCbL_oIxVT+?C&+dOIjsKVw&Xq|d#N`k`VLl~)@0~q!PtEz^?rcqKBVa=YcYvO= z-ll-{=J6&SqNcRWI2yH|51Dp}<}>Z4l5A9fy#lZ$0UK3i8#QfP?Rih;&#;-b+;;Ug zr(KTO&hLBQpN#%vg8d}p#}jA0pT970*-Xz_hF5hqIIr4lyK%GGmfszZhSr2P2;10D zP*PuBTVFRa5+u0r2<}^DvH}C%38j2GWVoe!E_L}?-*%jWHCfe`G~vG9M(ZUti~^2? zE}8lzm8J=ngw}x{w$7-94srRHT4Yi!r>Ze+0_q6);Q1BH&@lM(wJ;E&LsA_ zO{k?`oF4}E?QI=N38{V5fd)pCU4-32(?xnW$LQJW{opj?^4qiSK@Y2XOJlRXc>8(@ zg?!uz1tF!QEh|4$g3E)5qM~A$G%j|~B04%!%qxSbnHiVwUa9S^i^+}Z;ZQ_`uNvEX z;I+EMPN#v=%5W|BV*oUWN!Ntoe)zv!H)n&L3571S} z5-lx^?U)8*sMF`9nc4~NS)s<`)AZDP6#zp8h}9JZS{fKaH7Ws)DjqrQC#!J)U}>s1 z&zSM{_apA_0^WAE&Q zrc5x5IC3Bp+nF&q2+=_i7S*>pKl97TDHRhLdqm;R`a{``JrKv7j@sH^rVtCr1MR(tU_a`U#-!xJW@!yX!x z=Y0wxTMX%9qN}vL8;!hf1QNBwxF=W!+q+;xBR=?eM1H0ap^c=P$iBugWD*%F*Af3W z+9>vo+Nk|oOos2lG3-M|uurb8O!(f=zk(oA0@^TKOP4_}k>nSVc)CQa3Q-b7jiGcb zl&M0fj6f0x>M-Gt4IRIPzBha2=JWaWr=*ZCLoKx^uNJ~g3>jtcy%-G@3iBZ{h|1?| zdD)#zhNpW&sq~xFpPbC_1sIhtqCBCjy0{_UKPoFI0Y5Gtw~y5wT@asIQ&isBg3tJT zVZ?`E`oNGFl~Ve9LZc%izC`x5w989Yu;21fi3p++iHM*|l&A}p%Ij35I9zfoSmhRx z<{Ml$XO_a}dJi>$1sLb4J;lyotIwK^Za<%VbNqa+8&`kgiTbYq42szvu!pY^+L`oTV-JRJiLDOZ4<1eRd~yMkrCcU#1Q~D6$J+s zU=GO6O28WEJL((VpRmzm`-6msm9U}^WzQ=a_zds)iGhPr5;vPbE31eKjwaXmgTa=Ztg5g z4NXTd)>lvA8d&MJ{HW)Vl7Z6d*4l*ll;Hseb_`8C%AMf*KbX$dxTl4k8E_4(wOVH8 zXOiEUp4XJ=7xE%9HXK1kLNTv*s3##cUP4qIrzEBGnc@+@H31gbfd>VD@_qfPxuZ2X zE4iQADuK@Zca1a_j7y~Xzt76eYxok8La4no$HYe3m}MT_*D_YE z27qRD4TK2b@&M>DGNkX`D?W0jI3f`qJ=UbF9;dBPZ>%O6qYTM-kKHC!4^bO4);RCGGb%l>Q!0mH`2)(6*J}t0I1o2 z6ywEmO*NVVXqrB@XPBFB!8?d#8S1++WTtvE z<)Ws1@kTK+QHrWTr~+l`P(qR-KnfKFP=g2ORyME7$i7rln41|{@%~9*xW9LD)UDRW zD_s705j-M+M@8@|gl`}$f%3j*Bc;9)!bc*-13_Z>B>Xj=D8m*J3Eq(mqC-PNF(1O~ z(uzBZY9LQc>1atVNJ}V9X&!3B4!OQb#Lfv+#ng*>MU@bWgcnr@PkcST}nZ7K}1qi zX@E)qh|Y-U6?iLg!WHn(^q>f8*4sJ`HI#Q z>-ptT08E*~dU99RWjNiUb)-1z(Jv&Y_E*lqdS37ZSAV&3wgsE6K{Us*tx z(LyRdEViF-;H-uB^K}GdcXeQcXY46@-~}$&*CE}}TwuH0>XY>V>?%ewyc%4`Kb*ES zWb2mjBR_lYh-m%P#cPmCac658@wnZOH(7kV6<;6zF+D&CxrH@(@hP#`XcQp4*raGGYgi!SXJzDahWU{7 z75h-w(>dhq_g3ldzyq zExZ2d>&(k{16S{LG;yvrdB~dgbLql0Y@pl>K;I+-sH@yNe@0ViFw854R(3tVA8TB; zs&V_yVaEsXgFg)YC~9bc9PZa`q>O{z$f!Uj=8UBIVfWvNcMZY5T6S7O3T?1YD5MJ! 
[GIT binary patch payload: base85-encoded binary data omitted; no human-readable content]
zP^6cz`TyAa>ZmHWzS{*T-6bGcfQo^EnAm}>D0ZNts0h;C-5t^(AkrX+bb~ZVgVNox zyVu>%M$b9#9q;$|{l-1a!P#S4$`XQZW%%?);pHoHaGUN$?w%~1}RF(G+AM`_!AU_$TT%7>m3>Ecg$9NNyM%>#$5z)iB^0N*6nfnVQ4x42#G%b8+0T=e0jY*c7Q}?8XDh1L>(U; zycqH^>-%(f4a|-rT3Pp8d{S`jeeWrad9bM+hztlf)NppijOAZmf+$n z$;#B`+SQ8_GrztSr$xj@^$(L=9BX9cybj%I-FB(wzGm0Xy^e-jpSzmoh!{d%QDJT8 z_^I0-TMwn}I?}FiyzjBbw2g0PdRZ41(dk_39_p`Y{!T%3iThE6zKN-yWu=7#JQj#f>R(gd0cA@+`qsbtE zBnl(QFiiER#D5lfSF?SqSj8&-{{|W2>PJd$yItGmsmSdl35?gSbKZE(>vUA1*WCOd zX^B7&qW@9krD7P3&wq=ZQfa^ck3b_~Nc7`1EUBvM9~!68;M4>rJG=1LXwM>PnjU9q zGzyjQM<^9XqtZh=g--bJBr1wT#UK|zGJ%R`#BS1W+DH5?5=iHEp}!(!ytX0|Edhv1 z3%eoPtq-c@6ID}~85)u!qz7?K-)vV@W=K-rrx^_WH^QO8$sl+ZxTDKj|tjf+!SrZW)V(SRAC`;YiQ2*6Wwo z@1NX!PD07;_Pv3)V?$Bv%gun-F<>ObYQfKG%g_CuOXiW-rt<<;30e-BDnZ|+61q7` z<|LwOII`N6%DYUy6~@=qwbs9DfyWR83BI&PcV%mJq!`xURViOtcf>NoG-ePH;?Jjf*~HT7#wT}kP5 zwM~KjMZc@NtmH*BML5(sg!F{=8}808OQ%xN=1DeEFSlPt@ z?`sM!EkiVwf~PFPqA!qx%~4`=8(?h{qUKb2ky3{7*<=+FJ>H!rl6$naY6ocPnHbsGdb@>0JH;m?b$48gj5=6S ze`yjWS62AEqm}o}4d3uOSG(btYJ(!%GT3Bu*|yaH(Pkz=93q%3Kmu#0l^?=Qax-ik zzDb|SG$N#bjI}g%EG&&Xem44thoolzXl{ka4yN-9XLD{J0umikJ3C}$GT%Hd$xR-i zVByTnOkns&4LvUdi^_x)SXc&Ix?x;S(-g$c!HEglL~(OZM)7w%oFc-W_Mx1@vgEnh zW%|*GL?R6h4d}c#%*`!LOirq-EN5i*DD;mt(ilfSvJpsF60UDzGP|_J*tS^ZPCsBS z1z;GkO#^|}E&EC`vWUwJ?Q8s>^cj8S(-(2y=zLS_&x*3jyz)%j1cSpK`;azMH^Ai1 z!EeT|8gQ++H+zNY(P=58(H9x{psmRMQ3=2A^*i?bmX808{~&7A|7bWS{?;G(Tb~*J zrhN<*K&1Ff{)_feSF?SqSj8&-e+NCfE>ebKD+5 z2e7?kKWctl+glGqe|^}M{y=1+BrW?>MsDI9W||Ct%gMHN4aX&9lvg$285VW(ix7vI zN{hPkvtVCu9KdJsEPUSC6?Mr$=XhFGn1Y28o9=pG!OG#v!sh_Chl=it+s|V!vDQk6 z+fq!*ZrhdcTVai%C?p5EOq>|1NKPzEPQ}eE&=}V2$UDR%`&8YRw36J&qqldR^HV=R zoV8&novSej1l<50yMYA{aNuLr=R2ga_t870vL@?}0rkh(({YwdOP3F z{;Hs}-HP1mhiWgBHSV6Vx}o1-ve|y~2EC2P?N48KQN3gH$i&xTaBgILdaAy@u_(91 z)y^#;;**uBrP`}UCT<2+Atqa&Nw0m+1{|1xB~q|vvtr@Z;+A>7Rax)Kg?E?vP6+NW z*ukaE2}}TTffEq*6jrd?nei(jt{}!QC{X9MvEDl~XCLpBsxKZnKH715hxCuiyxexq z<=pk)YgfZB8z$?T8of8uGBEZrQ?-9|TK{}VNoZtJ`1I&#Wp?qi>kln-P2ucjV!+JU1Z??H8QTSrR7&fR(@zm+7brI zWD#f>=x95zUius0>H@3_%wQgXPQEj}Tf&yaCW$)Huhbi z1$(Iq?s804_Kc8z`E2`>YnR?V)NwGm@#x+=2Zxstkr6|qPWe@qsReoS(6yjCv9-xL zCEg(=ZR6EMFDO=&7Na4C9co|9ftnNbm6Sf4b5nxvl`5H~VLtA(47 zhozHaeA?ICl3E&K&Vd;CEMrFd`aAPW^3uO%X6BbP_s$1JkC|9NKK?$yJkQ2U0Zd)8 z3LPP#v-JDm(Xjtl-`IF}U28!`al)tU`0}QP()#AszLCzE@xrM(y*%3;{>Rsv2@5-l z$vSM&&(Q28{y@%{R2mjdp#9G35g4t-IAkf4|5n)hV^H~@O~&va)a{S9z<j8A%fIM~*H zX=YXtLPb1A9KXC7H+uxm8N>1V=Dg|v&w#I01pWHwc*tak+7?aYYpVc+EMt zn{Pd!u4o=$l~kB^?7>M@pF8r7yTv`%Aw#}aNF$@9mdMMXCm#!=x<`I~PDy`jsC(=E zJ^g6oM}bc+I$eJH;f4AKjpOf6o-SPh}HC)s2?c=xiy`5as z!xCoNN0Wm;-nn#1*GR`bJvI;GZK#XKrho8HmvGLOa(i~A>c3u!-IjQ~Y$X$mBqso)NH0Q?DD)TS< z*~QeC^o~rX#igf2#1*7x*4EZmR(>Ya-Y zc7_8#A9nQKFX?dYTy)K?arh7wgn#`Y409t@84hf1LCmxpTp-T+KCYDniQoMlymNWExWM$ z&SP3;9$p<2KM0Vpyo^-4$w(Ilu4lCWwV{>0M^<9`5PhMxwNDk57sMrplvI43oEt|I zi9{q{4=Wp?ifZq^eY5QBBVf%4;AmGk(c$3S@S|b4zPP6R+t>QK+@`joS^Ow{?UV2f zl~0C&Z#P7Rw`FN(TAcM%z8-nis6;1wB5f4*;UStp!cxgAVSvnt354JMA=H0l?0&O1 zD#I@BKUD?(N&Ei!^(T!-`^_53D_?(kSN<0g`ESB1?OVkv{yjjfmFV1S5l*Av;xb%7 zBY7+|B%y!HHRDMWLu1vYmBm?kbQTCvguneMX^ehpBGu4_Y143VlBdm*_vSJWUuM7_TTPE-uba$%=WQ`yeJaym7i| z6izQd!aT(HPmQ55gvF_)f{fC#+_ohwp>8hqnV-xBOQ0Pt>>Ybn-{`~{C3$ymufqP8 zhf!u5oKCR2$^mCFwxEr|J`y~(T%0yMtmb@dmcjxK(kDLO3SLTx8u=Vh78G9?y-1qw z=xfa=$eN;JCLwkn;;Lq<4O6wX5}&FEU3V<73?9lb>W;tJ>Z?%rn#XCgxRa=)rGRSk zjr(bD?W;}A!sKdtuCz>?8p56!UY3XOJj7pK?0cCXW$SEg*7CD4r8V_{@!>rVijp?6 z0*2z-ZFih?J%8Q!_H%>RmbP}u>1okvQ9iMr=3XYZp5DBqeEIOcBiFC0tluWZv6KC< z@gW{9exL)`9oU!*0jJj5Lv{!D-P`LC?rm&izD{oaajg?#FU5I`dAM9T+1y!yF_6{Y zwD0-u)6Y(O#rmaGWaIev3s~%OLghzq@Yh6WRUVCl6*H-;;R%_9q{|T*Z@z 
zE??YN79Z;CZSUAtaQKTOujg5APsO!P@?!qVTm7H=b`^Pt_(p%PzBDqpfs6qQ<7**W z2tsxOb_0Zh%Y+RWjNIJBHT1X{dde_8*57Wqr1=vCd2Q)>qszSXSNg zEd$c#Te|vQS{Jg(L@@Ey@{2Z#N&B3-VgBuV5dk@UA?PP0X}p+~lWpVUt=O^mhuZ6oh9676ezl`U z2M|whNLa!yBYxi~Mvw4MIt;J>EORuzh#y7tzs4b zA|ULF1_@-^iZm+2ofpwMAR~=;Bs^nyj!eK~#|DNPN~-fyv-^Mb%`Yzeb-X4Ku@uq* zBd8D4H-e(F$o`RvlFE$6)_OFaMEXa0g$!v}WGf(}AOQu@Shz@l^JKWlXx~8&^fdAx zDJ>H6!YD{x7(+uiB@+GT;d0~hj}L8OmX$WHO`ay1W}X?|7&?_CLL6o#tkW+;FVV)$ z{B#`REMvY$29|_)XZwHskv~sb!r?HadHUtBslL@OG4QIp@`=#B8(cXz+Of-M@T$As zDsL<-si}xAOnd03rxbDz7>EKlF<`~S?asZXXn7AK>?WL)qBjAHNg>y+t2h-um+8ediQ= zMN_?OOKc5JBkuykFs^UA=8|E3+um+1<_9!91xY`9Z`3 z-uFCfP1f?d^RpYVOTAhzc~w&C#O7lUPF{F_Mak^M1~Um}T_E8l%<>j&*WG6P(Ks|E zJSaRg>s!v$)J$7VXXE!D-^%loza^)p#((+r89%gSp=s&q<=@!XY7=gxaDU6;x7&2G zHBUs`Tw^cKo~+E4c8S&d2+-dO9FzoN-@WcF3-0gED6bqG`IS-gY46>Al9v>B-*{kX zp8iTL+(5^_t*y^9z~RV|&4w@TTDx01^cTn`X!H9X14i7O&ceKQ;=8?18&?EqM!8yb zH_J^A@Ifpe#PdS32z@^rS>&WZCK3c>Cv01sNcEvvaG1R zp{1v+vcu9aE-0(?%6VZ|J1ILg_M|wOEuM;#G zVlWVkg#dI&wA*|3l${%SxBsNk1BX!5<=p_?>fBjX${hR0g$q4@6>A%0r34gIe z+P_@CWtH}=Vio@yFrq3I4{1b3E)MBSW7I?$Eox+X6viX&yf~!ynMT5*39~bcc_k&q z-z!@>x+kaRkW)X6;h==5u=bM)Eg30J!vlq1v%--vK0N8S2n&Pxks0@<)4nOV*gD%sVT_CXG1(-n2tGqbhL6tZ=%;`5O3Wu9+7GyH*k8C@_5-$ez^<~s%C^zo zm9gMuG6{?H9xgYHSDJ+D+;>#bkJm9xGfVDHEg!8&FHZiNonD()_$4mpW2)y%zq`Af zFjQK=txwlyX%gpUx zx-JG+?uh6(cIyMXd&^DhjC0rRH*GiMmoR1n&w-dTi=ekK{l)AG;k%nC5>dA|KKHbt z#mh^eg?&=BHy$G+QV`*u6&JE1q<`@T!gOHzCJ%-P?y zdaA~&-doOQJDV;CuQflPu7Jp0(Jj}uY*pT|OJ%R<4GHdNoPyrmh~}R?V6p;IIuZ_F z9F|Ggma2{*htT}b#jSNceM6&D*x8)&?5|&vKL&mH=ogjvDf8N!=MO!!AB8-Y)fQi? z0=8NS9*sY|B~EE${v+UhfXzsl&rq1zd=s;!!n3CMH2lQ(pFP3pA?H4*D5YFmXDhQ= ze~;+#?I*6?v~}?b36F8|a51$v6O&%A{Qi+aN0LJNJASwAh*_K?Qk#~x-gqSRv1FWs zOjoTaVV;$Q;$B|hTbSQSq6sfUAp!*R5YQjd7$)MdJZ1dlz@U3+OL=ieZ^!I2pO2b3 z1r-o#eGW=4DIOR^M?^LI`L)_xR$V*iWM}-*(9G+mg`q=Xx@%VETYv96j{2(JzIsLI zOZK=OyXSbAromb~KpFvuHi%XhM^C$Yhh5J~d`#z^vtvB;4RsN>esSsaP-7fiqMwc0 z+s5?tW6nH?mVaAw(h;&9o&y{s;@lKT7K}LOC@*U#KX<>NP|ME!CCc~GkEt4%hsSFB z$I9J#CUaj^E!fXKxAHypy5cG>~XdRE@SbsMveoQ+7y>Z9{8ECE*4 zwnisMq<^b4vuJyxPTst3at#<-$Gv#s$n0yKinRRpyz<(rx|ZI~CBhOUVwM;SA4A5F zOLKaDw$jk^Na~-=II$sX83WN&WNntFRJojgS9i{;R8Av)*W(;bxB8+w)C$QW{8sU# z$Cj1J4~Dk~qXC3*{GG~V;&h9#YJ>j2#kc^?-r52vv(`Ay<|pL3H2}f z`1{C8{=Z1%U-M)CMy%4lRjlIQ0tym!5g#-f?eD=i0tPY)w=@h=kRdF>B@EJ;hn$;N z+F^Q!N52&m=N07t>S-t7CW*9xIa1vyF)!o0ZE=%lMO^@Tp&6295xPkJlf7faU;-ip zGhfl0=9i-7oA@;5t9Dsq#Q1a@g&vvzI+CwggCLjFV?Tz1&_JY`(>vQAS`zs#;k|aI z_Q(F{T0(7EPepEJ=^{f75QTi+-r=k2)UOVIp_P>cG>GC(+eWzfe6* z9aWZY?>w^&PrL8i@j)Kgas%IWz?OHD_a2KHt4?Y|AGR3<#{!F7cj;`|VkyRI2smv3 zrzyCVeNwaV%w8L?P7es$BQ6(+@Pr#eIQU+Ib5`=^F|Wc)eZ0$jZin4H6mgK(fuGG* zl+{rbm;q5cV3{tSTqWDSbkR10W2+HZ_ZnQW6VyvMA!p1iZpFmq%)#s}1dP`JPYGbl zB;o+>gmH(IUHepOuk%6uW{k3+BPSwTV-MC?03LG|4r7*`Zab~=Y!9g^U4D1{s^N`e zCP%LNUpip2_oSYZ{Mo&)-)RK~M)*a9SVr1zQIXyLc-MK`i|1U>%##4MomM;P?2643 zVY)(Yd`j*oqU+-5f1=rerRKaA$^^BusV^flr>dcHbanuRof#b&_?q!8AS7Jf(O}z) z1Jb6lLdKlp)~q5nED~N~f`L2aOJ1{h9ON?<5H@6IGZN%9+5VzD$hW5~rZVHK@s$lh zhk(@@U<9O`H=cDqYxu$V$(v^+}0KU=JuOZt+3Qi_kL)p4&Gnwx$&ZnDlO42qe+h@2Mk+M?aFN1x%SGzIS02&qi>ej& zKJ95my%~iy=~*Q`JtM;-6W_njJ-Oe=!SEh6fG(99Wo|nVVgj!_qptM@dr{(iDzFg_EPC2$Rt4yAFAr z=ljHOmq}brJ@M+ZOZ4K3uP?(d4f$g9@8PLb3XS2A_9yF4`@;+g|D2R-|f_tJ6N;mYJP5GKweDZ%%^KaISDBUfFHiI;}M#rd+Is zYXltySxv#NfbFWuYVR^lRJ~Q7gxz_Y|4hbhBd3c1aAF5eYk{*MpSk2&Pvw}ZkXcgy z>~eSeSlOGPr|Z?%iJD8Ch&y}6=d?xPYrnRqI}E@kOTc6RSo|0=cy7o5gp|>mCov~o zigjK^-#Kirpcr&ujTKK8o$vpHC64|bTct9;mY z;j_#dGdg!%izp^Ka{)gAU=D=c)@;xPTJefewZ<>~mG_%&6w+l~=gh-m3D~Rwrx}x= zp}_g5%cp~sMrNq2?5o6$|J+XrtQx%l*w zXS9crhhcD9fOo80aGalapu2~s%ag|sQ_~VFYRbx+i#4s@ZagG){mr%4jv8jcCYHe# 
zYI@JPH}mhmdEmawgCll_#I!`k^d%0uo_HRm-+~>WZ`1S*z9PQ>*)`FSv_Ki^?kOnx zl2V-Z{b%#+@*U+d@JFoV`+f2(QcpxfCDF|vlyqL;C{Em?v>tMBVC?$Za2=JKYHn^*o#*O(!Xr2 ztCDEU0*P;d|1l<~O|t%n&V;{u{xf$Qs*@8=bHX`Jh-O|wZ-wv}4$2LaWtW#kP@@9) zc`h;vU=}t(!c92$e5%_OJsS?;1038rI=`X6Q?9dGsW9`yUH$(nE!ZWT=!l+icX70%F4FCA37^Dhd3CfUmwJ`B!#C!GJ7rR}g<=l5G1Snnm zc>9{ii_5QURn&Z+>-*Y=$HZ2p#uP_?EBTU>@uec?dr`)?)~ilMg#kY9MP`5!V49HN zu6n7~Sx`i5X`N_lZR_ah_|aI`(@|Gb^`)n$q`p2YAkewAw79ymxxA)3uc0ry@cYR3 zil)!;_1`MKg%@b%w|?8two=>8)RG1fli z|B6wxf2Q=P|Kn8ee|(qvpFj6sZUz6_9sWI7rG2Yd#VY;>{DT@IRUYE_#B@Oy@smoJ z46hBMtFqk^IOQlWqq88c6Vh-!gVE!Cf8mPZPhr zn_C>iU??=CVSJtlF&G+sQ$XxCsdzjAgIz!^r#F7R8F)|Fd>ybr3a?vyg)aCVXq?XP z8>}zPPlZTc5jP13qFSAm95+dr^YPm7FxfC8CT5OEa&Cj)4r!Nt5{6QudcqrBH}g9S zv%3ncapq=q;^(vw+H5AP7Wyt%2-vVO zaHC`9@$&XZ7SZbuS_5t`L^GN91vupO6#Ah8F$mOxk?Tyc~N zE_w69I_otrhqB#gv3bhrepczF!D*6B)Gw%gyxxC34@b69W-d#qb)$-c&rUC=;y zuY=;7M6D;D&mLK--F|atkJA3zPwuEbxRaTck&%%W9u`tmn2%muDl01X_4JB~h-|2- zUCEl{<>siVJux)YfBgK>d$V^hbzi<6z%Devc0H@;{`{nw_B|I1{SZ_%O>` zsW^q{nQQgm&0+)Z>D)N$eMi>)oPdeEgn{%m-#rRyoX2m=n%KX@!?CgD{^W$5(!8oX`$#<(u4!OSBA9hXDceilFVUchpnb6Y;ZkOK$Svdsz2PeekG?e6Ye=Yu%Gd|x1 zsU*}e8e(BaDm-$$Vhwva2V2KB$-Yzje*}2q#zv8I90{on5Xj_ZWO*VMyo)%(Xi-Tf zfrLe2Ac_RXsBi|kWFC%m!+Fe77drAyMDl6#PN^G1T*sPu&gJZS`Pm|Mo_;Z;GG;i) z|F47EDpqOVDps+I{|Wz~eN;sGcn-pzCRD7v3A2y`5OD{R=HRM3-!x1`5i3z}2#-t$ zLOg+r!xL$6nFhNe(l7RrFZ_a1Gf-?CO81j)n!3w;EsW^y`8mHdy@Z}yIdhW`C1(7=H-l@Z&yPeirB~kdk#8n;zj3#u`v)f;o;Bfxtu$GDzaws zT!vyymh6a3u{)BJ<8tBRwBVDpmlU=VWijIt@!2HdEq(Z-($m~WFH@fA#l7@QvZ`(^ zOilazv!}nav%j*X#p_duhL^5Eu+e+V7baE@3@lDYq`xQ`^bTvbc#-lz-gf71%gx6u zx2sw!nTEb_OSXFKdRtagLfnu`$dQTD5wM%H3%g5-hsYj}-{V`ToL+G$Fniy58^ET6 z5WBr*toMU9T@PO;Wdy{GfPgVr=L!Vufus}IYzhwRGrw}*ZRC0GmDSnzLC;j(@9xw; z%%`?#lhHm2ZP}ex3I{!ot}|WFX~e~4&&h1dyvBYFtJPXALoO+!4defurFoo5!&w!G)cfIu4pz#_!$SJj!wl7KaCFiW0~( zJrGNUC>otCEHm_PaRlu6#7yVNuZXHx{dC>K&e9vLSi~HdIIIA-lQ6f>Mvk!cY{3%T zPV4viUB2jQa?Zd>_KLpjx#u!x4(_|RUFz=IT_$2|8l3w*j=YF`&`D{Ug}5;?`eS}l zA#O}(enh_Sr{ws&Gy%5{j&GRA-8q@M`&Xj!Sj$Z^b`OMt5U!;_ZV0*YsEs7d9s<0G zCYe$2wiJ{?UGY)$ya)v{5g{jmB!tJwP~mza1yC*yHahx&c(+nyT9EV zaGJ|Tkj0Xn$DC)Ez1()~11cU?Rs}iM_32i*QS|o+aj+q;qqL~Id6H}yl2BgLIngst z9-96f5I8*DXCLqBUzhZuDaYkgT<6$y^4FXfZ{CE5MK?D5h>DK?l%A4SlA2WU>5cPC zxA-7S?*xg>Z_gboIJa+92=r|gm|G{7&T}$wPjcOc&UUu>QC2cq5Rcg~HZT6Q4Hzcw zasQ=g+;`1%_JIxMmIG8ahFtP<>o3Cn4)8`d6q$ysn6a(o@vE3o?WV>f?vC4HtTvap z2{rn2)_6&jcqnALejI6Tni?IO#Zn*~L(gFmofJBgUs&nDt*Tpoppks>cB%M=m={kc z?93OcDv*vlMpka6_)7XCq(VTUCW=e5kl`*WlH8wVbX*`txY$`_YJ&i?l4^aQI~84d zU3&0V(b<>rw~Sj#hET{XFm(w%1y^a`{{pPizE!MZ75^g$MA}Mo44Ht2aGnBt+m=)A z2JYe22ZGjMoi@1QDl`Q%aj<>@cB2q~Ci;Ftp|eCZVVDe?KNqMZ)kxsrxk)J7OOyRR za?&kav7+vCXLnb1&G&>)$tj=niu3F1>RS6p2Bwxq=SdTNn4gjLahHQ0i&^bqvJhi3 zXO*x9#~qo!{80Hh{%CgbW=Hp_rdIW7Y)akeH``?A+d;2)I-cUS6lbzx;__u-^8lO< zfW-g^nXln9Wda6(!;%71h+dQ{kkGX!@;`DnT-NR>&K2}w`en{n`cl_ss6i0Vw7cUQ$ zJF58wMZbOy7Uq{|zJ8yPnw|Hpkp6dETtZdX)!Ml@8C#mtX`h3ceMa1uh>!$rebeXq zn(Gy$EP`}zzdaL|>F?rU9^m7f`YAOmFz{1cpihuab6Xm5c>>btn>=n28Q1wefQf~$ z12>R5kaX2~zr-8BOMji_fZdTD*fRkyBwx#6y#A2w3uUbk`E#LChk})EM;|!zc+(!q zTRMky6CNE4P`()bD6}TzC#IXuL}no+BrPJSK2NQ)?)2pNhLMF0XtKo882fP5dR#OA zMAg=X(QV6UNj!D!Jdu?G+3By%HQezvq* zo9jh{c_pSs`iFT0$Aw0v$JqrtSp`@=(!U#;9%~F(d z3ITaGJ~kJTkkLAX4fxpX8Prw%9rkn~ZfS_dF(i^`OUPtB>9;*2l}zdGYAvtKqu+=^ zBjX^PX4q~+A_fwdNel|uzevqWFaDselkh@2QOCMIsbF%3;rM}QLo?)BR`LHDtF&(w zt60T<$6xbqi0%Oe52+|BWoU+0m(=O{@Y9Js_Wbw5_a*mQ&A~DX{E3G%vlK)a0#86_ zACWqTrS`N9h6bk`s2#b~i9XvneKxV=OhK*1>{9Pal8=Ccw83k$w>Qk5 zZ-1>MYq58|-3Czy(T(1s+$KQK6v;$xbpg_zK#)$A-UCIut(TpZtW)1tPNe6w#P&@7 zXzu9v_^~kZL$QrXgs*E%L_n~ci-n_y*&E#lPoG@8bwf!*Od2N6*#n 
zJbjRrn_XR0YM`z6F*Wg(t==t*M~^+8Y`iRd*Z7>R&wFjd7skfMqmwg5O_hehMhD-> zJq^0$QxaG;+q{HS>o9N`17}F|;{b|)B!%YTsC|OQep69)8>F<&?7jw>2=+q!LImIK z+;&fU>(OLR;V*0(OLt$WmOo^E_UfYp*A+J2kvS8i6}(*A250D`p?hSkxVY3t_nn=G zgGqUr9*VH@=e!&miZ5YVr$OCkvnpad22>WK6V0p|< zQT9F9W&*hN*nsT?=Hvj`=}D2PMHvE7rlniubA^3WWkb!>R3BzxnNUx*_Xk+6L>8wMPmr#&KgcL^7jfjJp8M!?pKgUs1-zrLwYAqEKR~%>{2~V42 z$SqO-o`j{ss*38K-gbt5Gl76`bo$?&VeCOebXArZL~w*c#u4!JQ^DlqB!MviOusW} zdKu!Wf4cTo@&6L5v~LxwSjB(Gir@>He(3)_@bNue@qlIR=EEV3u8 zc%2a-ZW)d*z+c^q4T(*u7HRJHgWf&}zU}hudU@Xs6!r{paXo|*kSIi2I5^e!qOmNb zysRked!cV`gl@9Q+syap{VyN&*t6S;`F?_+aj8VW(0<3B9d}cC4!d*iHW%KYA$;2Q zi0)_g7hZQxJUAQ_8~!2ii@9DzpnHa~w!MYPdo81fnGMNN#o?a|qcaMU)nC7HbMsTz zco+TY8G76#{b6*2rE`Y5 zip_zO*1Hed?KCyJ347TeY4oNSe#KZI6PEZTj}ZVJF~Euo{_z@h)qh!aI|wzNzCs5Ia*m! zYi;9T>*!?f>U{04$}3Iv)T9^#bInVdHx6r`J??n%tdFvS-qsUZI|Gt!wT++eQrvIl z>*<*J;qk|J>&-Uv8i?$0IrZqHc5-9VGJ5>C{toH4sn!@oe{7Gr8g+@+MHq1bab`ju zT>-B%r|hfC!h7@u))z9f_W-_rAY3ZFC+OHM>$B!Ym*QNcV(iRmlg=akZxWNkYn$wY zLS4*E^Kx_QAPj;~u4i_`GUS-WgNb=&G8JHO07vDdLMbw3Gaf5}U*blMb5X~cDWgmf z!w;$AGt1jY@G_mWJ>ziG2zl4w=$ZN~<>a8(>0e(p{FI+wVwqWDTO^A?$hJt8m`1A+ zXhxGi&PQCA)dZWL12KIufkZ2gmI^*10T5^aM_D(HzFa-#SGO=ejZC4^P_&l1hQVJw zIP7v~dq;d+VrOT6NN}9m-Pg{JetHfLb}@dh9Nr#MRSf;&)9|azJJ2~e$S*B6!Q0NY zwX(7@KQ}rmluoMVDR>QCv-`IUwH}8*y_mL6(49>-8tjjgGVVC&gJnBY&m!H$!nVK( zpfCsIXP)1}({WJl$4!;~bJvIWpQHkoMj-w}=v)mrZ6JF0&X%hO?%Qg79_W4hqjTH9 zh$t0uLL?8$LV*fY=uH`%go_C6TloDaqapU#FyUj)$iO@bzuZ63`L#HeOh1T`X$+q; zES5S&piWQ`B1fe!PlPGrkp-KRl|MN(N~WS{hz2`u9#W?vjl}2%A<&O{1l$6H2u?%F z67rECO)#DyBooj0)vL7ce*so$-zrwIivJD@q+%gy8Dj7VE2AR9&SX0IqjS4O0@BGn zF+A5?QkFuTo1pLfE#7rNWFh)jfHjq66ji%yowBI^^ zL_}_DCBZ{j8&<5hpK(sh@8Bi8E$+HTb6_;b(HA04tpkKw!n`owwhHd%jTU=cMIRIuG2+ZWqT?$UA9KX&DWori;mzeid|^2=AxJUv{Z zB12+6hKEOnM8-ya&C4w)EY2?|X>aTK6r1GZ8IYZsUsYb4ms|WfF}0`nS6Nl1vxkeh zt>O4=%h%?Qx4dr&zLDZFl@PO+5`E7p@sRbd$r)1*?fY8Kci%oIr*}on{wTBidf>># z<+**g$7$Vg?ShK5<&jCaO#e8*1uFanVG*QyE>|B-)nGG{1V%i-o*THb0XraNC9QD( zjexi_mvARD2OeK1lHElTEvanHpTR;sAp8QfSwTpYTSA|P za03SJUWTWt>z^n2Ir?02Gu<2H^3psh^u$Q7B&6}sAQuI)P0jKA`gM+oF@dPvNzb>t z$nAR1BdNblDlde$x1A4*<)A@;!5)C{-UQC7`d3x$FhXy!@aOlM-mX?6BqtZZ<>xLTMxTs(7KU0p}@f!eW)Cv$7_W~kF&O4IG#?E8EAmWLN|qS9+Jiv6q|=SGHG z+v)>jgYIc+n0ZE?x#l6YE}WY^2k=Ps7z0Esz(Rn700nm~ zQNrNR1RO-Pn$W*JZKeBuTA|m|GI3Qve$v;M_kC7@QK-wh!Uu%Q=zY43g zZxyRp#eW9{(RL52|n1-k^)Hw2XRzznY#{R3$rNBetrW^S6 z*I$abeBAnk&IhB2%+CeI-#uYGXR964&=n4D~VHXe(loxN9WVG8>9+Dt$4P&(yezOd1hTz=|6|~G z5q!AA+4fNg-7EsfHj$<}cbA!3+u2sz$Fgcla@We~Fjhah1V;z@WEovy*;?99sV4fHijnuatq zEX}Q$8bm?FQJcQJ(KNU~LF&Pa6xtFI5q4(?9Zx_}kVZk=JOxXk zA)Y+Q8pe|$9))N=G3<4x&_pC7OqwHN=YE?nuF}5$1z4qht60S<{yUHsJ%;@0ZyzMm zpI$#ikeN&)V`vc5KG5VG=#X8W*)!HfL=4tQblSJLyzqNOmxL&&(pLO18EHTYjm{Y< zDDyQvSoG5UwxK1(2MEF}No z{PkqDde~k&T2=GCVkP;pge7!QIxhzwlW^eXu;&BDECN=nJMAS-m?$LGe4@YV{vW*& zu5S0wpKvnK^z|^+F?iwVZ)zFv>b~FqVec*DqWb!G;U(Qj2#O+h2L^U`cZ-T*BZ30b z-60avDJk9E-QC?i4AVWcp4o%`;(yNl;@s!OxqtV=^;sXB*|XV$GkdLdee3#O5-^ko zrd+^*e_mr55s^XSn?OLHTdQ38b_z$^1rcxS{qDKo)yK_@Rbp4;_s@1#uzMWPHx65YP!9#u_QZB_5F)le5YG~;_jbnB0)ESvm$WW2|h@EO#Sds^W2G3 z+kFGR>$(`Pf2$;}yOR5x5Sx|+%eQ6Pa0By zgWK^9pD7&%ohfJBUYrh6Ss(pS%u98R-4Adn0UVtL64QK3NPwpbtc}?1Il6ZmiV`3o zKrRO4rb7`LQw}|MW^&?IV)6}7?+ZU7Z}ko+5O9kaWWb~Y275q07E6#g!P}e!{GWj@ zYk}cXV6}dEpt4ex&(5hr#nGZmRq;29!j%dF&ID-q8=a>39f3bZ2tUAV@V01!-TEdf zIu#{r`$yQ62x;av+rI?jWGU^NCQ|n2nK_kOTF2;}aW#J2)U>;$M+rsPkH%dd9eS6W z8daa$H&cZxuhl0X`ZK zBm$8Mz%?w&LkED!B?y584(uD3`3RTkFg9p*oqii&<`fYdpJ!ukXJTO3QdRAuZ(bFb zHCEScVCfJxGog-|JOwFY)6*-ca4i-d7^Z%lob5qo;W%VsA%jGLl+3#P_}l~n2@5$* zZzg+t%X9PS_+Qz3OcoW1>2gB&EJ*y_cn3)bWM3seuMCGJvmY}U>{(3X^i*WB{%E>G 
zra&?lvG6FrlY)`81B>WA{?SNn0spVDK>HT3fCc!U{m;IZ3^JAhv58eNTK<~X ztZv+KzO5GcD!9yNhBm?Cboby4HU*^qiuwK)(`RwSBam0;X&;NdKj3GX_$d~dfV=}8 z60i`%9+Y{h{(j;TH9Qon_C$l7eyvc1^Bw= zr6(dir&t41J(^R!`xvt;A!GUYxWEWrF}PH5j~dvjwehC!3!@BM&m5nmjHtTYQaA?5 zV@&qmSBxz?@NEYYu{C0oHZCr6Afy9Us)>H^f6*{hgPIz)G5&Dt;d4Vk#Fk8fLDZJc%shqj=t+=aq zgKk~4xNIC^e*N{m!xv6!nHbo9cl;XQqHU|~7w=7kST^k|s?Kt8vn|ceDNfIZ4Cc^a zZ)jA|xtk}Ssy{MuHP*KNoKzT>RhsJI=i1-b)l^wknOms*`b{Qb(i+>brPxoV=DR?Z zjcl2TsKbSOfgfTD!t}J?zE)L!uJzU~H|&>fgm9hmuyrZ~=FBiia*LY#u)t#cbfi~>wlObv`2l9XRXTz(vJ;*r<7i?6S% zYrKAc^U-~zV zMYKBtHFXJ<@K zk`A4UWcy7Iv)Z2fD{nR3vdn6QfFYl}&Ei{ur;B>CIN5tl8k0n#&CuWs3(nD47z&L{ zp;6ff=`8jJfMp_`nUMXDN*ZIqNi3Y3fY=d;n?OoL$?V_e&_%>cP=xP(6t5*~wHfFG z9s{t$USOY*$j7MrmPr;WCfa44y<^0gu*8sWUJjKt#dTHX5kb~XW%@LDv9+YSu4 zfB_J<;#y)Y$YUV1+-l=-+hfN+>}nWK9E0Uw5?}gu*m`GuKJ;2tS_4Sfa@e6M06Jh( z;JR&I>z5ddNSFxo7;+=b&rS$f2?8TN3G*d8oVPzoQdWt4e%;~5Gj~;+M5q1Fj>;Zi zq4M?RGglS0fcFpV?wLgzPQnoi#CJBg+Ur}gx7LK<$cW(J=(w=#inIqBx3#}(SVx;D zx5f1`IaE9$4k2JoVIM1D_< z0>!8B3RA@O!=&Av=rz%qtNkLjMx>qT7}_#1vm84oML>&FaKbE_2qf_l+j$#(Bz(?F zd|51`CoHAIC;3Tmv8|G1?$_O;^_RO_uN9|lvR64^aOAbouB5=4p?f=Bws<}e zO*MaEZDsK+xo~?%v0_v0zR7No9}S9PHquAePgbAoPx{!Epb=z$TKS%DQF%2377^h9 zgu_I7=TN_chwW9>`+HSX59_)vf8l%6cjAJ}nD8YZ$&>nLpBmr1c0bhDy{4@?p&)f| zc%Zbfi2aQW4)vy0WIZzf7&=)0E+s;@q^JbK%xUb^51(!+KQT5Ay?v*8g>20do^dfi zWHUd0ZrUO+Eh&JL=9v*#JRostPR^bwkq4JgAx-CYVBqD>=^#9!mKWvQ^W=hu<>x{# z*QW6)gkt^7%~Q5Eb+4(6$5VdNp$h?L(A8Fgq{<@Z*Zll#?Cy%I%Im-lP~kLW3_!*Z zWDV6;MURfLPwYi%FjzE5|2^M-Ip^3$8iob2WZK-+%yeNm|aU!Ex8EPkubm zO|S?mPtEOWjml5UYbg1d7US^Uv8kn=qa)S|sqtjAb!YqIw!x>)LEA&)UyvCszMhV^ z&R;E#&p=|k7*=a;?QHKH`>!0K;PWPHJ=*=(nr;C){D{Ae(?UbkoR3#!(H6C>7xYiw zw%q@tRqu1$>C&ODA(`D#SvkhXT9x4;|-Ell8cOhh#^>EDlDlc{yOfRbFQ;b zipw`2-PA%~w=654LVE+pr-vUbUGpBSvE*89FD9zPC8W;1Ok4Wk*PYwVSIcM#^L^$5 zM%)PPvk~St5tO%Ge$@M*X6~nhhKIMl-FZU)+;QD=TVCzl^nTl>Pn(vh$tqfGKl}CM zyB{z8b9~4wQgLdDzm;!IdPS6fxS!YeS;{1v4nDJabnwGoC3VGH_Sb`&eN%f9>L#m* z5R;l1ml>0KQRU+n&(ISniX4W*=3YDmtlaGQg`So#4OLN9mA%9Lu0Bpm*LG}GUAOwP zl%x(2F##<*xT0&|W>M{Nhi~V; z+Nvany{OGs4F0j%C26^1-m19aeFJdA3`24X%{MVCh-ZkhpePASPtC5HK=W4i?wDZg z?w{Pz(zUs&?eGLvVSGw#d_)vIE`*;IqESU4jgK}2nt$+mo(CVq#EccBEff@-R<8=b zvaB|2!}Q?(>A7>=-Ky0Is#y-IB{udoj%kTsNN=O5aqgjsCosee-fl;z| zR=2=SqceG=la26NCsc|+$vyDcbyhbfAfG)wFc6o~m~H256&(_k78zMmT2MXRZGXGLn->WgqlkBS9YTB=Kb&E<+XV2FQ3e*dLQ9gj@=VC~TKt3?IM4(>;RPajW zFH`Mb5-_!921;=96+l0BMb*qUpH|y2_~?4WN{OJg%fp_%ZmR8uvrJgp-hNr-VRBgs zfz4A%%q~duE=lgeqGvE9q}qCxIW&%nFD$H`7{)>h1CBvB2;mT9P9!EpjZF2>7#)z+ zgS6(Jr+vRub&)O|5RFuEG0=mQ* zei31>0|d$f?fZ|x0_|JC0v7Nu!2G=da29$pk{pB-_Tu0i8BRvk_}uZoC1EHF%y@t; z;I{?)T@{Ri-X*0)rle*iC+Fto71Y*OP0xUrR!t%Twl%B+a*3PGb$}9yQ~ODC*i2`{obc%CJSO>wXEE)S$)6v)%7E!quFHj ztFG0pqvwhe({a<24b3&R9ZfTk)P*0Pfs}5>(A!{*ZH@=|OeBFh7n{f~wvv^%*r1yJ z_EYYAle!oCoFqilz(yMZX;rXCV@X`22^tpvsB*aKa(b!N5;pA<)&r|_#SZH$8i(m( zA%?)fhJ<)V#CWNjy?Ckn@Tuz6qn9^bG2FjHLqXxq^0Q9o4jUhrxxIAHyX_~8_N`Wv zmiWlWt-pxdihGe2($;plmFPbAO&g8Z?KIo7>cct-W!cpqH=J-kyVY(xPz79?K-^Gx zi~ag1;SV*uKc*LF`q=peSOzEf#Lo;&rKBYekq5I!vi4~zDZG*2X1HmEn%s4-tFMAy z#`zdHQ`Hm(B=<1cZ!?#jT7bYG)X3fpN{3Be#NPtkV{sR5hQeynAr*{<;0P&zyPv;Juy6v$yJ2uPwa3 zguZ(pEU)riv6CTBhD%Uzb35gnZ{>?GKM|>7Jj7yV0)oH2ynC%VFBcL?R3;;&qv3pF z)Q0l%O_Ri>1B}Z|sFcyM-_Fk`BhAh&ZTHzSA^Bb&aRLB%_7D^&FJ9cdTqbA#t_pdj z6mUI$MeL+VA6^>yUqfjMJb{5rZQvz!xMvSpTw)mTR4N|tG<7Ai-@K)#PtQ<0J}Ml^ z*drnueNAoIWo3Eu4*R~Y$)71{y%;naG2W0@ndlDGXh&mxufw;B!g5+;LmPUs6H+E2 zgNQIc>aST3k?MWK+ed}?uI}FI>Q?5jrn*cP+XiG&kkBy4)rUj$Vd%3+axRVdXKEk& zbP^rmj=wYg7V!TX3$$+m3s}Ivz#nBCi0&4XfKVq27OCweu&w{ri3+nUi)2_K%}z&ZbDmk`7^cK&b~I@ zKJKmcjqLx+^uWXiwKwKYMhR81;WZI%`My1HbQTf_kd|Ls_%l0NFUe@5-c}i%Wy?%f 
zZLr_B)=cqs#GSo%8{U_n7yklQx&k2su+BnylZLoq*oC}qhrm1w!&HsMM&iI2EVCEg z@3he{@`JX`hn|6c_7)cv6=P_u=ip&$`^{p<&Mi0YUDE#c!79%Dj>Qx2vM?1p^)qj; zTvoqw{^Rj&Z`Z8QloT=G=Q2myTClGsW(by<2?=WP%9<@(qQ7jZ{)*)Wt9H8YS#BrK zrOU-_21Kj`MD+zP_@8_H_0h9W&jZ~<>(ZNx;!D}PYjtaFR7K49YR^rdHpzaH75E@1 zr7OMPb-!}Ba`I#<3Zjc@O4{n%J)_e;*VjG9&TW~+N~38@@o>u&V{c}~PGj2(@83OC zd+eFv?c3nD*Xppe&QkF&f?|eze0uz{W~(11zAM63Fes>jn!4Pa!oNLH#Y7e0IY^u>qIl^-5Fap1V-`&)&1dq_AQBAT0t2Q0iGqzgbMAk+C7 zOlimzqvORO6i147X8{$c8_5S|++z0R+-cl#RT^Htr`MI}Feb*>9{ z)prS8W`xaY-MOeB;oxPVYrcegBJ9rn97D>zi@tRG$a&Os*@vZoB9k&HXy2&4|CM2FI`)QO?Fhk{h}ME{(-d&4pXFCj7e zQwNN5`hFK@kQvN5j#0>*FZ8D%%#V&i z9Da1_Z|^<>DGQ@fIkHU)r0+ij3$$+m3s}Iv0J3Z|kv)Kk)MwxjjV>Ai@zOCNflNZ9 zvRevY`s?40&^+yN_wLtgI^HU?%yvWof`vqaNhB5psrH>?A7L;M^#|M>oSLKzjr4c* zHMDlr*0)!Mq=tR)`MlX|mw?G)V8siJ`BqtNdm8XMyD^7GB_N`3Oe}<`%BFPxpDt+? zNxj(q+`1C4zzA>e;O3f=>Vm8Y@9^T3%E-Wkr1&%jl5{vn{l!$VHLWGu`o^Z_&fdNe z$r*V#2A$1W1Ioir7+pB-bV1Q#-%jJ**2Ol%aJaFvF6Bo8W?(8b#`C_`<%_!KP8nU? zZ*X4d+LD7tJC)3qeW*Bc;0HgS5fC&0(x3QG+ia}EyLcr$)N_6CI`D~t`vz_&Aqgk( zQ@;D0l1zdlyuCbv!$ZfcNI$o60Wz_;gbj2Vp3}g{jlsz^B*;xeDsy>A z6#l866>VjTVPQN6kABv8e^6=5>f7fJ#)m76jS4ead^9@15yYqrX(klJK|wOY7kOz! zFfk#A5J##oizY*WTOj0TK!C;YK*+^}yg0~*g*+4_lq^W<5bF*2c|Pm zPt;x+X}r^4cE2z8R!@RrXPPK_auJ@joDBDL3~e>k+j!~B@yAb3nSWUprMI{%T$D7( zMMaB2+!{Cm;_OzI_{dd-2>mcS=w)nQkPuv$6PJ^p5S!xT<$qn}(RJm!hj#9l5tP4i zM}ME1*XeT1(J{EQ1D>d4oVF{F+xP0o*%$6^!QVZ+5|d)Px;khTo>>=6 z3(wk9_^%IveIq<4CvICpld!p{vZfI%O;;!V#J)sPu1v+9b4RYvC|xgnZ(LyGT%yeQE3^m-`!n(ef`5CV|^n+nrG_E`$~Fx2j>Q+=9;TpG7H{6_g%W& znMbr#P_Sm*;>a)0k|z7xn3P7O2!nxS=&^oFGBF^H#$wjjcel5V{m%KrvLKo|ukOVB z<;vsu_UOoOF#eQlnO6h&)!=)9`TYlAf%g5=@JEsq0_MLM|KqfO=klBw+4fA-R|iFZHv?&P5_ire)h4*QEguG+T38Auy~?fOfsv)+Uk zX*W-W`sUdlw>Y}SZxfrVNqm#M7JKcA&Sf(Pi@SH9CnpuLKcs(&gRWyTx=wR+=preJ!M9yyua_gSv^@%>3-urp~I0=IVwP z9Avtt`CYQPea!XZVtsi5Lum=C6;j5_Z-n1To=fe81JT(DKHkB`7B;$`R$58kPP4t{ zq*)6Z^&5%q-rN;cQyUNym{u8m#qj(ty}fdJ>jXYYi+x!lq`p{IbH!PQi|*+j8I_rd zIca58mDA&6gXlT;%6jFxHYFkyV?Z7z6oXI#-FFb*yE8jj)9_cbV)KsVs3cZQ2%Pwln8oSiUAkTLa{kE=c6KBbZKNE zDwHHb2?Cq&AwEBT7Lga?0Uokn-3X^Q&s6TJOFLT<^SJEC!|K4ZUE#aXrSgygNGt&+ zR3s5kMu%TEGmt-fe&x$Yl7YI*y5r>_RtQo7epC$G4x+3;l(|%8_@3tU*NwTiT&(YW z(s=Pk{n;&5C8d*G+cxo^RNDUJrg?0*bxO+9AJO2Erl@J?k)X~^+R2-5It6D8eX;d< zr>*z)omx&#mY2WBlXp*S{alPa10L%}tUeSdv#kLLbu1E^m03A=`YP&+^)MR3?8-_X zmxONy6=!#*Up!Xx$)L+WVY;{xZ(*B%bVul(4f!I{8GLI>_z#pxpKslD8z;ZJOMY|9 z#*KX&WQQgA`T_s2u*|69=B~55QiCk(>I(+bOPe(9UCiAgf?{*C%1RP566~xjbwBAi zIXM?p6~z=q=Qrim)-=`@RP`1VO*W2yR?A$(>%c44AS~D^3!*M>^&V>NqY_%@^;Vc< z#Ny+4?C2CGnaX5U)pT@rOt5~f`$%mT1@YuDe-&^rI8|DZ^K(8ykdw-XSdH`kzy;d( zpMeG1_m2YWH?RAZD*H$B?H}WJ3D)1v{6Dxn<5#sN(lUXQlR@C5Wz3KHrzFlxoN3Nl zn8p9ypmD13P@H~CSPuE4!9Q2|Url@Gg`4S|yD=jYH8~KKBxYB3WxTh2`_%W%4*P?m z4kExD@L2F~u-kRn>bah~vxU>w*u`$zt`eD|up=_jQVyFH`AtvtRxRk_vF zmLFdb5S;IO&EVmgPj}?cY}~1=v|D}8UNfbA?gwrrTwkZPRO%x?QZC~FfEf@n7vz4w zNWJ`h9Qo(dfcrBxzk;l1$TLUCs;iVi0UFuSlZB`zmEDcdDJ z^HFlTTydG$?6e4^iBf0Rj&!cg3*8d&`LNa17f(;dhxs}e7HABQt#0iUpPB}PeG*LO zB0S_LL0K|fjiYX!s1r67xHq^6}F* z%8y*X8Dtj)nY+DxX{dJMuGOlY5!`}x!XgYFE-C;$+jq2nHt5Z&82TRM6&Mg=qwjU~ zfMrI=Y=0}cvvoEten?xrRzWIl5h&&1ALN&q0|G-!H%uM7Ft+b_p@!Cw+S|UX$D7Y7 z^&QpFHo5CsW!jj4CU4VpCdkXL+Fzd17ii!AKS1ZW`Z#K<^C2G=^KWkD|7E#nGpE+7I)vg;ku%BU zU(Uf_w2$?ddzhmQMrXlk8YZ>FODpWcZKthIea^a;yVMff(2x+B5gU^j-PqVzkXM|Q zm6cbN;veMD*_1KdUfEFA7?)fYoL3f6UKW)S^TXe>qoyW1DR~4nL4x$Lp}~%tHU|se zh`uZ?eT=L4^N%#Ax~cZ`b5rP zEx!qpjkib_$eV6b3DnQ%tn&)mFvGwQQi1XVfCk_l3yi&0T8qiSY|6D z^-=sm@EwOjE0a*Yu$;h{%!sb$#oA)5lURyaLcVQ}tEgki;W^*DYwHz%vPp}n@Uy{Ue{^?N{LcQX~@X>gVWF%1o6 znT097S-uCgk4b-)1D|+>tc1Ds7xAerl2et~b7$kWgB!0ueC**Ln~~pCS=E!3ke!>B 
zTU6Z;(LeB@z2!U|BLI6qgugLd%iEkP9RE!rQdhzI;tr!-%5P7YI~nPhm8edm50UAs zAZs~fEP;@l3`Hn#4GC@>>Rp}dCTRcgbcE}%q}X-MEi!WyQ3!b;Ttuhw;Ai;ow zR*;CYB)S9%@7s(cIQ}ju+2pBj}F9!`&E2FqA}SfAD+gT zqcbV&$(xXzMd6rtSSpVj_UxUB@w?W?j5%B%r zzqEk=aeI9-2j{p!lc%ZkPacYHk}^IqY!<3;;I+cH||*ZS|uacYY{ zuRrE<A>g4CM2Z=I;L4(`gQ}{l3n-d=-RtGFMYfa-56w> zVfZ28?Iq7Esz2ZCcQ_<$A;x9N#iP#yo`S6gn~&%ndSE2SG_c>8xl#7mYtqa zl2z1D(g1NV*gL$mxPtx7;AZiN@EV#Lmk{fd;(0>vpv-4cVS`1I_QHY&d{UpK_v-9f zyMOsA1-T37E?b#9M1@7C$H!J@B@suvNwecPq%AM)aYe?7`kzuUni76jfrb6xPo;5_FkE<)D3c&Vm|E3azpf~3Q>gk8pBL{V?UWOO_SBWE zn4T1&LmmbcBrzoML{OLyTI#{fI5!Q)2dM&7JQ(Z*mHD8tM0{pQ7}EI}P=<(9B1?_W zg1QF2$#G#KNtg(^&~z|`=IZR;P*kp1Ua=cHwGuP74mY`Mq8qekgB(BLZzx+GdyYQ2 zVW?YvbY|N)esfX7<mXhXP_1Vw33j0Qz;lTwq}Am|=2+IalOz0aXv z#wep`DlsdsQ0voY^wb=ah`7in#|a6k880+dSMJ(=>511a=(l?Nv5Z=+$R8=zZ^vRD~_p%NgOFw_3(S4lV%8K%9I?E%| z{D~y?L2OD&B$k4ip^+Jgr8Nh~TVYaQvz~6`6BC;|KKD+?K3D0ybg}5e>&mp?PDBBT zJRhQ^Kn51F2uuAAB4i;+Tpu;5mQP}y@+0QjNEG{LfU}69@-aD2SlTuYy&SVma z3<4+3kO?U$h#Mqf`;m(O5f~lr7M0=`o*(nZ^YhwI z>-p5VxJ;1FxopD6?a1kzYl;LVqJ+h`~}+YgT7XK|y*&_^Th!*BWgT)Kn1ups+(z zQSRDer+ocSQBT*YuTZj5`iW0gEBGv9BPVSyEo-)H*(W*WAm!_(*I!${YV2=lY;I_8 zYKsVpU{PUvdwX_47M@8!W5+AAk{{l?d0h4R-RQ_GeVuCt+c!Y02cY>RwO*oA1n zX2_hQ(uy-g#c%hjtuJ%GulO%=el7cK= zI^@PeVG0zWL4c)$F%&NuN>QLJ6(UXgrbrUaJ@Wly3QQ=7$4QV0@({n4F|=vA5(F6R z%Jx59nWk8jy|%u6=NNuPWB&yb^AU@DcC1ISvHrj;VI73LoH`O|$mAc*0!iw?L1~-Y zF|E|kZ1zQfbE(;hML9Y7Y32A?h?#>J46Lq$>bkz`c3-{m)cmbh%CVEF0NAcxp0#&p z-o1O7$*I^G46LYcH?T6&(0TLu%bSessHEiJ`uf_U;{Jz^zrR$=y>u;W=gy>UTSLyB z`KI;JIX=RtyCZdu(vw~s-#gSt=WsKl@3yJE*`siL3XWspEQLv?6Bs3#a3EsDx|kAg9N9$QEqz8SGO?UT+iQsEcNmi!*iZnmznNW3i_y7(Njf7 zynW>PA|cZ}Dmr9x7{jI@(|zd5oU-=XRw|y%z7YEcpQS=PvLO*5bsTZIG4Y)ILwZGa zYD;xid43e6&*0jc8gf#}a0(6Q8c3sG;+!|=?-4hU6tdyaJ}V&P#IwkB5tkV^pA`@` z1uKlfAuFj%dMmfR5E9qo7P1!PGv`_Auz0hB>@kmR`|Y;vwchtMQS+eD4Sj!`=B{Q) zr9<>AME(#g2F`uYbW@LftdpzqGVabXgWcCO&iMUwYOG1CD2NRYa@F|oX4lR`>hFyo z-+mh#n8qB3rKtrug;8Hq^bR@hkhNYS=dl*3gQeQy(k~b7GLc!W$-6>ZGN@9JzG80bU|;WH-@xqQY4*q1Ro#>t7CF@1#=?{1BBQ=& z>->y}$w?_r` z?NK}SBKY}Tx2tag9~-0^`1WULmPRkueYqvbVq28P`j{(7t2%2oy%rX<-ni-8gDtTR z(t{N|)M;J_g_v*^3o6VEZyw3#jn@LU2Y~T9@Loz(d+Tb8yIZ5b%6FIY&kg}1NeIHF z5XzCE$iTSp_%t980c7wZ+QlSs%%I>%4H(Q9CUi(bhA0gR%t8S)(&vpIQF}qkO2knt z@o}0A4oad(vZGV$@N{_^lwzNWwO*}_|5@Q%? zDq|^?%?t73eJv|zMwb%_!k8I;!l(?fb6H2^vII>LtsTd#j_JpIM8kpj{LseMa!4aL z)l7JLO_^JfH9z7{pDNn4?c1Xl(W)Q)Uwrgb+^2o~q|>=`{#P#h6ckd4R5*=B@?wJ{ z6JnDSzZk2z`C8b!8U%y|=$ZQ5dFXTTN`?@3pa>{d5G>yy=3w+PaHge+IoncMl~dJF zLuDXyp|yYDvyEXlWpE6+)p2Mhg~cI*mD&BzAL+Ms&my|bxJeu&jnL8Ix#@@zhMFV% z@R?9P-ysKR>p%}bXj-ilcS`T^dQC@z>bhD)t@)Q=6Wds0(dY~U#|wazWQ_HU)E3tv z?o3XGE*;WooPIq2&sL`e{JU77eg7!1e(%1&df;FYlJ~bRipX)!ps+Y6{~yx6KPVlZ zWBAj4c@zKW$I5vNj;jX$iyAJ_zW-jZ{_g4fYl-)7C-WZ-n0|SenHIU`| zIQ`n0WGaEhu}1zz`;a(k38a32@Fau^cqltVRqPqPJv!G159!om36o3;4$*$bP?x2LRm|9v* zXm;Q`^`}+_A1$=iyqs+#Lqm2e9)9@X{i9oN9If5iA5Lmff?c%kW1n+-J=XECxsd_j zwc-{rcfgBqS6S6-CEJ<)$S-^f;uGymE3x)HF_wO)EkuiDO8jX={dOl}eje zsT<$XFzaXKqxF+(ej|6dRo37g*=z)9Uub z1eD8%6tXD5LTyrMo<&Bzv8~vrbYV&WPI}XcH+Req^Mf|P+OL<_uOxR)P zQop;KvfNhIrpe9?%gpzsATfAwL|F<$fx=u$W(iOsU_iixTr{c>c1m!zSBTQjOBv;- z(#7denhHh9ked#%0uKf|~H*aR+Q1H^#;{8Vk6&0t2g$hN4Bj3L3yQl1Z{hpS)Ps+=; zmd0j3e0*|J)0(Plhfs5H4hOAl+{hiOS^I}p$3T3}g{t{BFLsfPWSiXx~2x^CcI^K&q>zS#X?%R9Vt~ zzs|P3P@IF)$O)`}tK{OBSC13zozLH={YA5vBiauCDCb<5^8eLfA#yA81>HyyHv^Gu z_%&GbId;ey=x#4 zgQ5i5IvV3KWW__uU{7sBO%M@Yiigp>j%s>GKgN_FMf5M$zFMh1D1t^U=%a zK}wu{LG(8Uc7O=yrXY3}lA1}~Mv107K5v#+7ujwhx7T{}UXxuXEY5EKuzuYOaLNRH zEfZ^G9?R?7t?mxaSc$CB6P5TPve-ymLQhmuM?gl8e~GRD?|ZH@K4%`dK6+>KCL%U? 
zYI+odc>9pnCG4re-XMn{Y66mGAgyO&>_<@8gNrv_+`Rwn-a`jFM+0N4Gv{xdI)2GJ zG$^Ju-89$ow*MtLEkQXeK5;8VlaJ4wi^m86eI)KI+umCcSt z-|}%qK6UGI_lY}tyEhpVI$8bto+}kfTr-u z*q0&&Ua~_i8(C0cZtfIuaBsQSso3WVIxDuB?>ORp|6-c^&B?ZPa83-_av0K7)(JHA zWL@dTV0B4{UBG-XuvxMs>h8+2Z);{dgyyDsNlbnSxmXb3Xl(i=0qGKC@?s)gfC#xr z5Kxe@h{EKdQ4twu2GSIcmyRg^39)|*lDJq?vJfYW9+zqEyH|B~ijgP+bhr%9*fK*nGc|X$qw9WA z!KaqeCr$BZ5-i^&*gGY;6%`~~=xKQPxw;2>Vu=(o30agYE8y8HV}QRMfId-yDKY*Q z36aQ+oAk7GJrh&1{Q`V{M#W`i7gSW%<>i$X7uR}u{qXYiFDn^#&fZDCWQ zpkR9KYU%nl;bI_067(+x4SU7i{Pi+Xr2|cgS%WQzS~8oTwKn!Q7S@fH)MB#>;qVyj z>4g)Muy3#;DyFlxJ}N3EH7j>sGkFG$pQAA*v80UjayU+dUeV~YS0)4iUc@8r(k83l-Un1O_Xt7~f-ni|;;uy`EnZ?WKCxu`5U2{I}nR41c2m7Q=M z2D=e5UqWhJYiV>zS~6yAiqrauLZdO*%yItCC(}8+k__p177oG@NFIa(XxJXtXrl35 zY5N^;TVsup*|qYLU@9D*p!HUDmXvhld!)H&`n@tqe*3k|z&6L&G|uF;!)q1ObGkm8 zW9qlHQ#Vh-?W4@ym1D`xy%d+69neM^fY z=TF~xbnlac_8G^U(qC4Keh~-iKv)-u8E^@FT(rt|<-v%fqLxd55f`u!;4>1F)RMXD zd;f;%&EqPk{j);`;Yd__+#!v_+um;5@MhzAyK@^}Y=|9-sm0YeM>tL5rtoMSn|=0I zx5fInXC_6OxwxGCtgrCJR%@Jkr)zq5YU!(*u7?592ChLL)xP+D{a#a0WMywjMm#!j zj6CC)>KT~fGDfI_a1u%6Cm>Y&M|iw*ctF)yyP=D@p_xHNN`6&LMuM077n|qHwdH^% zAEGSd2!M(7>N1m!gq9_c&P9P@M7Vuw=JsHtL1UbHq^^qJt2c?(&nh#{4zw-DkMl#U z0Fxw-#i^3v>BMO6XWJI3ig8&;f$uw3ms+ptD-a>haj}^koA9ye9T^HBK0g)@6~|8| z2$7jWR4B}V0xZbQM4EQ+Lgpffc;o!l@(knI=;Wet3N zoieOR5!R#t6^c+H&kS+v;EX}z(EXR%I#=)I+BpQ8nb`Vz>ozo99vIn*l9*OILCXjCk2}#Wd;4bdgW$fRz8k^tb`Oi%*}y^PLGQ% zB>;d1V0eY(oZ`kt)o1C(Mt(*{1|y^G>?L}5sJEmzKP4rluC5_AHs0FQ)XB!dP}kbp zI%s+ZYG@Y9E%n;9Bk%l~inYt)mVm-7g4xQ)eZs8c^L;XX4Zam8=MDCcjEv7lhsMAu z0_>ftdHinh!Ha?0cShd7KnFRu$M`}fc4iK3=j1&##gSH`B6DXB&q_?lYe_9__~J1B z;N{r*Rp_OnsO8e#J9bQ%+CempF~gqJkj5mCSXcs*JWCmZ_z8|rewt&em6g|w_Kp48 z2&hyBoxBk0`G;bG_WhIa7uq);fj83=1>ZQqKu94a5*3thd<1}xD9D?jlcez_zhUbDKwEi1{P zupqjvt8x_EL58C^*gXR~uyC@vzr@H+v$Dyzy6rIrUYlUa4Kal%P^o$Dcud7}(^$38 z*~DzvvD583crGreC&{DF&ubwfY$|yvHn+ArdoX@7YrsvwN$44%lJ9O>TmQ&kqzPhsR`Wl(f>lYa=1^U9GHcP-8 zprE(%@sGzVo~+%ef9$N^qc!#igmu@*o9t3h+rItTp{sB2tAEkaH#IeJw@|ivcE$dx z{Dti7&|b`qf(f?F(BQbDxMH6oWedsm5f7M`n6Y6I&RpaipIr1LASC zuF-MFXg}v9Kj+x*-$UH%N=oB=d^5s*EPdW@v|S560}#LqzHtFVDfx2iO_Y8?0+~(w z1gGJSe&XedmI??%t8y#R9nMv0ArM}jm{7n@E+)?|rC=1Nr(X1o7}S&=GrhCTXwPb& z3PF?roLB_W zJdh#8f^0@8NQ8m}C`@IDvd97~ydZ-lPK9D5$U8>?lPJE4Az}2`5(uRz%vD6@6&&ka zK;pjJs>+X^l%*t$wYRWusxAWjF-ChoNw|f<-@u~oboV_9&;1rz(^Z8@3of|-TscRhK7)hrfOL`sMS_Dd`j+N~Qk zH!fzW+^T!`ApMi70by*oEVgK*W@x0Nqq;KJ&c)ir)@(9819}Gy?>W)RB|IT5I=z2& zi|R3083{=W1{-oZy96FKUBl!fL0pNIw8_fNtf zE;c$6`l2F>$h<3#`dbnmA%8R`l5@`ZH=J>-zh`XyR%~XI$XPm^<2*pLnyHXUW&Oqd zw}5{>{srxWe|z@3!sB1b{2<-}2761SQ|To3Xl-vPEiHyfFLx$t1~Y#SiO6JMlfCzq zmeyyb$1zC7M2rW8WC)=H0c{c=eh<}`%fe1V7E@ zhyf!WpwF|{d!I_!_2ee=ymIxvK8>N?HSQ(s_?o*QS|FQQLP*ts6-}jOb5D-)p z0}H#3eQeLMySuv+gAQpEL{UOelu)|6ySuxa&F)$?zqK~#Ip?|GcYNc1-f@5L=QEtH zv4!n+$J*DruHQA|Kb;FA7hKwK)&8EL!z0^IW)`n>cOKbh`sO7A!wJb{oLi9j`I}== zX$nblsHp|z(>R#lQ}xKhc)jk{tIjvyM7;js^)4|n)XCAp;KL1>%TlUNqv?tbQv|VD zF#OwX)vTb!7%@`>bGu+uS@-Fr!q2630Y$|H1ph@;)qrNOyRRvVd%!Xh6BD~ zk9@!FGdMZw@FWG(4N887bDA6H;y_`aEC#YDeNfD&Fo1o8ND=6^ZweSF@n9UEKZRX4 zi|V1;T6P5&(DkYefOvqnV!{SQ$3g?FgOgK}>cgG~b;mXaeU5mn zV_saya; zbnZ-xt?TU%@3P?ZGb@TxlEYK!d7cneS7ya$hqu;bbE85E4j!&nRENYoO>?8)J>gsEC;Eq!)b{b*1AQ?zlOj6EONX#PUT7ecmI4oz zB=oVMvH-GDASDheis}3al#U}&AC`K{)OwSWl-}FYDVg*M6cy2{z<^fsFZpK+mD%ebELy2Ed$!3KH4Ayf2$8i`P&ZOP@NVZV(^Cfz z|6%=9*T&K0mA~=UJHOwsIeE|KvdLTh9ea1~UbWBG=LKGeVd!7@14G;^GHaY6&4)_WVf>Xy#ivx;&4g4lKNsBMJ;ye_Pd>SjMW;o z_RXrR0awnxy70=^*3R4Z;;VC8K5diIlSRy=kh_SC;S5A;)yf#ROFgXzs;iF{mEK58 z_h@T<7ajiaqeC#@4>{dinEg}RIy9L)jrx`&T>Mye*_n49Cm$Qm{&UIb{mZ6)G+f;a_bGBv>GHBU)oFX{0`6AXm{;h(D86f0 
zVREe5e_4O^7%>dR>2eJzA?+y~ilS10(@j2bKm>%ekoFNuL%%#Q#)5b@kG-Y5{z6Lj zJ%67LdhVp>F?J71ZD<>DC=f$Pb8U2UMR;C{qm#X(o@sq}Qhrpt%`?mNti(P}M^SOE z@e>0ZM~BG76lVvwXJ*gcUECvMa-V<5+;iS<*O9c9>q1VRjN7*LX=YY^a8N)=L4IO% zJjNmi=V}s)iwbJX%h@fRc4p6XwR9bv-dn!a-*@)XvQ;mpPO|%B-IskkzZu-mvocI( z_JF5{Pit#0^$u|{FD)%9vn&Nj29)LY-ni8{YBCo=KxRlef+Q}V_io#nk7gCC7bi?n zDjI_{s3O%He(!sv)z-nHz2iRV2{nC91h4_?0Z|t)TYt&n;97qM*Aq}`dj+3EghX9 zQBld+xlA!ClnkIQF637fhlcyGx;n%RA>HbiMT?RMFrJ8BS_si#K@kt9d%3ZND6J}7 zU7l@jx3RAOB)|e5Duu)3D$%@?uaOPf!;GBvCFH(K&vvLPi*BmvXsc&5SLan%MAp~G zwzL-&mp23iCbu?qH`F92#NJHH-~YfC*{+YMpF@5#KrGW&ZF@d+rm^b0mkTZ>-#H$9 zW{&w(P0O(|#AzKXE+Pf$fGHC&yA9UAPMLA=?Wm!q`fuI}x$z!aBft|!i&I%{!e{fl*~hRQ=M5!rW$ zoFhUZ*VGppTCultIT`fgEQ0cs3Qu2O&p@w0PxthQU~e}&S1;@PA1*D@n?3sBNOe86 zGI{e$~)#vXBJ^HO&g#x03V z-fU8Aw7WGzL)0%LfxIC?Nz7L)pq@k;L=-zpiwG%<&H|JNE`oIo1f|?Zlj9 z$jM^l#4v-0*H@Vor-;j#27 zW75V*_bMWovNVhv!&v`EuEnb=&)AYzzU~HZt?C%{9DW-q>f&=+LDUbOO%Na6bpBAz zeSDxZhw4#Cs_Xn163QLOPZa~JW3V=z4i-xW z9JSVnQ4+&@sH3r|B{i`iFSZ~h%=@+dHa2<-U>UmyA;cs5nDbw`kN1qb931(gy}P8K zCMC5Xp}4rXswAzv$T2HJFFV^bJrKwrQvtvCOlMGXES5xyGc_F zX^m*a3>j{S{H{MLsrb2z!dDJ|<6pxKFTY75zlt zan~;n0INHDr(l5eV=@FNkW6{#kpcD)5DWp)5J0IzhQwt>xN<*lTur0AqmAsL`8FSp z<~S`CdQBC0NLBfda*029zU-|gyHo~9iu{I>pp0RP7)KiVWr3xL3x){@_!=mGAo9Y{P*D`phW33;a82Q@5h8m!m;i>s)nV-MCw|J}l(eG_!a)fta{5yw$fRF?c zj@`U_E;A>Zx+3Z;nr^q=$?{e!K8gA^i05V4$6%fUm9QL)g@hAtt`$?KDg`-jLd|Cmtkgd&BW=QhtEP#9P|q5q7VV}wfDt_N784Bq_=Z8ojmc& z+xN5DFYNodjKE2{OZIpDWDbyJ0096W06qc)15`+)F$Q6e__wi-+SxS#WRehwfe%4M z0#PE#>6bWoX!m|OKnwxC1Ox(P0W?Ye%P;%s?SF%P6yH-TmIvUzfX4vj12jsMmPLP8 z@FoXqekIsP=O55IuXL1FOt9 zw%UG8|LoCkBUZNHAu$P_ogHlw;W0K}P(fl*0JZ@70M9Vey`VHaIVDq~FNhXoe|qP3 zc;iilnRgKtqha!f$W|9bD{jILoju=ZJ=co_3#Uj+i!TasnI#dNciU1P< zCa^)Hnz6Bixh^MpM^^mdqN3vr-qc>9G()5cpa7&SDWDD+8ubWS0;hm)fc%Dt#Zv$l zG_}q3a8mtq{O4EO+wJGUr!n9@gyE+al5p^Tx$R7Djug#{bYbss99Kkn(ga%;_`^|= zB2OeE5XyYnMfw@yY5 zj#ef{PwefSso~t#UhnE=8yy=^US3IEB>q0ZOh!LRa`JN%J6c)=d?+r5B`a-a%nTbb zDq@7X?}+hs+xLI|>{A*P7RhF}H&j*Gy?7TAlpdc};OOgM`rcgM`gL+nZ3~Ol$7}<% z8+z->&(1~XPBh#&S9tS$x|K;yRMBV0&RKJU6y&p%RC=W3+y2~&AKaT5?A^j)5duMf zNl8&uRLqZySJ=`BWu@KPTJc zM$&G2Vj$Y|E4S}wv|rfw)7c=kD%o3S1o z55vGp1Sh$$Qv~O5umN%9U=krw17IhD3j)|BfRg}E0iq<;{lDy|w|~Vxf}qAwB>@&V zTo9tS#9|{LoB+ZB3I^ei-U`s;?0Z%pO5F~W2%ub4BG`fhA?C9?KDpZX1vq9GhIaFd zh0wr<`d%mv%y+o>a`#4)g;zWdnTI`WgK8$;%jL3ZcY>HMA7t@5ic1?4Vk_b!DhpC7 z+N-j9x^p=+1Y#JQ1(=|RS^YWM+A8$u!-%DOJY`ooBZq_62Xg#Fv!Zje%c##I5>a~? 
z9}K;XVQW~dNn&gzUwpZ)(Yc>d1q90Nu6KQRt)jtB_w%Y{<}%1bWRxwUlQ;9mH;uL0 zb8o*l{u-F(eC(H}0Uy;+h`fZZpg-^%~ZDfLp z#t6$P$m5~1`iP1#f?PyK>yMo9bn*zxG1A6}lmVh-hzx%$HAPo-l=d*`=L*O(DHS7R zkMrEu;YUNt9$)&rWc)*9+zUk67}0!yn1>(!5~hn2wTbaT6?H`%U{*o7R>*@{`m<%O z4^=T7p>8p5?(4amK5X9ecGtx>XEWQAMYOgKFTW($#`bw;V%!G@n>Qb=tb)wuKUqFh zUmekxLrmnyJ)OM%)#_VsE_nvHUOaf({wej*9(iPWd@lKL(dp=tTQhfCZg`htAHj-` zf!q^e_tc**lF`>tHPW1Cwd&l*8zB{OFW)=<`RC4a*RMHwe=>7?kzAS?66*We<=O5X zllEL1yUajV{Sh+jiPD_Y$_7{Vm!}o@GqF=WEcyCMM8J{50qFw;7HE*Dx}b8JpnMLa zd2CCERF@Fx=8Xp!F67E#AcN7B8N&sjA_PU0kO2__0tDlZ2Yg;l{ZdcYaT_K-d$hFX z-2&8Q65*oW?z1E?<;J~Q*BOEW1bPutI0VI&aTq4TWW*%(geoFX;)9f!l)=eiVi=8q z8WSe;6= z{(!6?TM`-`hz^SeYft`Cf%#jTuOIK2`1=F?b_V{XhV=hCF&Xl2Km6@g|MA;jzYyf# z>)rm}-{{}Yz(0)md-vu4gU>toJQAmzA9|7f;L0ev)1o2Y^-nu$>B;=dy?M$D3mVbHktxrRC3Y%P4Q0pcD=Fy@Lrf_MlTtDncF=v@%st3)!x=XM zR*%p|hL|BTrpR1VWS{w@%(@SSr2(laLDV^EC!4;OdmME+wD9R~^7mDg4M(Y%st$c1 zJ6?C}YU>pXZ_YOQ_AtFEzN)9p*7dcujg`}T+oGb3kQm>`PLI#{Tt@CF$vq!4&S~~| zlWCW}U9t2rePsFYvB?wXpa89}udW3@S^jk6WSwb~A5NHWF#G(st9KKvzd_xZ3itV; zdPi$MuJ^gW{^jYAhWM={>I?3>8X=f=B++0JMJ2y zu8mB5DD(T}arch?sdM_uwHHn|^D5`(XU*g=DX^0gf*Q5z2>XUWt1|S-a(dJVK-h$= zh+;n8FdkPHrPW&$zGI&fipvuMNuD3B#N&-FC?5Uc4YG0S=A#-u)=P7q&(5`)9OJnD zd941iTE9vBG6YyMBtd=CY62L^A(eR~t>Gf3d-YQMr%172zwmc}bv#&C!`PHrp<7h< zHZaEHi+fK;&9m3W=3id>7pJGzRYj+#=H+HkQ&d$R@5ZJ07})6 z-w5w@!^NtcjW6`G8O`POjiF)DJ~1)+&#mp>+daBV=I1tQ;YztV>M#EdWwyAQ+Q@Y(!Hv&#{rMhX!PK`>>7w#5q)t1CdrhtBq@i)VIf zKe+MQ&NDZc@%~--Q$r8CyIv1=TfR3Bs;dxn^+0_SH!dpsiMB~keI*e6Kdr}u{OK*?miz9dBTQRJpt2U`5jg4{W#32+TCCw7)a^7F|6WTBA`?-vu zmuFHU-2yEn?#UBf;t(_VpeUI+7Xlap9!qP-N}Mh@byDQMs(psEkGTG zZer-<6C46cxxDKrwwT%yi$H}B)A;C70mDbwlhfPc%j1}fU^O3xb3s}N$|TIfiObYJ z5XU6F83yV|B$|H$5Q^v);M9R*10o=FSI1Vs@&S@*FojAWd+0>50W}(Q;BC^=2}y53 zu|X}F9~CMCtV^Q`E~zy^>0kcVG7cXEqW}2z^B;X_|LSN${3xBK&n@{Ku84u?Cx(C5 zMEsWq$NvS!iSHmT`qAR{JA~2gl1aL-hDp-J4e;OU!a}EeigC6O>p?+4O5PkHc^#DK zqhETU!TX>NYqAJ6A_SX2JRL*Fh|mcbP%Gwj055=&LGhr$_l6B_MM&u7QC~XkcVbDB z02>=C6C!Mr!>+WHo#6HECCR-goNR>iABr~bwwtK_2$3jdks9E#)0OrLD~B+Roa>ruO!3PBWJu-JE?Z;MR0g zbp<0t$`~2{Qgx^2_A5@e?wV;^nmzmc_I7Q_3QRBw7mr2pQDPX;BbpJPx8||g(%V`$ ze_ylV=Ub=@W_J`+= zxjZwk%Q&0)?KgMrIbN4lpRYr1jvJ}HV5a4+6taZ+*1S;ggNWY%0%*?M98#*L~L=eATu z28E@h-*fX=Yw={UkEcd;qjXolGJz@rCIxI65X%F|V<0CchKf)b0bPoLhylT9xn>yx zq%evCIf|%I86#z=`3Q$`jDGc^6pe`2Yd+pPufOa5xp|*0HtJh!+3aolXMeexpi3EO z^*-eu7}r8-c8aEOxMPKaVIrU=M`bZg5W$RIo?=+!!ScebULS7wd@*o$zNB;4-t(Pr zK|mIxfI;$V+B@CD5>8&%a{1!s;v8uIvi!ilym>1-5vfW z790CCFW1XEq$$7j`75)lx)(F+GfbYECkB*I+oFn$?83ZgF|8aVpswnetd^kEvhrSm z)knAdhAJ+vBR{YA*u|Ei%7rRO+bBdJhlo{W8#gUWjt;>6{c@kav^6p@EG(-^E$Dmw zHg4y#Q}&09zRo+B{HNaQjZchEJDOWYJatI$3oZ-|O=L5gCB;!aC7o67k>NQNZ3$5g z1N}8IF`#_Bp}L|pFOA3S=Jm58!()t%EGsLU2PRiZ+Zzlx+_vz*8jqI^9G6$PY zZHxiN3dv=i9oK719O?=)yBqo>-Rp4@6{9G1P2&R)Mezbpgo(s#+ED_zBm|S-2M`I6 zCrRX@u#}c{vV= z32}vasm$IMF`vWi@8gTu7_GI&5CDrW5&4rKOA9~*fITMA6>;=M{Q!JGgTD?qHxAHN zn&KoHNDDtUX~qwwFcPoi`~JHG~z@qdSXgKzwA*!K_L+dxTmkEH(|cM$uC z{|@$1)QCaI#wUmy^H2&dj;2QAAwaKh;dJ?f`GfSVv%2IM$ubrzvR7+l5n-o>@G zcM5Y|5~HJYQ>(h#IGC7Zv-)~ES_Lc`-bH}c^|wgcx21+^ z{^<72TizdT^LjU9!jWRWD!?c%F5Au?-N-nYUTk9f?(GA;7tY`8{3D*l#yrnVz1YuK z3@{FZkv;s0HQn0-;&z$Y?mTg0wZ6?M6ZcE|?N(?$8Hs4o{nHS`A&ALv`3J*)w_m!< zevZ0)zyx4mB=oh0@pK44qUbEKX?CpTJr-D8#kNC2(EMaed#he1av8lGRvz}{w zy((k6P2K+byhPW9%&KVwa|DmEkS{pZ(zVsj;lAn9*81+AJ_rgc^z(_jdE;7sNnA#K z@Us}*S(nupT^+mm!JNIf=kGf`bMKDHw@<9pKD9pn+q>AL7z@YuU!sFQ)z@szE8l>F zrjReqXp_cy2q_Wg6T)y5RmD+dg5F9F$3am<4i)f+2>KDUe<;QsAtFbLKu$#WUK@de zDx*!UJ!@W3z{Zc(+q6!M(M0C#T`#jAM4rqZ$1+LRHtj1)F%WY4i34e61Pm)CFe-YFo|uZzJF5ukPM+0!jo{TA;{keyyUVUypQ 
zdpa8oGLkXBnC_&^)=wWj%JO4@Z0+RLMC3-5_q4}l=2sUrQ*)w-JLr&WYio;)jLgi; z6bPs*9HOG4?Ck6!A|h&QYeACZArgsDlmpNW1cRP|IMCONQL{i-V^Lbb`**s>^P`+v zyNWStf*9zIC=rPLg?&Gx{ldPV%La;bX`OciVq^!`qvdrdAI@4U@Q1s=a=epVRDW)W00}0n?6rhVjdPtmZ zF-c+RpiAtBVl?%Khy8FVNwAfm5Jdzmj8+>H!FP3OQt~Jft?t1f|LRcuA2D*k=LkQ@ zZvez;KO7Ey7=6nof4sLrZ=XN{Tg0G8?_Wvn@8RqpEsx>vUO*V|_DNc>P={cEJ_nVP z{|y_EV-b>)H#q=~lBO(#q%^ySKA8i^Mo=4r&qDkdw@)2}ba5nr5`YH4RuOK|&j|19 zY|czBjE+c*i%SR#4~&Tp%grjx&o4~Njn1n|E3I$v4WVcymdU6>Io{&Fo4{QNxEd;w zX5zycup*VW>E^dN<8De1yFYA{fySoyo3DMo7FU&=TUb(_U7s71pAZrj>f`z4ljHmM z?``cq+P?l`_x6jOy{n_Mlgmd}clW?=0WrSOagI^$54`W~aojXkS9yZ*NDaM78(wZR z3wc`3C{KtD0b!hozgH+y!%zhPWt2b|tZ{7ioyu}&V@_^H=iI)VCok_=zWAy6?w9YjdPZFfOnz+p{-CM(;oxA;o-PLU9rrQ2 z8d(i*qu(sPH(%r7Xmtx(u6d-b$`B*jA&*q#ACH&6uP(1OQdwVF)nLR_(^(p~XYDaN ze$MF9GlwT%qU?;qZ%#KEEB!UsGb7QpAslIyknhMpP-ZA}*nIu=WuV@-8 zdqHK8TIegy^l#%z6Xd#T#?UcY(Ihc(3Wv3yy-yojcXY6F@=M~Q68cISFYi3^+|=T^ z{fn<@0lz=kJ@WC|As41=9@IQ|cIEzUbFZD+uzAP44_|G&SzYm|iM8!@Q56;Y?R{_6 z3uhNrYV`Li5dv9)Q02oI4vZA=hjaTCyLn1&?8*HC_0~3px^kJuN;yW4iU1!Yq#gjW z0=oCEECzBYi~w|suxna&>~wppA_Ty&H3$% z1kaepIz|zIGuCq}uW|F2mI>P3$dYOVDMOTdkl(oronUeyEEb|_ah~U^V@}zbE?mFj zjm@jzk8Tcn1}*71F}?{8?iv{x>t*G|R@J1GlqPqzcJn$hv;$InGasDSH+=lw&b9Ed zRm-2d`&7rYAxIk{%|jGKL&h<#UV@5TNKI`|%S%kn@#e7WQ-X5tE%O)imc!L?%g?fsgDOiaZ65#{*r&!oTX|DOHAzMsqJ+7+mHGF5tKY zkckuyevdU`6mr!xmF1UaQ!{T!R8(MicwAywMP)dX`4!*=z(o>UFJLdlxch-H0WuWu zWZEZ(+lk53D4n|Z5a1JFr6P87RcSMN5lAHqkGKE6yt_tcz%LlF_W z(c#|iZrs}CNZH?w|t_2#WL>$ly#^T5R5qV~P1U5$qc z&U7H;2m%qn@D6f*Naezdc9Umb7_mTm@$3gHHt6r!d~5F&?fZ`&8QQ=6@cyMkh__#8 zkZ*8^pLc+d$5%fezo_`Qyu7@syn_79?5y$qtjdwoJD(dcO4DRm2VB@21Fg@!&N`v_U%GUBio63Cg82NN4$Dg!naG_Q7x z{q6kIF&CY--f_8Z`^nPa!Q<ZYgeco*f#yr$yF04Alr5=a&~vC>FRFc zHg%DmZQ`ysfgg5hAK!0#bce^f-`-D{`f7}lw%kY^m5HWP)r_X7SWFmdGZZmHWX$A- zYmZ){x9=tNh%SagwL5zkX1|x@m*gZ4h-D*~w0z*KZ6r>X$N0-p@ zR79<_nN^c(Q`PHo>B>)!);&71cE^e<*N^QuxBarwHC^+^&tF+Y#|A|tC4UGh zcCI6*1;uEzHVwzP@;E-02UCPFR?JuGZP5_=)m@p( zdXi_b^Hhj_S?V*AFj9z4?Ckv`H)rP2?dKlvTW`EX@tV{q3stEn(unoY(Z-|qKmH>n zAw<|Lpf)}4pyx2NXlqB!s*3Ju;q1|-{liz+YHnnXJlQkzGyKVgwZi@lWswd<)ydb| z&)--!CuJua+k#IR#3Vs=y`R$u7e@yV_m3S-B~2AMso`NI@fjH&;Vyc1>n5$+`G?NB zeW6p+v8BK|JdFBA&-eEz`Y z-_z5Ro}QkVm`ME`78cgl)`p^Je}6x955<3tjrAzO10IjA*J46bV~x)Ts{-w_{?imv zrw)r)G}`pQg5Ut#Zbk$CBL(GGQO8eczp(FTGD!j%fexhJ)7f%8faU=f2&9)J(LSAy zr3yJXpTNa{5&{9N5R*xg1erU1>?7M@hlM=H@Mo-su9~C<4;}SPyU)Fn8dl zlYM2v?hID1b3f}9U+7MfS#+DM!F)i9IVuFa*#7Ro_BzjsY}f4A=Xr^GbrsjeLW&Xh z0qhrv&r|S+a1J9B?W&oc%yNhD&?f*m57 z-pIjZH3Gx9xR^r_d<+-Tm17dv;R42`8$EEvz^1bR3G92*5}KvNz!1>^(Eq5n?|YDa zAp7ty@WLcQ*0j<64bi{i=zwII)hXX=331!X+laFEKVMDK$1Gu^_LoASX8^B_%5-J+CAy zw>ULBKOw&`t-iXVyS;_Q?(XVtZLDuSdN!SX*8T zFniHX9-ChNLYh4ppQB}z)X}ogi+zU7N8?pXpy7Of7 zKaS@nOsMsn8t`)Z<)hQq{=VhR{_~HoZaJ{Y>64vM+)E9*l9K+L4iR!T_CxbCltlf~ zLg7d*Xoz4u##QCD4(n@D?rxN=Ek=ryk%C~PIzh3geVmBSGFIfmXb>txzZA)q!cj#L zjN!txZqB^s_E{x0>zg@ST3~ij&ms<6o!6oS-J_s)2D5o-XWg3CiiKTGBltWS9GA!V zs-m6|-6f;rUDmw0vhe22iFbxCcbbLVN9argQ$$Hye)El`UHvT-^Cbf8>MB=jOPL8B zQ^@WyZ7tK18`eE9y7q~4G8#MG1ncTr7AFR-vDMw7bxhytE+aoB_l`mS#$#0rmiF%5 z(V%xH<(ZkFswE>Ty{4!TD3;=P1I8vA8(-crjPpplaN2MBnhHd@1W|4pHiEAtC6-4p zL`EnxqIcHZ+TWI@>^@VxdUxF1#i1upRC%~T*qe@w>j}w!nrEy!m$ZfAUb*;|s(5 zEE(cv2EqM>eLt`L!oHu$2I>aTAJIKpsHGQ(27p=+Q~};KSR_FV#27_Fy6qKR_DCnj zN$3d^umCLq_$CsaBw?{cOKS>Gu&BOnYi{b@)QGp)(J@&uF&Rm28EN(zX&*8(1M{;A z3UljnGb;-7ODamTYs=FctFoy5Yi&VBS%!B_p;3F&0TyQ(2UTFBiagOm0@VRL3d9UL zR>&W49hC!o#PAaw@d6G^=fHd>y1BLQTxq3QdWK6vLPT6lW?5-_QzMhf6yrZ)odk+w zLP>5MCh5vb&7IU_BNotZL4l<5gGYxX>8K=6QjE;TXzKcp%18geKH?up_gA?wF_7mc zIk3Sy{wZrLDR8D!7$t{5atu^GWBB*bEq(ljeZ*fqG5*1k#1C)izhU3&c5xCLd@Ycq z5FzP0 
z$fJ`T@v@B6tI4q&7%i8hLN>a7J9+o*p0(C~*&9kiZC; zcGlV+eE-+1g}1)UP2+wcp{}d3=HYcKV|~{Hr>-qqvuWoa``tde@4CKY>7Aw1?$1}c zK3e6T;`%qMuejbd|N0@aq}?~a@U-Lct#4+|*F`RRPjqkm!@c>>i+)q5n$vARkmr(H zZDiE_p}U@|hn6mBV6Q9cU2Wt$Y`G<})N#tNCqq>}EEyB4Gqo*aD$sH&Q!p6809AmY z+}?4$O`3g8NO`t=Z;OIZC`U1$0F){4lDK>q3i&J8ZNIf=Y|Hb1?&}fZ=X>tL(Z#1W zt#UV*nrx?1={3ABUX7i#n`qtc>MT2R-i~u;-g&sL*tFu}t&;(PpGyn#>}@@loO`6< z>piBeN)_Wt3q=Yn7{Mk+vAUI+wMv5eQP54j^pLh{q&*v9lv22+PEjce@@&Q!cCk`t zJkpz^Am|v;PpUKt$Je!Mws%hE3#W@ZO-Iv0 z24gh!K?qWT*RI-|i)1>E2zxxmcH<(edGnu5)qFa3sG-adGa1BMT2WVd^{u6^BR%5* zE;RQ33Fv5GBD_{vqIW8=j{@e%h73u@>l+;3ALet<_tVM!n=dTU%(T~QdSy_z>tKz> z>{j`qf>{$Y7EY?u*MiDUb}cI*GNHGJ8pF)ax}5hG`j!v0ecYmN-i}sKk3!Vi5&0%W zjx_>76%mFs(mYD7Wy<6djcM5nH+GJhRjo3jQEq7Jtw&s^H=MT@Qp{F8O@8pj=f*Q< zBbVHqy2{>`?1pkTudw9U&V3v0FCDAgznxgHpmY7EQeC|YZ@0v}l;q0NlFrUfkw_%b zW1>wg7Pq#xhJ}S?WMl*f2MZ)7mKZew6_+=B^9s$5OecB#%KqYSZ*CQBo>!xa)Q>>o zryvd|fAjxl%p*IPIGr;n`uC@wU-th8`-Oc!lMP~@BtL7A0H~iZln(rU@0}&$Q_ChM zB2h7|I?qM%evybJ65@mWL0_gV5)ED>0k(}HnAXXi*3i4Qw*7h^H%OA*C8D*U&_1!S ziN&mG>!_-)udS}BD=8~3uPUyr%q=TPFU(EJOOH>94UZ4EE6KUu)3sfQP2mwz0y-~h zx(MIj*{Q>3Brw`K` zMp$@cbZksfL2-9yA13KsDHQWC>V{6~qeVh##dx}cb)e$4T>_MabYUs2GR^ucEABsK z#6SFP#NP^}(eGu&5@sa_!A|rSs0~JYMFXgZ(Q_tU0)k0|Pq808l>Y{H3-X-S+AQak(~*vAvHBvIW?v6#(cvDsa{y`2R;jE^kdEiP{^hACqt zwfc`^WAmE2Hl<~r_wmuTe`9I)`n`>DPZSxr$|UZR`l$4_n_`a9YNdPCz}z$C5v zNVJk(D+wI}Jt7wDAVjD8x*x?x-?M$CZE0!#!R38yY;+L-QujyGIJa)#??A7h-rly8(-@SHt z_u4Qm$vin}cX#h^eeB@?V@3G1){Ye~?UC~CG5s(}02)o)*&mC4f9$iqoE;$sc6w&1 zx%mr2%bQzwX&UJs)G^+<^2*2qpH@w?)kF+rkZM=(!A#KAh*A(2DqlN#=C50 zZL>yf(=9O~bb53$utz)}+BT$%C(ml&MYW6Xg z8NZniA7iRM(R2UQg7+HymZAMD)pmvkmp>M`qxveB6lNT%t=!q#u%Mx8rjRp^D&u->dV%+>?!p~CE$gk` z&0c@#kKIR(U%c?pSo_eay}P#mVPSZ9}Ec+0N(_ZL!;0Qs#G-P2%^D0P1;2`Ia2TOoxe~w~%`yWei8E z7^?jF<&PcS+ehlCPklX?R&BJJI^o8`yhnaTD~` zN@ko)Zp*W0Ca;eih*`EgUPG;B-;&DB)2b#8Z&g49!;t=ANcpH?=C5# z!62`&A}BuWbH2BK@@w~->%OcT^>wAP>y8P}v^GS?+T&tsuAo!4`y^$8zwD>8U)c9E z*#P#52e1#mL%@KRvfz77D?ZRSmBKfH09Zok>nGc~goS*VFDCk;KIir2;dY3 z7kb4H%37@|+oJmg-NZoAEIAlbq(xQeenlJ%IFe!oE&&{p{GNLU&4j><*?Wn>o-Kwk zIH)m%V{C6B8rOj{d2vHB7*VS~hl zH>fI&lNe9HZfkEUEG$lnPm2yujS5eys;X-3sAmbf#B@AP5`iRtsLEy0l_w{(SHxf)_e~%)8Thoq?IW-XxAe?=och@WadeVaA_Qr03ZAV_OVl)S(H)R3PV|{D~r%EQSMJVpjn)_b@sFe0*-mxSv&vnbY*S2(0Jr)dCezt=bl|*`Qll;Z={9R!>i{`Y(KvD%)_g; z&MyrN^zPk!a`}XTw^Kwzc2UG9qZqGEID0vus+f2_w6n~vpWoDU9v zTvl3yiH<-~j6OIj~C?mV{h;1O*zGec|hLx&FBx_9^DW4$f5HnRRMn!p+^ z5KuI!NYYt))7gS?owbVmlHt%r-J^g}u*Ihi8C0U@A`mF{mQIedl(XH3*qu~Pa+^}u zFsig_OmV{0&Wy=qqdd1ox|ctPfR$|(=Q}g*7Ja%DZ+*4W{m;sPF^zc&947soEP+DC zHyO7Jh_23vHIHd%pJ2n_Z_r?B3z9__)_5-B7JRoVAuW6^!L@)i6*N zf+8En^}%)&9trx>XkRj`6*?jVT+<#s%=~>_>5ySeMfr|Na{MJ@*yDzwBjwSd2zMyL zo;E{p?y@i_iqqN?l972@Pv`oh`_)a=pTD|#_p9>Zc}z* z)f+P}Tl2!Hlfn_X5@Z;2$V5PpYI&K^_lETY+M2Dct$BHQ@$vD|(b3J#%>aW%9xW{` zHa0fy?(R$`b0E}j3j{E0`s&>2L44UcL72^7{m*oZbos ztpiFiM~}ow7Kwt32%H2E)GbKPF36G3sQ3>69a0&cyKoh9r_WjCkPl_ln;g{MS^#fL@4hQ-E&$3}#OXOlC3 zUV341W?6ZDb#--pT}yL4x21uXpH-L;6B!=im6R4ySX$E9$PserTbAbn+!lcI2-qP< z4#CJHLLLB+3WZo4ilROBYd>@#L7;T|vna(McEjlQhuv^0hAyLI$1tjk9!nDw76B@HD4|L1#WV_}NU8~{_b4d^1?HoeRHQ&6Rfcpir8b#>+6by5goNKH z6||>d9}ZxmyK-5Xen(+k0W24xDEjA%?}_P z{Cy+*{lf!-0^<_H!ovN70z$(A;-Z6-;v*xO46ll^PdM@%k)M`GPKsrh#Ih$+xdi|j z)a{UO8cbbgdzT;~H8D9ov6D~v0F))nC^_*s0KyRE&c?n}+5_b<8IzNs&Zba&Ev`ec_^#NCVUhu;rW+2sH& z(TBEZPd@fwhnuan)yucK*UlI{KOMvPTrUpwPk!^+UPI%#dRSOeTx9OUYmbg>T@e>} zyR-Qqf~_O5ax-^rV(zX_UgJC~`n7<0^Fm=Vb{6qmP?LU{JQMrg6*%ze@$GL-!exEW^s|xaH4>DT}oF zFLsfNI<~B_{6=DX$Q(0 z`=>F6u}H`a#!1oHA}~2EcV~d(Q7?_34aY-Iq2c7w4EjOuCqXgSN9Nhy6gOTncQJVH zy7$1jrE9kBIPv@M51xIrv|qVp$HZUMCR{q(&&z6xtaGSHHXH{d08o*Efw@(K($gk% 
zfEW;k0)vEUF@sBJU?toS;4sK+#w9>;f(iF zs~nefrZ44)*4K+}RX5#@^WXO3_>3D%k0pFsTO2$W77azg5FCtW%T+xM2ftney-|S- z$*Ua%-G^p9n?L{L;ZsnaGA7Nl* zr=?MFaC^cc?(v*b$sr%Tt~FQMd~S2$jUb5)0_kK5|rHRZ+~!h?1IAaQr}p z>eLzts)t6w(6E*z%c|c!tyVBWDNGr;&MPl3kBEpM@gE8JI2_W9f`*0$BO{~Y;$lEo zevo68A{bO85|M@l2Mkk8m9a+xQ?M43)3ICz#P$3-k4)DqZoeOK+P%(j#nNZL+;mpY zmX~t?!UrOeya`3B>6EErx}28N+`-PwFAoV1Xzyw!r*nyjB6Xu7rHCu9m4a59okT2# zg?u5Wi``jRm>(4A-%?#C<&h}I5doSq{3Z%s0N{-SCONfZKtzW!N$3J&1i3)$q&8fx zKPj5;pgY) z^3Bi1I>^`QTY0W=d&^}hss@9x5_FOT-VU&j0Hl+r=s`T78|NViLK?L{nW( zv0Lv9nBGU|^-INi7*Wthqqu@FnFMpGRLL`VO`=b!^rcMZj1jqXp$7SV(wK4xoQ2{Y zD8a&jfTs7OP6zc82m!_h-6bpp z-N>7jN=ZacLh?@1xR;ieW@ct4CnuA(K0G|!-_OU-%iZ6{J;2v3D9|@NEIKYWH6=AO zJtMocyri_etgfb=(}_^5jxvQeu?k?W0GI*dvq=02L0o9O)riABPxRn8{k%nVmZp+%2NfWK_Ksk0&o}ySL&JmQfJ-hdYFK%d4F&0G%$$^@ zvJ9t|QY|U#SAfn2R0i5~5ptrk>3%BXWqhPnT2xFQ$=n0RWb4D7Pe-Q(dNnNx2oGW)bmuuU~(rI(+*~^!D$%80l-g7j&ZR0;%NJ- zx+bKF7vmlE)ZO=FW!;_V%sBx$!~CjM0vnbb(}vDJUijYVwvXMLoCM>FLW6dJ4zu=j zK=L-Xh?|vy!Dx-yAyJM)3zA$zy^x$7{L@S z{t`S+({^iGeOPi)O0chQ$j2`M=8x~ZnXpV78u5kY1IBFlGHm2MN;r6e<&ZI6OUESZPiRUR2U-W>Ez^Kt4Yy{8 zsojAqnrru*Ubg$#!c%A0J$!5sp0v~A+YEodH62YWB|^0}wpx0%F(7*lF{|}i0j1+yWQAfk1<2@ zcICGv?FIeX*duT-xs|ux`~4D~bMnF&M&&h^K(Dln*!GSWKF@mX?;#&`<`0k&}}{nv1HcDpOO_{Cv{v06tfsB)%7lfJ}t5o03z);wl+c zfa{wP8=0C|TU-M8Vk*r@Ag7l9h%#wJPPd*#@*ftWf4Bd%_BZVNZ)(I30YP~;orhB6 zf+pWUX@v|a3HvfC7!U2#XX;HGZZLS@Gidn30h>%WT30&O0WM!L6H~_$0~rUPLb`B* z`VDCdQqwYPs)UrQh5$StWQW?>{WqB`Jn)i#~DKaoXObisrW(bA93;33DStU(C^^Y5A zk4S{4NMZgn!t1>oi3k-gLvl9-qM|-fcS9@7(XbDumfR%v%aMaGeA!MJz5oUD0oVzE z4gk@xTr3xVml)};rJ!u4oR$+I&o7gU5V(adSxCev^$Ryz10#yUTL>Sg^BECT3S%T# zq$pR29)B+l`v|m4D&dm|ud=c-H#e6w@gaYbDA~))%iG)A*Vi{7Ab^B@ z@o_P^S&X9m%&PLDrpDT?F1CO#l}J&U979kXLA%oiknX2}QuzS1|1z0^)YKxOo^nRg zyQL?WMust3TLsj^!x03R(u9IydXv)V7Ds!5KPmxL@vB}`)tD3^U=S0Vq^+mt?C;12 zT`=IG1P93YlD0xlg)6V-8OGTI;CTQZA`*;3!C4vjh@jyFTn!{B71mYcJrZ&y%{7Xk z5(;c9dWi(jX&E^&sX?WkZp_ZJ5w$b(WXlRA>)OFBDR9QHrsnFl)ac|89|uqOM|E8H zS3YOXyq|08v#6|QBOulaxhibQ=;F@tUd)*<9j6((?Yr|@&DwiQdE1%%hMC1p!#jo3 z&5CJ4=o7>s?Ag|%L zrs;{R&%8q7?&;{9xq0*KnTzVL4b7{nwgviK>+00zh_wUKTmn-h5?Ki=@ArT!6W`5H z)gC$S<=7<_8&8L6*w%)*q(@oWx*k4vd#U>U)nAS0nOJV|4PWczta9nx$fXOQ)$@is zf1D|57=>X9HkQHUBqWdzYpz>bnLa)GGxT92WWE7%KC`gQW0jzJaz)kbf|4USIrpqg z)}Gik;l^)EgKXxNg+l=vknIZS?Fh(d;?&4%;5%Swz-AonY^{PJTB>0J$}m9H{gU9%1Qjz z#BHjsEA|NpjEPU_*m2tt27`K@UwTeMfJUD$mb(UJ9%8ann_i9|jK`fJPZtt0p z!r*ebq#Yw+8K9TAA`v-EKx1QLDBZHMvh?)y?Ck8=Y&H$zPyma8uoH;8Kz&)fhm#aw z0=jymsa}{AmEj)}kiUQ%Mtov6n~NwzlS(RO9*(sG zltV`zDfET_t_%pJ^u@^)Y{h`4fT!YvB%l%Ea5pauogs>>ztg50F!hEdclQXEE< z7e>&oP14YaaGau`>cXVo?+6$6y81(4))lre-f>b1ELe)T!eC9DpYJqKo$Xm2RK-ag8mXYBoK}g z2@auh8$eu~7Crg2pu4Bm>7YbdxOMaV;$D+8%Y{zWSGf&#t> z=5%)P>g(7QC9MV7Md>M#39)_w{uHqD@bK{T^d#Y3P*6|^-4f`=WHPI(t6N)JNoYu} z^(ne$vfD8^T?UKytQNkr!E}3$l?kJ`;tCt-tw{-aQ8}L_%*(2XkIrhY=PEE9-CYO` zPem9N$fiEf;19*RvqY&xQJk(=MroF71scSGSV%F~l+@Hc(bd#9e$~{G(;X2`rF@O3 zKEN&jY&(D#h{ekr+fHyrpJ@!*iAiwEF{vnB$M`tPruv#ddvac7bn0V4QQhZn=5`52 zngMH0m_e5u`aJQT6kqpDLLG#Yla!Vi6dC3B?Y-e4bHCH;w5Kk4+~>LTkbwBUKs*`X zqX8Jh77i&D%#3PXan*A0hWpTEBgiFjI15aYfxfVmG@&DLumA>oD}+a#l6IN|+=wj+ z`xfB4dh6OrGawVQ_5J~~`P>}<>}Ii+1xHObb5u(#uuU(|$gQrcC~5$u9iYDAtJO)h z3(&~hgJX;Sf}FL!Ii9e# zIa8W_2^FoWuHD1o4f1wXHPKhQc4Wk!rHeysCe>#Tmy3R)c>yVMAOL0nbY*ceq`eo? 
[binary patch payload (base85-encoded) omitted — not recoverable as text]
zS)c@9NF;0nmOGCY$zyxbIge>@fCpz7#COfr&Lg9F6Plo0{5Vh{J|wNDlfM*UBFeeR zra<2Z^cB!Q4PXVpUM|m=M^Xl+EPyz<89nDg2XolDk}A#)h6R zdotX1zJGSa+2v@|7vrM(nN^+2js04Mb+e;WOuYRrynS;&GB}b);os!U_%OqwG`0Rp zjruRBE`N&K7r8Tz=tZzlOomQT#=M*){q+3B)qN(tOiczz0Tls3fkxguMtpqv-l~TW zS5hi=clb{l4G@NOEu1f^_d(&Gq>nsAGt80h4Yckiiq1Iw^4{@7CYycVc)ovTUr|x@ z^5yHAf(Fhgc0@czYI^9s|C;B#OK;JWS18@oF zqp;m5Eo|B0TT&s(8^}e5jL!%oZG06WN6QtyMt$CsxDi4Sjj$qbqGGGpj7I;{KIcte zuGJ3MH@D#0iqETMQy0nA7$}S=ijfdv3n4Z_Tw(DpaRlbt;IcN?a!1HR^GG|7SJ%%M z#h(1`J9X4w0^;RHKB2(?slgbNn48mTEf3_JFu>W#<04d2VrD+DRo!$%^x!2O^E)#g ztY_2)&Lu?ajz=oCM=vlxJ^P{Md5i0NFVB@LS~jCuPbEi1s%Yh+h(p_BpI=JzI$scT zCO=|luFD4Rty44hEe~C+oWEj5v9e}`h9>j+Hpca(1yfM2mXNREAu|~(3G|JRD-@(T z2o)?^@a^{9Lf_zTnK`-Gq-Js_j_k2n%tu&byvD+&ti-I&=5~SP86U~D0e6T@>|70E?aPQy?o^*S*8K1u z)K*~3R?avPv$Mr?_yw?!4Rk?k8gR*M%q|Nkf-^A|tMU8yl<1g-#%4TV%2gAoF79h= ze|-@&HI81m20M;|;To3KVldnSn~%b>P2-x=iSo)UQ4y@+PeXcGO}J5CzRFO2Xzh~r zIeIM`avd_FBjVDarq5BGH#*nEdz$)tBOR|LdLjC1S!!~f%JNh(v0;STlu*mcS#9Q5 zaqb>jcdle_U)Q6k$U=xGjCfOo2DD^{kL`ofLI$3Y|GmQA!R_h8hgr@J)yI!kYUy+$ z5w5T_NQrYKCWDMPPkl<0s$$26HS{M|P*4DDjH(D!Cj3*6`~JYbKk+C2QSm?MT>PC8 z^0)F4`(I%n_J=kh3djLG12`<+@3mqAa5{k349sSX%)w#tmkKsBhQ_)>17iwkljyUV zjD5I)C;6!_z3uG0?1R2%)>8;TBGMQv=D)U=Ot`p@Vt;dg^^B8&NBy-pO`J(k2!jKo zkwf$=c=i%5MF}_(0FuBH2T%ae;&PNnM>V^fR}6KW&qzG*$>WICvop_Ly|Q{2^6;gr z#p`o_SsGi~nB224G&7sL$rRlOu z%f7)&9N5p~&cPFPDR?tB@j+AR)AoiZMA`{9tRaIgg*Su4-cF_7rb8?P>R9vvHi0qT z$qDyLCyv4RNOw)CYj(mWpl$<#4vb0xLx~4E1iU(Ha4L+?0Qz(~Wj>yIIJe|vU5(#R zYcR9*{CJuUD@+E0)V#!i6y!!38c-E;#I5gz7`g4z&B z;TZ~_gOQ^(`krL$^~S`(X7ASwyRoXY8Sy~1oz2Qa|X7d3V5_6<3bogX8w*&Ye+t^w<2{(90dg7SVAliy9Yoj>`xd4MhRw zRSs{v>pl0x`;{wCqtFPsvILRw)EQ%;GYTG~)~KcLy{FH*j*!J95j%OUhsZtWpzDj> zS2iwpvUPgp;JtqL5$8`K9DzBhK&XuEon-5ua3;imdVa7{SD-p6Vj(Vk`xocsx8G@6 z1kZK)yuin8M*6F{FiKj5N&^aXrxJC+SW%6`ZdQGgt}25i0_X zMx!DR`C|W5QC2sv8?Je>aC6SJ8!q#8g7u|3*64JrA+j_A1w{fvqmzUh6oqi+rZvwu zdt8|N=Hik&hZbHxFxT_xsvK9-HiwxIy$qt3GLpVC0Y9pv`v`Q!V(&#(AxHmFpe+=mcOLbQ2` zeAFD}3X4sJ@8&fH%xiMd_;Ok3<36kcW%J_ftE;jOXw~VVP95b` z-5pP-TF+KWMq(vGa?NOR(w0rR4=wXl*g1*$Tly)z0r=thKGLJU%xz>T6_ueK~h@6ne*?xNPvq z)?y{pp(4vzx{SPk|Hz{U&{Yk9BLD>_5HZHG5=(ILFcgRZQW71jO=FG97zctpOiaqi z@{CVx!I81XxKVs$Y7Cvy4+Gthkq*~xFc&TBR+|P(*1+y#uy7SgT!}6u39>49gnFcf z^JOO$ES*_%VAIfF7aGkr@j-U)bj^OP>Ft`cD)kl;E}A!3{8fAN^w()>O{(%^iV{2p z36K`yZvb$U(Xi3n@+&u*f<6~`xg;Ii*R7!gO3M6S50)|lBOTJQolyJ@ICLV3Ueny2 zUsMVs!}Tw1>eg);mDK?eB@mv>;BO=*A^K!AHW>}5Pa*Ew4FNtt8N|A7_^|K4@y`7J zEdId0Kk+C2QSo2GKHk6mpFkIYCJ+etaq{B8hkYX)7-7IT)_TL6ko@ci zRynXTfRzZCQA8Q=h5*_Cd>L?>3_3&*r-OED_w0h)-8IF|)WLxX+{VP5^CnCYF@_n9 zf~`Ii3}qUf$YioHQW^#@xm>Qm)`|PClJr>0FK5G>=H7W6(iC7z0DBUHfG8xy$9!~} z&OrBq#`1lQB~QMDKmL~D9mmV7z=jVDJ*g;$$8tDzN;;KDl=RSqNZ_! 
zXVGH}@wrRdbJT!5jYp6Go*IL({A=dc!xxZsAnJg5cu?(QB8;R0O5Kf0&DHCzt(LsC zJ9Yp0=J$RJ8;5n7Ajbn?3RwgrZB#8AqXbYnt=)tDNne7##GT4XSPfm5$%RV>6VYh4 zLT1>qGy9aDUE3GsVqcXO8RKth@3ih)u~~k;EQdCOfmdrCFt{D8e#C3u$_LWt9p&u8 zP(l)_NYv_$U)~#NTIjLdH`qk?6q2!#KvqcOAqubAk>I^L+4)dYbIRr;JD$FAeD=y^ zlt3e~p_|0%BvW3vJbnFUr;n9#Yv`PJJ}0jz*;>4akU(80h5Aof5HYw(~LFfIHK_epnLnaC*jv zb(c(#@70;rFO6{@*Rp~RbUEy_U9b3l)zlD^S*e>1qc>|`GeM^uW~f;UE7(fNTT5u% z6x(rM>fHmB@mAy86QhXxJ1s7qyLS5QPJP9oO|x6)Di2E{y0k=}f@s4=O~U(CwI8JM z(Q0ge<@A%tZKuSWjT*1ks=r>T>9E=$^5o_g?^_i&*OluDRY^`tm!2JO{NR$2-(vk} zfAnC!$@&kHvM-Cj!JF?IB$gap&7N5O8#6<<H(a)8)>GqYN`j0e zNknRN#%%gEbLgoX>#rdRU>_a0Kbfi%u43FV3JrUNp9jY16Vi5c3Z{5h(mUuL6_{joAZSO0(qC*Z*b>2m@LK{=)t9*aG>$h~++Cok;__?sxn1ZU>n4m7qlR{i6#1fm_t*w)P z>T02PAh5`cjHJ%Bn_%hkdTBH^UkOg`fDhJ~xD1!Y;lenXnP2q%ds5tZcxeBr3q-ZK zfD~b}2uMxh$zY5VDt|8_B|UDsmiNIH2wm8Am&yJq+5C@-Kd|pl{E2^5{NBAMh=0%C z`kVXA2VHn3jIaU6#tcqq6Hx5mz1j~wBMyu)p_hr3bVDE3`=$iI6WAWWD1h+>a!&umhHV*-Sw#A*)9I$vAFah`nFGVRFeDFnNnlA(nFdMu zYj4}md>LrupFVAXp+Te|pd(;P14RQrv^KBsT71UymmYiklTC)$vpCowg@CC5u&%3P zCyDtvHQzTOE4sNSk;Z;X!EJ((bJuO6JXf3?xQ^H&pMqPvD#W3qgn zyxdyy^(Q;_gkWJFj0nev^=vU_EInQm7cr9JP+sT{a=?65_CpCOLw`Uj`!kib}H>|GYDUHC)ygtyYKY$G*ZGIMwBsx&ID&| z@?KOr1r@5Gn#C$5I~O>vQoXfB$^N?bP`n8wFM#N|kZu-xRXOvKamKaTS(oPIURn~e zLM48ta>YE|eECU5TH+bARo-vVy5hWf@&h^aSQtGK62C9B<*o9Iw_;xdXQlcqigMcF z_WaJ7E!!R*Sm$82z*z+~8EQAkiPxy1k!!Oca@lZ#3@&Lh(GA7#N3S=ES?`?rXuXo( zDdV7nrs?K~p&|)l9BIeakyhyAS1jS2xgNRZytT<+R@+~Hco6={Ki&SFSF*Kd(!p!_ zCT2PF3|o$GD}Qt%#^bi<=U2u3l|^l}bv5-ZmGv!kbzS|P13V%DFtP`m^t*jGqow^@ zfaArWb0%&p)Eo>oolFg_cP-C-eu-dlx@(DEkJzM9gg{&dD)h(-HOq?@ z%8KTyD;6!#&s$-bVKTq>%%Oqf2dY=EEHPZzv(Bu|WK-5+W8XD%Gaj8uwz?T?dFRd5 z%P-EKjeq?teAC(%B@MQSB#0wEO2f;delav6EL^Xo8M}H*#w)wgjs|L5eamy}=F_K2 z_N^bfe~$6a3er=6Oy+TZq+2>+sQ-HgH9U}X*_^Gr2!xbDL=2`#vos|KBoRSebX;AB zx$h`=xC5mh6W(GA3fX^7`~JkA_!IwS{K8QG9qjw@44VT)HjJ@OacD}d_?Omi-67n8CUU^9t$w7ua~RneR3+AAHyyGJ;N1yBDSaB4nuD}T#AGT?0vj9jQXC%gEzCC(%)76Jp z9=*0caQe8Lx39C4Uu0-Tpl8yh<9~g8XM-Qi1xTkeyi3Zi0YqR_5>7{XabHqwQ(0jZ zu=rrN64)g`8x~ZZm>gyy6Yf%B)d)))kBPHv;=ui0Ms&kyNpV?Ddg|w}V9VU^FS`le ze(}d|KV9M%yr;JEKz;S1tZeip166fyeC?!AUw)>y?qq4sjHc@4sqvdT+g(aZ{hS!59^zuvc_btVSWFq&M)HlVN1iBnnv$ zm}2Zveoj`VQa7jH-Z_%O#%uP07#EuY?edVbq`^R-`m4nMUu zns1?SCT?+9{+v5o8?d%7aM5V$ig+(sKQGkOszD^`aKw`b>=Z<2p{Wwx8&uGEyB zV=0U2;W7{>N)1L3uifgd9#7D<>Q|XsL>LP zXko}Of@l-4Ukt(fSphp=9-Dgm-u3N=_B}kmFx__kfR_@4DnOVv!(F-BOSAU#;!>yi z(Kpqc&&hvv*?4=eu)Vp`XG_%*2UUFhRC=;#K3hr`@9_S^6Z}5X5l9{pEsF$ z_JDS@lPP>VK)Hj44WnQoe719?J;X2x&Y=`yG_-S}3|$8$7zDdj7bE^PBs%IXPX0Y0=NG1@1Ndq^npx zQ?pbZ;V!DOJhaDS#d{+VMClAT&h|H-XM1X)-7eLzgEO=Do2G4CS$FmH@ck?B=o0nd zYB+2P7xyLUs7K5;{C?sFJ*i-%t2;mG3nYK<+kKusXT^ZDO!o|hgt<~r&2+s#-OH>_ zE^jU!DX;8HNU!w`Ee{IIPe@2jjLS|*%}dVAiAw((l9c@={d+-LaHOr({cW$d8Mv&T z;j+xY$ISGt*|PlW$LPoQ!XiVKv?R#NG7(}3PsT}$gV8+lEWJjRDdS5`nOAP|!o#>t zHKm2wsR_xQU&;sGc;hy%Zj(eq($d3Iw8@cK4umwZjByS5RN3PRLF1x2UY2 zlB9x)#1AhD0RkN3aQL50A14O;eTw^@lAIF{?QMkT&j$51aN-Cjh5D6cYP8fEx2&o9 z>tw|F-M(jc4EcTF*}Wdvv$tGLr&~*}QC=lys=CL@IZt-4x^i%fo6CpDh}idU>{`EN z54m}CEMAFMRK-akoERG7qcjC+imY0VtXBQ;OFdzcFP=O|{gyOP`2Fjf*QJhcYdt>* zSj?ERG#|O~lNQ#b$i+i{ea)RqeP&y@T5~`|nIkO+vve8DXZOyZ);G%lzs3x#UO`ea zu!tCJ<>y2e`%iD*pZF7h;=hjn6!vl0KU9~0$Ut$q0^c}0?dE<^#Z`v-u;20#?BeY*v(!}>Y+3HcbtFP$m)QF9U~#jt-9SI3?kWJ4Ag zJ}_CE$uvC@U7wG-Mo6oMa0kna&ZQ?D2F6SL=pj0D6@{~e3x`VUcX|0AxpIByiZx5N z?%I)=n~_(PYiViu%8XW=Ej?v~`5+jERB7_Gm&a_hw{n-1-=c>MD0fh#*!Z+!aX)NT7Md)@Wllr0_w zQvy|m3W_9{($b;QUaE&nTiE$gg&m;64wR&Z3lq~2wFv=P3_y+n%0!qOo;fWi-SftUOV^j^qJ%DK0NXXaQy4~$?N9FEpJ`DbJhIT)w6L4cE#UrHbg97rD+qw5v0mc 
zVl)P$*E~O^bNTd2^G9cFKE19VtZb-_`yO^2*gO>f+fDV$d)DUw%Dk+PZ_NfMse-2b?|EHd$)#!WbpJG-?WVnv#7#|sVGz|hYAj_O?i1U^GhJHy$*6pxE6LJ zn@{zdof>%ZnvfU+tf8^q8W_g`kh9U^F4T#_WNG%BvyHD_7L`3v=NVKp69BYddFhiqky z*++I(ZCTd7VO^e~aqQyNkdQ$yua5N%t}m^_kJI>{f@5iK{zK4Oh3_iO z$qa_!I`DSIo0$!(D3c_FMkb*SRpB7@DOJyHia$qvv~_WR>*HqQ`SFQuS7dzk<;yiD zONQsqVCZO2BxR^bjv=f~pS!Vi?+MDcZ&9(~=N{kpi;Z!0bq{iIEKN(Huql+Eu$72? zzybOY6eJFu+fOi_OOaCGh{?gU8L(j;_v&f(vxoRQ4>%t@;cFrE4g)_eV^tRxhx?$|Mr#Szl=Yy?@#=Re@^@ovi`TQ4|B`?O%8w$=CFG1uL^|VaV9@lR6(7W zB|w2c>N;4pfx+W%KDpe9nhTc)L=G#5&bURPEv7OQ=v)y7NKtu9m<)U1VEgmmKimK_%9`KS9OCUXmst|Mm%BAMNLuaTDq-0`xgCB7vG9@8{e4)ozGAhvPy8 z55hU{h`~KiVxM8cT?)shrPZpX&aJQAp|N%+g|-4<0R=?bhEQR-c0u-yF#p$AFP;AD zufOsO(_g-}ICJj6g9m?ARaEj{mc#m?$UZnU;^gXvRgMG;6OJm-(Be?aYYVczWwg|{ zVvd+xAd@CeK>6Ucj18f{jR0yHaPU+-f1A#wbrqE+T>{EMfJGFTMS?Xn-a{JYJ%?;h zAH82wvTSttNLBsWqN>A*Noqaqx`3Yo%$d-?=zD~Lz2y=+t84ZzoGu$>t*Q7N>z8(R@BN0d zzE{?k0ReBi+H!81A3Aw-&)E}aj_kjDc+Z(PPw(}V`%^1!ld{&1B~0rHRiczer)Xyx110$?p#uRr{{*j+{wPki!Qc zq90;>mZwH2(L<#%4L=`=(T{U8pKNx!Z)N${{oB`;jmF;@Hk~&zNem=a#}$RX8zS;+ zeZP%To|~t-n8?R3QL0fDAC?v#5l1AANkg-Qf;G|3nc5518eMp^^Xi8MR_-b>(R)3v zJvU!*>w)<_gQc_87U?hBvElToqw_UYFK%A<%EUNx=Cn>N$xe9$r?k4SDRem*H-6ld z^2+q)E-A;;8WCGj^_s~8`cmDB($(@xnVRYW3$+8*>cp+pNS-TMtR~z#MZQg0H-G9P zzfF51PG3k}yQWZk%DAj3h)<#+G^VD^ynnmR*&)%rWCe%4jd6J%_sYuV;9>9Ecfwvjh3ZncXG5R2h9EPA4{*tf zvULbG$xmj?)*VH{Jkcq15$)kw%W{q$0s6==Z?uC$U@^GOU$X}iLRbkwtU!N=iXDxJ zi?OjCsBLCbIJsZHe@Xn-J4WV%r$H8thRGqZFiJ1x!6cxa0Q3Ws2$&W$&}o=VOkaC# zOej!CMp^y*O$HzPN?_K=@-rsrb{^Dg?Np1rf&rcEzL0%*AGZuLOzJz?A zrQFVk!U(Y?<;hyQj9t6H*9~gQprQ_1hXBu=$dCfh4a`VaU_-^5a7F)><^K%|{C_|G zz`j56C;mC{pTfT1YP;Ou90<9X_&pJG`TpAxNbs3V7IqTI;r@%TA3xiWm{8Z;o|=;I zHO02M@e-B3g9i&3pg{#aI_%?eLjbyfN5+KfFi2E~d3j;-d$b=^T)IF6`bYH^4}Wvj z*n&s+8~70cYk0zplFY*%{K+!;MKSpS96JHZ0kQxx7?42aq%l}E`FSA&-Td#ELLeT- zlc#Y&j>1B0kl=7uwO3d=T3v|@^!Z@>X2Uw;-Mg0s1iTs@=OffHE|3Jk7??a57$LiO z1!9V3Ol-UqY_a2~ULFm^TYgZ7iAtG4I2Yf1(+fLl6VkmqHABULuVXJkJw(k{^_l`^DX1OdNxic z8hc)rbuQ0qRv81cA?oS&cFX)cJBt@LHt#p^3ZC82v}Ck<9e@Qeo;O!N86M8dEJ$_m za<+VTBCq;Tb<-k%gVgbr1JtF#mD;;~< zKJ4T0!CxeX|5yPc)xHX`?u#!RH{HBp)!7rPLPM{Vv{@z;?);KrlA5wS_|wAg@u!|{ zpYp_1<@tIgzw_E10ZYdt^myS)tWV+)JPpET_jo9Dc}`+RAmCqd8ttwNk{xbEdRw15 zE}P?G+O$EvM;5gq)FzA4m!rQSC+PY?=)18NY_aDTcJYF(q zPP>c*ACxpopjxweg@2g^Uodq!YZ&ryUfeCU&igv#i|W*EQ@ZDf7ikE^>xz9eQE@cW z`LIIXXQ51zfn14>O6wGzR*kt8i`SKI-Br4GGyD8Oyn-|dA&^xY)YPv$W)79D(1R`yuUoUJPk;WvbiMWkvxYXW=v`;rraz6Ks69SOg)C=Ks4zY0(1GTxxTeu2 z01|$$DDuRUCu&8d` zyiM7bFKiz@=I{NIJ$)K(zi2VtQ6?eNB|mvgU$}3j#+Ox#X)*Eq!C>cU9E@rf6rJ+c z*`>3+8ps{cS%CL=NxX8dXY01gwd+S7S;pSDVsY|Nb!`=YNa-2bzBabeXKy%f-|O_s zGV1?g?=7I(TBCj6MT@&zokCmc?m}ItyF2xkdP_@zDzrt4ySux)ySqDqkPvs7H!IM+ z_jz}mJKj5EoHO>jqjRJk0V3Z@vSxnYl>bZ$ElSO6sA}tD@dSr}>+Tq_|AAklOP4}8 z;vzH|*=9M#u0w|W)OcW!r)Y0d5}$(($D{*`fex4FO=f0zynj)Z-yiJzV}I=5WB*6& zn+o!=eh*}uoLJ47YLeyr9<|00Y@Yd(lR2g&{rEs4(p#H4^YcnVqY?^AE3jk3DBeLb zz$XU##01@d4rke50Z@d}+h3J|D71~DPFvWD%V!8L?7tGY%>TGNG{h!kKFV+GBk|Ei zP7R2IDJ^K`@9PA#I*>mighc{ZfMo-Hxj7jWVmCkzz-xDAa- zXl!nXNk}ZItYruZRN$!b7|i4s5Nia7ALwc+iK>b+f$mo@wg$*bz*FUMv>D9R!vlIG zj2^HS1Iz`6HifX1PTkhltjWeL1pExN!*@Unh7J1*Ke&E(`#w+<8K0gMrA4kIFf<$D;F& zs zOpYdstq+Ad5GunB66HmSLF_aXl7d8-k#fYcCG@dlT>c|2d`!tZcJ`b^xWm)j zAa9$GUvJ(r`tZ}Vq&Duw&9x?HW@LQXQ{!?a-|2Bhs8is#AC#spAmV}8HIi))V{v1R zt{KsvZ(g{VkyetDntuD%4R31)T2p0=*>QTB5iNQxEkunTDvRpyg&^RMK)7HTGgIsp zjIWFB-8}o}hj*`UolJ_d3`lwZE#O>8`t^X2y>=h8qQ1{BvR+nTp;Pd2e#Q5BjrJOh zNFAQ96!^nFiV5u=5p(L{pZoY2>6ET3$=d zlYIA+Sq)#81zwnGxkbw3fUL__BStgo$)d#y9*z8r$qgWFm#Uqv}Lvu=f0y#90t14XMtT6P9%KAY8JmMi7V zl4+5b?^jgonlZa#(Sn=GNEE~)gaeQr|v$*ecRiyvg&krL1-@DFG;9jfieEUPVzEr@kacMMN;#AQcA 
zSODSi!@f&r8}DC$93=EXs|}h-<}kFn~_sk(mq{pbDZ43Qssy zh>C-x1nQ3;>HBuZZd;Rc>L4N19e5*TY&Q%LI-a~QpDBxxMJOT&LrkfEx^}_L)n(>Z zk#1g=pT5_m6pnbtcHc26S+Nmgu#t0W)7X))!F*YzMWpy)`I0`8k_WAc^J8PMbv|frG9o* zuHF$}?7sc5`Qhl0?iWaO_yt>*gW%H2CANynf9{bO+Z|W*rxK*YI1zE<0)pbq6oNNb*am-;ZZ5)L% z0cZ)MYYYZ-b&cfa6eTC8mX;KCbhhBBc;?hpUjm=rz@`bv&kw+t!w(cr)EGsdcCr{7 zE#sSP1mU558=R@je~_O4ODXVoFmE#IHYFv9^4~urf@~q`QGQqWp}*nrC(x%5jtnj= zE@)~k5`tzrA0DuH%Q$=`KFE+sGDN)YV8@PndZ_r66uxpIEw1>Pgv~taXk8{_U zEt&aNM&TZ^_~GdZCJelpSdHHtyr8j)0wmKGP|YBA7tcu?T%;_Kx9zxsb4g=_3sZa*!mB zCR+nUz+DpDwWzThiIuk~{Xdyl|BnwA2ALV8S?s4|+c~~GZ}#@)i_iCWAJz4>+FR|q zZNNzn@1>86H=HQ@5aIDQ;+H)%O+f8Pz4x!4T>Fyj=SfKqEUm86C>_8aaQgbm=+NU& z*UXcCeWEwoSH&!CPgEUGRHsErasv^*H^L9p81!4_`g!h^^Xg}Itg^UtGx+&4t1D;C zUfv52b2NE$*y4_Ux%bL89|cyt(nJ6<;w#nVD${H)OAglJ2a7-;!V5=mfk=Zp(iI|3 zPEw;r$T0j7PMCJqJFPDlq+eW#k_Ijg(TBWy*~{LO!oO6U1_yi!}pM$ z^B%eIo5~%IbEv@x#3CHm=|tbf%}KZJUR`rw+m=UnA1kPfpSy56E}(d@6Z)yp%i-tM zw{P3BZ?FEky9bW`_-Gmv#(XLNqBr3`4<9j$ z7N!e6>lm*VHg1(jUW$0^Qv7vL&D%gVYO`AEa@m5J(>mpmVOd1*f4VYKdiiwK+b6X~ zXYx<(@K~W&`1nzUXRxQ~m-z3Ntyb?DxAymIBSbNDa2Z!ce`vGGgEf&GHaE&DByHc) zbmL@sYbkoRa=Babq;x8`Ba8QXEo#lxGLSKqGY4G=;e0+H!$62#h|c%^IM%*nyg$IZ*x#j~lizj<(oD$IlmHhCq@b#Fg^sIILb zo<$_7bnU~cWzh?cdre-z7KsB zgpt;+^t|As=kl}^soL^*1R+Q(5ms;NICq(tnHn1$c>C@ncNhO;w}=|Iu+o=iy^o&3 z<0tUwG4t3-%!yMq*RR!m{REA5z+;XAQ?RRxPm9k;iOfg~9H>Zuu~x{92jhG2#+dJJ z1No`Y*KV?HtJkhQ_&bj=3s<+PYh)~+m0`F!ZU4d4>-SUa9P+~=n?j>7<`($*T6|Fi zyLXK&}##Ds6(zI~aPAPPtkJ5C6Xh-&X_Ve_bbn7jyl@Q}NQGg{EdqD>AV zp)lB-sZJCsi!sjQOoc)VN{S<5eCwO?flb8r)P#G!eQ4h@0V`y<_CZnPzK((i<>?mp*wdc>@^!AEMOw2Vivv~60?g>M~Cr0-|y#0t{#PGmqEQUP! zF`-i#ldn64&#vPSn3cF354gU?W&aY(mAWtHZhf)xwZo0!ky7A{U`ylc{NLnQtYL+( z2VYSL6`k- zj*MG_sb>lCdlF65ot~{C^e3d|{?Dxx~zHa;_6?wuBShK2g#ZKJ9mHHbJ+jh6BomM6|YZlLJmTL9^Vfw zL*zsv{VvFO;H<7+f=3YJ3&=Gdp(i3WZb;Nyr0|;*BVi>JFNcEVocQ(Cwt7J(+O7|k zzwAWHU(LwAG_CT1R@{{tX}8yTY@Fr2MaR)V-O>PwdZtotD~k_DAYBanFX#Swi<5y=$RI{63>D!kn2iIlK zM@Cl4kd|tdOqa=&+Eis2w^JcYTD%nz@6gieJ+QkmCK%nyvDuTA!t4psXmn207~nn9 zJwk&#ll*)$!-6Ub(?=LXWON@zLMuo}6u^;zLDTuMOA4|5>YJgx8+tlh>hfZ(8sruGROH*`#CvCI_MX^Z=k@K!C*%8%DS`xN|#Je`){nRha9S zldqm>N(f1e3d%^!`=l z3)h<)8#`WI3xhqxtoVVCZ`+RT?OHOQvV9%q)PCIS2N3E7naKrPx8=(#Gu5>R5TrsJ z>0CaWa^x8AmHDWnTd#jmhofuxsZ+z+syyjwpr}Aq(IBnf0Pe299Hj|d)?^6`|F=vk zjR{5RGAME>Mf9&65|7Yyu@TVHf1gtj7;C9itu>kj2u#LhnO%O&Op`Q<( zJpM#oeN}aBjo?qXAy#8$PC@Qf8tnpyvk5?l#h2&6tkJQhB=Yn4q_3Izxx-^r4i9ZI zsjS3A#}u@+4G_tb9+o*e+~ewCCk)`BY7unuI^9*0b2K>m?=Mn7m>k7T02iEcx^|EYeZR#O9&F zRl~#AI5c#aTSr5nhoez%S7rC$poNp?SO1`EZ=c_O`GCWqPA0Rd=r|s?~2AI95IU=KPao$1WXNx@6T$qnp8wc8~Agzi|G_ ziIb;2yxha1f`dbRzJB|dnV#Y5?BU}UP+i)Xmr+t#R*S)4NMtMv$b#?Z(vgy%sb8K3 z-ZKt;@ICEQOhrIaj=SBrOI_u*g|Qc)@8xjvx_%E?2v&eVQF@k2OR{Ep^2VC9gRN0p zF)2IxGS<`v&uI6XMGIR1kvb5f+#W1i8Y)p4F{?UpZeyW3&^L42XV7YvME%mM%`y&j zJ81VtV<3D1B+r8|1qhOd5bf^IDy`3#*Bo8gsIQnQhK%f81g{Ms^Z>y{XTpC4K5olk zx&g1-ir83dar@oJ+t*!Q?HbKL(Bdaa3|6IkNkfnXH)KZ2+j-Y_${6gL_4vw>Jma_Z zM=nth8z#w!$LcPOn4?oMZ)WonAqo;HBB;s@w0n{);{munh^_jFg^7N-s=nyE7|y zmvWqeqRRoq@(_~nN~X?Mbs|6sLR7&`bJ$il%V~$d!~HOi_;qVf?LNEf-Pc>Gehw%V zwTueRj94$fC_leMXV*8EZ|vA{@ZC?x56-U9S*ZsMrjf}MW@iM^e;!v4k zcZ@V65#a_S;5{2&=#DQ`7*ta2)mhN0J-bg{0V6Lpq9{HjFFGV8)-NaBJy*MM>n87q zFW(x!3~>Egk?fY~XcF__O8M10@w#iXlr-?G^@oT%r@J+V71gwRis!P z!6PyZ1gW}wi~IalQ+t>2{9`bH!=nx1>PoB70tY59JFL|0wR^mM2oRY}VdxG=rwt7Z zqh5k0IGgxf5;|_Dp|C6^AQGr-rVw-lCs$v4KaogIOiuXq%gXB8J7>o)McI+R-aiO@ za*y!c32gk}y(0uALPOm^b5(G1L}W@#V|oTDHj(_rhH&~afAtp7(PPR>uoNWGi6%?+ zs(!o-_Whn)l8gG0#*T(JW{po?#+*J?YGdy8{$6u&GB62cNhyMJ8IO7{CQWdH(FkDY zJ-B>ZhqQE$B+{iK+om^n@R(u4W1~=uXIa7C4gR53pPdH2IS)N}Gq80ZamDJ1IT{1< 
z$bhN@oY>ueW*`3UrF>1bUNtqEv^dC$@KnX{3Zg@Ds)PFLG1u=xL^Px%4_v;`t)V(0 zE{Ycs8y1ztE?NnnEMT~fBUlKVI<;G-qc0p_us{$j4a;VbIh?-(e*cne?;q^@V}I=5 zY5#kYmnXzNfv+3nLpzOMO5xfQn9G=;#D>LW#_o=uGZmGOn_IkbxF!&$W$83vu_o_} zPDNt#aFcq@Xqhqlx2Q6o!Q+v*yvaTh27^oE{~g2Q^LbM#Tw$0Ho$xkUiYH8%(tpp{ zohk+t%18757D$=AOiJ|gCw~+cJx?Wh8N#lP8~_2lCKr~N<0i6!Kc1gl9^o4t8T7cS z;zCu`QXFwH9p+=n^J{9?5J^81((Ukc3P-3|C}12e2SzYlFRzf^-d-kiLU8xN-XbTP z7wEVV9s#&aR31z~tQ-9bo}xXbP22WbxT!_}=)0A6QHPnec6(&nP2(OJ?fz=Bm>10W{~HnGxpb5?Wk` zduJc`#>p)>KhZwWIVw6{@FFv@!f6D+j45<$8i+RiwJE4#Q+(I5sOs50l&#~WWh~Be zF08@xx99im4=cH|aqp^!51yJmx^DH{=)JMAo1<$;Sg@_5weg#KDY-EbaY2>U1%v&~ zaSUSYk9B z=}SSd)!Nkl`M{e8bQxgCQM*-g!)C@gFLr;ULdZT$30LQbqU^==LLgX`@L82~SAS@u zYQF+P)*Jrm&kDsoctt+4(vZgvlcX5Zh_J9a7p(N6*Au_U43T!pO zSDfyE@T1l1EG4SH$_=|{V4O87%_Iv=Rg1o@3B0Z0dK}3!)hhTY-5;jHjnaUqS))Gl zI;|JR9+)1o2En}400$k2T+OkbH}PO@@>V47CKCDp3Av7R*~wA;=a4*gcp=+5-FC$O zI#QAES5e&l=>C@jhqfA@+vaoZz~IaGBTvmR=2jk;PC|P%wy-GZhx4cRHaiS2eew1` zdiR!zg~_d}7r&c+O$-k!DvDw=GqG(CIMrJrN&;*VFw=xHvqlx9v4}ERVJ=QonIo#e zn{_TZnr_kH)%Js{H7&Bn$4eN((=`J0FJ+xHF3UQAclz)2!~ zY6$7{nw*=!C*5A87S@xc~ z+@re!FRg;Mr-)6XA_z-Ceq!flUT#=KnElYiFzWW`+;n^Q&iG`{cduG1(<{pp+FJ4{ zxUQnu7+|9l=&&fE(kJT6J03pkT)kmTTXkGU1ScakqCEqyU4`ffT2_2pWH8n}Aa&Q7 z($xq078|sSt6?SN$#N1H1fhr{W9l-@YscXIqox)5?HX!>q6kY8VNXLqVme=18Ka=x zynH>_{DSn9zH1kU7A%-hP{xWXjw#I^TW2u%_;F3FXLUms1=|l)3L3>69)+x2rjPfq zmrYlo0}S^7i6GcM3oD6*J-n!v6WFz15;4}N-M#MuZg0v#tuFIaA<^W}Zno;cmB%&Oc=+cjAWj<`Z()j5ZwF zz2M7>YeS8Nz{3IuBUpY^l5fXAB@6mTM)TU*g8I6A%X56%iu%fvJDMu;DoYZLADRRQ zXSeoZZM+@xnq#WF(_Wgq=lO&VtgbwFytYKip7Yd1S!eVC=gh_(^~p0_m#V-{mY% zAUoC~%a=Rn;6Cl)^uV?weM<9* zvRWgE2untaEg?dmj<98AadYQ19p29P{gFx`Nb^;*=gIOQ!MtzOAdf zw=WtvXTY%j1TBpMHzV*MV@!?^;m}Q|XUaaPooOisD{Eq(o8us_)M$Nt#A#{TYj1(+IS%bEN=pDXOBKve()PPmB!>16OE z!6PPjDUYkpVl8Aa_b^${cwAqAN?}0mAJ`{so)Eg||K3^?E;^1$;KHaZz;RIxD=rtU zAD;~Iu_qX00S|H}AroCz3xEf(&H`7T!&hMNRQfUVGP71k20h7)i%JShurM(;HQNy$ zcLFo0+xaIOmLhI!D5ghJJC5)@G%3J0eFN-lH+lf_0%u% zHAhn4iE-oC@?w?P2?#L};p8IZ0FA^$NQ{nB=h7{MvNN!<5*QtXe^U-Tl(_<~@_iHj&UU@o*}O!e zOBrcbk?vR58jw}Rh{`agA#?;8M?@!P%5lyqfP)4z7{P`kxG4D+d&TH0;rC z_r+MrEAc_njNs`ItvvKyqRUc|61B3)e@U9(*7BfJjqkVATvY~_`QW2KiB`e|C~&Q3 z_THPucG2bf&w?m6Q(hs~Y7%e1v zK5yHn)~1OJi6InGDNtWT5>sJGOeZ1;8$q~;2uv3jT;Yfz7;&UVRjc{*9d2nmivsSU z5iv;#Delhk=TG;oUOOl$Ixe`46w;!E^c~s?g*mM5u{ZDBzt}e$?$6z_EbZjB9HS%I zw(lDIYS6$Pcbo>~(Vo1ppwG8$9~%C=ci8OwZs$9f%Us+eUs*dpG!JlaY^*5*25pio zxM=jAQSR-~eftvc{At{Vt?=+AH8zPeK0#t}X_IF&R~Sr3Pj4!)M(BQaT?cpIG&Mj@ z6(kjh#igodYt&vaO#b>T!})X8<2zZKH}}t9Mp2x>7L(-&kC!eXN}8e7cX(G*kPYxg zNdvuA$(a#OuBoA6BLjm`G10z3-UStz@4h{853u=e{W&`;A2-fn3EUxuDPT}2PHTU3 z4tH>Kv#LU?$}H01i%d5!=x70aAEhXxXuo00%!N2nB_1Nh6B7dk3GQ?RWMyc|T5#+H z_&P#q3NOg3?&gh_-3L1tEpMDAHX*M-kd|wgk;|Ml8|)kai>HJ~FyFqy96Zp!c@N9z z1$c)-{}2%H0vf~P`)PyCxb(1Uqw~Y-SM|$^k0>ES^OZ=a4I#jfMk0<0(3vMp*l;F* zk7uDXKzPEqp8x|XbhO4sDDwAP9u3PibcFT|tY z*vI)jMdQD*vrNcQ==!*mvB2LAy~4u5K@RkDP@xtkpU>rSvA{0_a08AZhc$!6m7{Z& zhw(GpySI0?nswC2C5A-byZrRcM+K9u%3RK5DGfnbc$YCw zeKkVcOoc@wWK}w6D{wyoFB`ZMY$p1JG`SWQs^mi9pvp3I3WZct(wGsQLmnUii#O3x zTb$u@>-NSk->+L*JdN@3%?V8vjvWCSb|Nu8VE{8eftC@$zz`=Zy-IL=T@8&n8S!N$ zsVS+!ef{k*(b?xN+`D!E+7P~m!mW=_ap`RD`0&aiGa<6ODfOHAf%@`00Jng%jLMM% zP@~bsfUg29MIcK9Xkv#{vkDi5Bwi?~DI6W+kR~uFrnBiZDw)a>05dN&zo4bJL&)yc zM8bO#@c@M~pG2B9h*{m%ajBy{9NXUpYyxlwfmZ<#N26L1hVRqc&SDZ4(ZduOJ_sZt zlpv(pePNo*y6TDxIL0L^=+Hm}9r(+Vh7oCYnG`8){&?z+zVwrs_RFg5bgRG0jk(MB z`Ar)s(*)XLV5{-S(rl(ag|fH2*udh2NVd;nQppymQ3r-TjLm@p4NAyXzauk#ty}A) zy`o%ed7HXM`+Q{R2m)?uoJe_Yup~q(()|?rog}f5v%B5oI4-K-tOgz$V6Vw~rPOy@ zn(Vlt(P!hkcZi3R=8*R~hS{=VL$$KCihdhs1Z-KHt~akiO@$&oohFO4sUqdekkMCq 
z%+S@N393UWl9))@G2fZR#+tu2As)w&l;;v-p))1|Wd&3R!Sg2^<_BLy8a&shx^1_y zI$-|V(Dvp*|Fi2G?=J)E4dA^IyjJu)lIpuZ9lmRTug<880y%Eop#PzfB$J|qAEu_a zn>x#5Go!z|+uD5n6#4jVmEny(=>_P7R>kQUEhKvZ5_wc7-Q{h5Vn~dgQ@n#)Zglv$ z3x_wJGB|5|;hFJ8OXG7TzMn9$P9xsdqwgN`&mS66m88o^P!WWLh>}F6ftmG{RD!KDulH$@p3Ek6!|CWCK4YF}f~0bu2ij+~5Fa?dFlW^Gc;fYu79+ zKX*9s`4g}ApE}F?xp-(w>r4I^oB!Cqrf!)Q@^}pupUy=+%=1qigvVPf9Fb4LI9$+SoAT(;>HX)Ze$_%Gt4%OGruzqtm8g z=FBVIX+RALLZeiI)l9H>1TKp%Y!(r4AY**c`Dfv_&24J4i1IVB(^a}8)d$yZB3qaP zZH$ZU8+LRp->|1oW=;=+fSdvg4fr8kQG`5gdZU_J-4ml?Ul(6zC+A--pYOlU@QrII zZA(ikOo_`H8yn9q&u-|eFDxr)>+Z+0h0^eoBgZi#kR8wc`M&Mq;nwSynLggoQ7_nR zqW)o|z5dG8*7-{rN;5!I7R1HaA_$sU)>enDyWq<=Xefs9P9Tm#eHEmo!cVKwGezz)8Ymqb1f1bf1gPWCHH*fB^3g_WiLx_J4u>t%1kubl1Qb=v zN2}t@fcu!oG6k+PK#EY$ML<;S$wrEQMEiKcN2C9DV;@J@%rRBdhvszI!qtudwD6#r z%IjgFkw^i6VrkSYJl>qo+sR@o@ifnA zC*q^RgtLByPrzdd^Ta}sAFvQk45=%iAP+%h+(&!>jkg9U~^+fvkkgn(79@ z!_awvr$A31tGuLB@UW!?S@(^PH1?!$U|is~`v$2WK0DsLf5RulzPu{EtRy`@HMuY? zB*^1RX8iey!FRxZi)OUxD+we4=P3{f2r$J3{0B5yV9Ehax4C0iM8cJb=%AR`!i?n1 zv7x>R{3wM<9wU=_NAUp>skKdQ!$S@I-QGm}O(u0EdS93Uu)2Tba7yYkZx_=lwn2e(E9e)qs}hNuUhaTSwO#FXOvG+K z;(HM(G83go=|kiivh$LPS2HX&A>NPX*u7qR;>40;CwH3MHL|*JD)P+cru$p+&&_Cm zxjf>Sc)RJ+{#V+g-{#l+(r=4A-spd4EZ@7WG|}eAN525?fXJ}Vc6N^6?QKpQl^nj+ zrnHDIrNCE3S{0DygEGw)JEFcE47WFm`tw-O4ne-|+mCEqZ7}D~x#e$9E%d%; z;B{`Z|KYtgXV0}SThb{l#@AM2D2g*@X>k_Kf>r8VJ#n%u!VpFH(`7k`I7~x`vPj=@ zUFL_^Fxb~qS?cTOHaam>nw3@)5QwvO&D^-Ja>H)g1LK^N$0mH;s$J~cqM|E2!kS$C zAwB~0>fy+VGd-L4)~?>xdh%4?_t%VEk7DmfHn*3)+bR9aP}Tjg<}U*UyIraQ2j=JQ zUy-+Eanspt!?%u5Z(kZbaiZtUwT9#8b9NbY?A$V;BFCSOfRrd+R=jn^g8o+z0W%2r z31D!9A$%ZGX^l;_by>;v4mO#`jukCh(>{N7x9-};h3hCc9zts;2-uK8hm3+Tqvzub zHsHk-L0y?9ATSApKMmm{NWZLN`Tm2ojt+Sl88xk4<>@7;vNH`Zqr8HQS~8B=)ze;3 zl-^QZHqg^Ojy4+$T^#D$1lWX!o?0kLhK3TLj0v0roiD6xVPM(rj^wlFMm1)#Wfa-d zrU|f*f*@p7rG|O>;Nc785fCskn*G<+-SQ>i7hcF)6W+?qHcrQ~`#;0+!%5bktV?n<#Ldlc7JF@aXsguH;Zr_+w9r zE&NqL{r}zmVBa76WB)>9Pfa!xj^dcYK8{c-c?#PEyWBA$(P@;f~g#rbh4n{%35? zIu=_>fPGxhq;lub`I|<@k7wte`0#dndV*hDbtjiB*djw^X<1ceijZjO=r;#fSbD^# zqMK~gR|>E)fzG)<#?+bsgae{%SimAY>aDWk6B>X)K!=V9?ug0|@_PDu6H*h?b5fhy zDp*{>Yl!7=`X_LW^;KzZE+6Y#(&(IF%;134ho7xwy_r$@5k6rRC8caG3yqiZ(E1w8 zI6XcgrK&tHD>taJJuI==V#=ZB>ZvOF6?pcXX#s`lxh`JoQ1f4#O%$zw&I#AW{E+PK2r|s>$ z#M>Rc+ZeC{U?K2zfw+uOpbKdSApQg{Xd^x6MpdNQw~x2e8zKt29EL~l_BJgB_8P!x zL9Ze!M|v=Hez}LijpH*99-M#X(z0WQGH(j&nwgO9Iqud=V&#l=t+h+tEK{{NtCZbg! 
zOAGQf1hhBogN#NaxF`f_=0MgAbRt}kKG?49y04z=vC`O4OZ6Qh^$7XyzdO$7q|KY< z*Y2s!e=er}eEP~O$k7wio*a;RzIU4ORq?Y25wn}}Kkg`hxisDBiJZ>|)il?o$+r5| z7ZB?Mh|_+=>nKwGX;I$Sg+4~|4hJM`_bZths^8wLHCr7yf9_;tK&Zpp_ujWJ#6LOh zcWp<&qkT4a4T8+ic--0G_hgg1se%8Gv%T5=-nn)=)@JPAGrW6!_Z+E4X{1mTX<4h+xNUjQL47#CYDg2oOCTIk1g41!V5l2G zissH9e*1!vnMm!b>*=WR@^xx!Z74}huJQgHW( zcxYsU&af7OR};s`iucM(_pMmidTmdKN~k;AhJZ%S#bw%pojXi`!s;JG8%L(Hw@BJSd0>fC(dpj2o<|zIDlFj5;|L zZQRhOKYvtZI!6(qi69^=fIl&;f?DbNZBW-La3+HsURY!Tmy3qJ@GOXpEa0Luz}cm- z!NoqV^wtJIt4f6Fg-I93rUMU;Ry&}o1$1;+#$?Krj)jgIaJ4R>o{w$Ov*}(eYz6N_cwO z=}V{Y+`8)H;80yuT-VhxFo8p#mr6sO5{&?(-4oWI2eujvNUMRI7DHaSW3E!ku`RLo z-vXn;TROVwMBz9SI%Tp;hR&aC-4J+oA>5%em+$~M22jXoEwZqhnj);MM7f$L6x2c6 z=KdzXKiK!j{@DL5#{QkUpu*(#?tc?T{;hu8FU;!p3X6tGLUm^aEeLlcQI($d_Km7bTInb6x63*bnhtz`3Mc%aAv zMFwcFVFi}Dt+H-cP0j1rDDw$Szu?ivhY4}9(Mezk8sLepj7fg>Mn-UFB&ZO3~Hs_TT7gki((O6tz_Z)%7>BW*7 zY8x~1@{-chVurCTTp$${6h?(4(6F4E!g@y=`_z;K%-9f)OcXp7l>Pj|49oMf0q)olmOS)>; zBqb=ebm`!z8a$8(kOC@##_iZkIy%mWN8NXJvvhS0j1SMp4ls(!s{&%fetCLl7T3fi z7nhaQ<)w$0W|@t3T?G0rfK?y>Tv*3|TVrgGwvn)LVjLF#sT;p|j3Yq<5jI1X-LKmo zr(5xBYn9#EXcL1p`#Vw2mJjdTG_`tn<^Jx{+}BkCA{;w4TA)VHkfz2cw>av!8qGYr zTW95(x!Q~5^_CzH&WjfNEF6heBg9P~iA4Hh6v97i`dhAf{ZQfkYenau+TPza6Mrqv za8T#9%mdZ}AZtUzG8lGMlZnil(&e;!r)bzRlqmlPehBR2a) z%?^vb*(dsGv!vUx1uwR%8|_;-Q(b)9j_sD8?cd&c`R4RRhx>Pv>?|W}KM&PcWW@x2 zdU5^y(RH6c-461xO7L<0cK@;2?Z<7!m6q?#Pn|e@+xSIS-&l-$1QazkU%gqrb}l^I zNPnPAG?U@`&f@yar3GzbWtqTud3kP5Lq#*tsBv%aIvMI%@0K;)EBon?mgB+Y5qp-5 zU0h8+IcMU4JoUyhs^x>&GuvX%tnPlWy=E!Wqk&N6Mffsupr}5erd_ak^MKv=?#O_K zjF^nnu;|3#FF!wrg@lGW`$stiQ98$+&3`(6{sO~2!%>0FZ;W%+tn6K+gOxzo0-};a z7zi>x4e3*t&0VONxNGi@t+IZ%mId5jZFx}r^G12EU2~#$EXdMTZ$uErbc8P}0<+ZU z2*N@n#}Ju5X;rv$vunjX>WVqTDxxFG;zJ8GdyZ^nxP1fiKu1l1r>FfZi?@$0Oih0N zv~hL!_Vi2e4k+>uF7xm%cXewB@QXAz^?dPcG`A2M2hkcgDwR>1S@!ID_vQWNTGRUF z5RL-EmqZhrAHz*o{l)aD^!kt^l(pfIb4n$-Vcl4ei?2s-QF|DutI;8rE6d zbov$)RkN5>l(vNMCGaR1Y=1^(BpKf>I647GcJ|gM6=sccc~};Ipm)U0*5$(KOJBeJ zsBUjh&dE*;kEn@HB<1EoZ6%DgL23xxy+fG02vp|Mg)Peg83r1nlV-y#B4_pJ#Bl(( zgt!+BQmf!~sIRUnDJmAyEMqEF&1a&UUe@p07gT_dDwNRk8yUMurJrK6F9P!=fH|48 z1B+iig5B2DdptY$Vr0a#nyOq7hC+wOXu^VNMp#^6dQOI5b6rqW&*ag^aQ!wmHr<_l zf}2&9m8Yi0wsq8|XC?Ox43epkS5WKg7m$<^CPeK)7LS{nSz1xk-Zw-@&n}Bk%53lK z2=MncH+hd8ECC1`8#@TFghUbpSPQi6bn>b0p39$pEOZH4UD~;Hlr@WwHpYsvY18pz zi-7M6(8Pz)&YsqStg?ohZjP{Avaq(=(bxNm(Y?UHz`nL>fHvTU0RJN}&k@P{#@Xj6 z@R15BL?|AH$WGRq7SaMd$nilMII|dohwD?%zQ3~Y;fak`_8B}pe>uTFydpEFJS!>0 z40?e$jT`>TQ9yp)rO>t*EXD@5l3 zGSdWEWGQtfXvO9qTFUQ4#4JRRZwT@knQMt$3084R)%T0nHMK=vy+S@*kh3+MYr9t4 zZJm1bQFZ@)NUQnm;`^d~X7k(M>Ze@Pa@s3rxnINKq^^^Js`Ks{9}diYcVpL<9m^GG z&fK(W-^uM4Z|uKma{cx9mtTLr`~qWa#=yAwH?!3TwisSME!YrbN2gjncgg7~HMc9FwoRT|TWsaj0L#}pS z1z(*)_8tygHYalX9P2%*mdDh;8!Gx8mm!$zgY`1_IE(MLXCUlt*jJJc!Ugg9SJ3M7?ji1fIm!l0U3g^EngER`xH$>JHZG0RmV zk1mZg+8wxmb*jd^UPWC{p2wReLq?`CL_|4=7)YyujMkWx3T@Z6;Uhc7&hKqHxPIc! 
z`S$B)>P;T=3R9CkZJq7Ee*N{yJ0_@RppQCPo48b9$v2<3#^!gAIXpd*@%mKBKK*7LdAz(hUPNS2M!tE$lJKoNbH4tJ@(lEkjOwEG zjR3J57!!gGZr9*wN-hM2a_`+Ko~K#APQUNg6-bB!3JJIzDw~NG7XLMcocnv)5KBNv zbYNfyiW8wAvB&g9$6>>2okd;q7Q?0cL`xTF>7Ud`=%+G?LeT0T9!f7vRI8 z2*@;?B*HwfWy0G7*Vs~$UmO}59^&PZ5+9SCQ`p=!A{`Gb9b?2rBb-lk4t&hNpylk#}%si56& zv6RVP4Lz@%IQ2)_8B1dsgO(5sW^Kt3%-_GC`3B8WW7=RQBcihDQz6jsmVC;fMUZJSXRB zpw65lthN&lpqtViWTLqdG*80B0W?-tyv1VQ0sH`X&0rWZcp5am1cjxAC9ZF2KcAcX zDm^7IE2FHYW|YH&hK}yg`0&Q=CKMpK5Ehw(8OKhLu)iGahlWN51_mM`gXzq1!CNpe z(C6+N@WsM4JH4*AYq+{Pp9}3A0e%U-73hrkxR{KLjM9>-u+V5vcaQFl=GwA?f^26p z;9n@~yM5x};u{v5!)kUnGq>VFp8-02SOdJ<0C`Mi4;O~G zfa~dQ4-Jk;O3O^k&x_AWDXb}R5AcqP3C~W9A8055UIWk~fn^I|%LnH^u#4@turGPg z06w+sR}IuHI9S1kr99>yU|3Ox{H;EnwEX(W-{1ei!>30L_iR}+_uRfkk)H2j{JvT| z*wPqybl7(pL@yvXDK>o8PI$TS{mBK-&uqA~bMBL)vtI8<%6`skw2`3(t1!R(l7IE9T;o0&9pD!+`Nj~0H zwH(NcfinXL(lDw(X;vW*8l(hj+TTNhE+b91k>aCB`v>h(*LAKTtBmZ`A2?`QrEGl~ zwQ{?Y+VqbI@_o7h`^3$WHTKAlirH<{9TUtQ?So4T()7F^&2`&1&vmg%>;~1Lhq_%x z5?%MEcij^Khov-|r5&ca;TM#x_G*9FGQ)At0^9xi_BZ#uyte1~`Q2K&GneVDJh}JW z6Qj3bAxY*RZB8FP_xbK?pC699&Ke$keB;`C(?L8#a3a=4rTsX25fc-dYWc#;yu3Df_4uF$U&?KI%OnfcB;KjT*$SIypU*bpE6Uc$}>0O z=INy@o#(Sb-SvQ++Zm+PLKX8v34-Q>t12gaOHb;{l5{)ZwAJT&wgft?S!ODSAnhiWWYhqNe&7;r>eVAS%n>slI{pJJt;L+iwt9tbo*UVW!{$K39 z1ymeumn~dTLI@flXb2$*ad%IM5qIM5PTY_P34sI+!QI{6-QC@3+TG>+T@87^eDBQM zHFxemcYSvjXVpreX}YV5u2cIwXP>>FJb@Z<1xsK6n*zdrjP{LsGzqyQ^SweIn&w6S z71GWa>>G=*_*aU5n&cg!_l+?8e$(>)T(J|4a`;Bf&;+A^Ph2CSykLns9ZNJa>;H$y z=YQwU3(Bu#iT?fVf#@(H$HE~`C~*uQ1Zp^v6bIwSo{-eUXCk+ z84B==1veO?O=7%7Wg~ph5yCz)%@`m>TrhVu<4X$r%s)E6xA>e^n8-$nL-dxeuDrrB z8l73!P#+o=MI_O3^YeyrgG{ypbwGxF2R$QW9BV)EtlJMZ7qFRna6;TdpYIa9Q#8LL;z+hnwq{34h^AjP{oJj>;wjj-qYGs znwFav9o|wB3xX1W%r23qMe3oo?-Z85p7uCwdqn$f5_}=T6Cu0-fjw~3IvV^_v(0LH z{4%NnJ~}_R@MyJzw^2#uJ8RpGuS}QPTP{lZuE>qmWqTq#pYfcC+1cjPJa5Z~K3f!e zXI}h$y>ioe1+U}_-b}>$s0gB^dfkztAL=Ep`t6bGxVTB2Fht~w^qa`JZ$)apPlBGU zfM4DF2}xUulr5f=Hdo$j-ng*K(+CCI*~3eKrv=O@z?lMEO=`V-U7~uK|D2NVGh58m z;gcrV=?{L?XmeW{>!knjA(HF1D%NSCi~pj1pC-tfBU5Y?C%m6B-+JoR$VvI6#Y}-7 zfCh`J)K#O_>}mLGJ79%0epe5=y~5()`e5Um z&v$Q$wEmD6?Q`nXmf7=U_2(+6X-?jEe(&+eXZ3e2z5L{2Wnl_~*!lR=or4dIww^y5 z6q73cxYh;)`<_1$b!o5tZe53yOF}uQs716;b6QfO_ucZ3Z%fxNlvh`PGYR%4Aq~SdeA-m1Pc*|0}rb4(fcuWxb z&7`E>rZxvL$ki$7KEUq;QpaFidd@q$UPpiE=mHpG!tjW}G=;@t3sH_2l}%5{&GL^5 z^N9@)PYjC<_*rUa)^gskN_{eB5+aaBXj5be(^UIqXSD0ht=P3bY0JWxIV$Dasw5fZ z*6~s?3se#w@5_mL-x~EDUYWqj2k^#b;Nk<`8&i1uhP8bgNllG4S&AK6d`;h5K6;fOo?ek$k`^B0{>#zK;**7~jiZaZwWa;{H{U%hTrZ7_1v`{~oq$d;RU()z6@87Cmzh59z$Z|Fi5ux>Fh*0iq-6f>+dfM49)o>jF&6)?hv;t$et#mrKXsD% zlBOq$g#Ph8dQ0H&QTiEGfJ&v1MZXsdM`7Q8yDgA7%hEyE0{k!m?-@t1mMK(bh?J-> zM*!yl90j6E789*t$#|}pi`&bGcLtm4LDbvX^d&QWJB@LSOy4DhOKgU5LDnrkHLIgG zyQ-j&jHQc4lnQgWl>(!-(?qDspu#K+)Z=h)oTRa4WFmX#}pSwwPQTzp_| zS^}_8#jK&G-f$lek6-Ueq!c#K7vKZ%U({5d3-LYCUYpWXUTgm1(~~Ga~0Y)Pk)oEKExBC~FT4YcM}(J$sfh zqW2tGdJl0gI*$P_5h_?+0-Q1sG^8h((73L2aO&eb74*OV5xd*Of2C8#-cO;+ezrkCKTYd%T~2jciL;#BW2Rd85Gi~k%?O_l(NnllQq`{0 zBfm%`yGwUv&BUe8Xmy>Kau2C`t5*1IQn@kGX^ixpMLHKD4Kon@_9-pLks4EkQK7>p z%#ctRrGPCBR25#e{9wFlU+BC}uO;o@^{LL;rvdLmvXzRLO&QdO224D(+45mw^!E<}m8U6I*ieR@Cb%*mnz)=kx zX182JAXJ;-Grq-H#q^+Jri&#_eB%!e^R)W%^y;xA+ceJ~*0^^?@6m&0CZ7+#G&{O( zv+|z3bASA}U)z-QE6m0<(c9#w&8_>7;=MdWUp*;(e#LK_j^B1guRY4v2AZ~4_S`#u z*u?asQ&@OTTP=w_;NyBT%WrF;r|xk2=DM&IP1&b%V@`ToZceZ|%m_1y*|Riz-KG@X zg}DoL3Js*|ULcSp&kCQ&4;c?JvMjghEnWwKT9}_1*wkCkKpl3_&oiNuQ4kQD6B5uq zTt)>V>e9oK&{+Xm+FD~`BdTi4_#!5qi%*L6vV3sb|IChp4fEQyq+E^YX7BH`1@3$v$v0=;+0WyHC5l zJt52wV|1iUdOUtSLPC&X>G3tQ)xEc^y0mJs`HMFx$%Uoa&4sDe`RPRkd4+i;MHO{5 z?d_c%wapy`b%SNCrKv@!DOoW&*`bNC=?PKT{5(cLsDYcEGF0<9JT9|1 
zF&=POlnTga6H!5S)JQ{ICihjf)@7BBW=phF%$GD*&3cX?A{3;f#l+v{EG6JvfRkj@W61- zyo&sc#Kw!9go3-UoTLPcv z-&Au2z#Nag($#uhz;>@Fw4vdvfzK3;$i~87En)^2$XwJ8gvABEq}eOtN<5D;GSZ0T zHh@}|Ac!e=3vgr@HiVQzl+kBVIy2Irf@VRTK)D3OT z`@;RtIaxnBb7a4#vwL}QQAT=_o10yAT`~_!^UFSG7QQ!ntiJ3XqIwTeekQMOqCMq4 zvizgglfYBq#Xo9WlFLi7qr=m(vggX0YoAzr)ot09r zvj*E`GK5a;@JIZ=BHzCvzudJgt+ei6K}@e8U16)ceyA}lGz1S+gvX}|HXy^&iJ~x@?>ky+ti; zuVUaTBzm`I$g#CQjvwZ>^@!_@k5)gd-oNww{HQSOMZC?v%+EW#uWKfMTV3kAqugg* zPvkCE{DnI6Jz<7xlMHtzY*~PLwT|PXhV@3+kqAF>JlP4M`_3UGzkvSYlB`5AV!=!N z1Zg9+9T$2ki_<<(bZpo8OTlOAZkZj zO<{ggpnJKSb>87^rCQTKR}rQlJgISc2;#S9`On)oj2}Gl4vlYWzz+Ab#h2em{$Y#Q z=z5tLtP)5-LQ6X=nMel_j*O)?G(l>9+x1t~3pZj^R0#+|9VbVhwTg1#0fZ$0fgny@ zd698Iq=7)l;W6lm+V`1_x()bB(_d)iYg~K{(nd6a) znffs+x7c325fc&K)zMwk(A zzs1a0Mjofh9Ir}{Rl=(&g5eg}ypTI@I%&ovs)`gGHH7jSVDl+}iZ65%+A+A+lJcDD z!t^rlU!l7;XH7yVQm81mM88mvy1)Q@1A&TVi~p$T3jF&6)_@q0c0-vNIN3G5T&JmGK5_g8P1E>CK;RD|J7Ads|2>0pucghf4}WxkxKSX~69|-e0xd3YS6t*?ryrL-ym?es zP*PKcPJRpy673y+vACqf)DWZd2e%zxx#s>X&4<$J?-f)&C{BEYth8M6DDZ))yV*CJ zuVsbh*g-mnjj98pa<1asF1~Rw#rsC35C^JjYy5(u3(DFA;-8X%NfSzFNWH^Q99(a@ z=Zo6>H^{ul$o5-EVA?4%WH123V!=QcC&>FgfF%pJ8pOK_I)gAjxMRMd8zS=tQGAQc zxrqd2E{$@KDX^Z}=B7oAT~p+#oAN`p%z6sVR~f>TAygKEr{Y}4w>v9Pg7m;k6@p}` z@ko6F5*f|MbAKKxn%FLmIV4!AFaBo*ja1Iko%V3|9P zN1xc!j8K}VaPy@gOSI@Bd{gy#z5L-py?ck(&z*`q zKC$?x>8@wDHJ?A#JZXgN+l7SMsnZHqL#385Zz8!yr8RSAuKl8rhdQ=LkeJ6(wRUoZ zpgAn>HO()S+g~X3f01PwdH42{myL2uSOlm^{rzk_Wbzr%>A43*K0>Y@1G7dm>;xS-hU|)xC9AV zIw^e1Y}Z{|l0R9*1O#NKruOCJnx8uO^vJ5$2NnihFbFulB*$b!z3bX8f4%-;@VLkB1=Z(jqn#olT$&g*fg3-a?62PEzd6n2j>U^RF<}wn&pB%6Mdjhy zl1Y3P6&P4XZ(CY)curO}iAd}2=+8*b>TK#n$q#f&M_ox*b%yV!yB0@Rck-r`Idj&h3yzaN`k*^n_ZK>8>l5|9bS;$;H#s)yK;(5=UfFcmfVcghqr6 z)X#=VMs@GRA6fy6!KN@!2^Bs%usTeKpW*zY*L$VqI8w5damuZ7b1L^3HCQ>wfVq%8kUOWj?R)M{H*n74%Y+TuO z{bu%)7at!z{`$=_CO9l5E-ooQEv+U$r>V4?I> zjOw^Bz=;oC4EYF5RBBmVMlx6=d~RYgaD}M%qj0nc7nhU_@hK5sji@yc2XOeb@Sxh_ zOb`&DyAd4SJ67**m79&3F}Huw0?O8v^d+FAD)U3HmruA2p~{LMv{3b zm?txtSk$Ghuer_SMeF?8Y-Kr~^mx`}DTd0z;q`|guSk3*#k+umQS{ILOK??u{qs;} zkLbTX$U6r6#$qh~g+lm;*X`fpFZ}n{HtOdsX?OA^awOtMMmPA!_NRI7e|PXDSX1J; zE6$06Y7k`r!~$fZLNh`>l>ph9jkUGy9bN5K)}P3di$&rw`>jBc+yFXxXpQ57=Wu_Zjf4Sx3=@J!@ znv@vd+!8~fxNvy)*(?JlVliR8)3{geAXzZ?by*x>IK^dPW4FHa)L)URB}VWX%tXT4pLb##(D`uQoVu z@cHxSn9!J%g!qh%jNHPS%)FMYjQ-fL(xQy?+~n~5oOCgWy#Ms=ohR>tB2p@m7)8(nj44KLf zLr7uBUT1)dySk+A3Fcmagv4on0S@;9`;CAcP)?up_7O zqUQ2b=Rl7dFs9~&L%azSv?p=O+_uAGCdUqb} z4@vkO7;v>F^4L(+j#`JADMyf69R#N!&(YBDo;BZho~qZ$4WG{LFg~@#!T4yP>BUc{ zw!GdrH~Qob=Vi)4o0Wps%XrK|!q#cIZe9}c@J3g82BoLA*88XT)#GpWE`Pgef%lfp z9xLa@UeK$w(4~cIb5myZ2F$JU+4TYxVphnTeI_*1ny;SbwVgwmk>$nwnWT z+1tB2W|il4_O}894|E)KHb7Pqq$WUJIk1RgAV>o`O~gQHxx*xgFCgD~NnW|3M^=?S ze>3^ewbbhmDng@4gSf`()`G}Px6k(Z`Gt9vrCr3K>aOPKtYmZ%k;Wn8s1>!1naQcp z)eccX(K<8g=FV*2uoS+$gY-n`Y6UhUFD=F4iE**7&p>@eb4yuvMOtz}Tv|qSZCL@a ztG~ItF)lSfHm{5_0-o3`sH^I+Fb5wGC@zKWE^)ESmh?tyD)K9e3q&H($YSBh7}>~j zra;I+TdQm~@OVrLE+HYTt+oW%R8dw|pUJ206PFqc_U9ek4j+t(YgbZLX0T=CITI(t zv}v3rD~PWwpdBNAkzNi-G-{H`LWQXMI(mM5>s_~ztfY=bCiYF0Z`NPkcl{9zVt`E- z3As!VaDT5{O5n9nVu6Rw?v25|u^5a086o_=us3SO`)^cNgnw)!&l2Kh!-##6f`>`c@o?pkR??iXtC`L%4oiT@|*mkt#0!-Mzgsb5cvIlHXgNYwJrF z|7Ln@8m_BO#EcWcbs|fL4YCC8G=L*i`g621E3S+Ml6Dqvg@Em%F;3}Tk!j2NJO#tjR1)>#+QEVDS&I$jcY1v zj?~v%4-S-wpNZ-3X>15+XnaVf82~H+kYg~Ao^GVMby{hKdQF2inW73Xo59iU?pu+T zblk%1kn2y==f-#J9W5y|a$Co6OB?3Q`70YXZtd>IW9iJ~y6oMr_NZKumAN@#zWIVB zFPH3jy2HWW?x&4gluvYcfLB~hKzepoK}mhz0E$hC{R2W8L43by>*}s-ZvEx!?-deS z(@@XhP=*IuhKFjg36jPWKU_b`!bbw$UZ(dUn)nZgZUPhkI@x9y(e+czth^``bk+HfpV* zQj~%T!saQ^D$C86CWIi|KqY9m!rK(Il)$Unx 
zi8BzN<)|#PgOSvWwTRnJg`iD}Axp+38z@H}o0ayUth!IK442=X`*~ndUYz{7bwR@J`+2!$XPJA$2 zZoE?0X7dWC&5NCP%<|nmGj_MG=L+Qj!__~}?R5Bbtvb)U-plmcE8t@ zId$`lZ)$o;WsQ%shxd1fIB(zXrfNoibyKuu#;2Pxj~+XoKJReb$mG&~SMxU+5z)!9 z**V$OJ$+;v6`h&r$cqg%yXACgyUFHRKXxz4ettFM?LBf~DzL~B9b?oymnyE4n7_Rr zBwq-`kBERKMD>RGbaZWzN@K8yG*W#FH8l-F{SvpW=)HV|`Sz*9@?{1J>Pu%XQkkib zC@Stcxb4}iTh_K8QX~8c=?S9nKGrhMndfe=sBjgZ5DrshFMr;@u5bBtfL1O^a$YIwq7 z+;FQHnH49ed^5Lj@bS;8s~^IXPzqpLCey^U_u#(fEn6seo`Zil47Q=0 z{K;5Mi6ar3TG4fXNqIja+&rS_#~p)xV=)&0@5f)xxtxDeT@n5>1pF=LBKkeR`wx9S z;csDQ!9Rpu{&?~KA_e$A8KO~(@R7a20i*-C0el7cDMo%d+2QG_MJzFtA@eQ2+txKT zzW?~VqB)rlxa_Q=K+lk_ru>|=2Q=1x258Yl6L~P5#$V3hTn9*yh~zOwQypqk%#t8K ziN&z+4?9gu~a&$fd^qC zcu8Wf=fDCXENJMQ$rgM8s1zR!pUn{R%K@DE{2e@=f`Bs)K!=J~&&!ZYj9kj18Uvi< z@pdw4MgTS-OfSlFijI5p#r9}s$(NWk%dlAA6Gmrt?b-eE<;#wau8!XJ^Y@Q!y0BvH zlU4Gkl~4QJe4F&~z~db^9-bR)B#ZAm92^GIS5eoLolzK*mY0~48c)Rx4YagLc#XoQ z{gdVGwOOp>xwHwTlNBDs+$mZRB+fRMvEszxif%?D8F3H>>h`Q!?!= zi#CHzK!7tF*sJOEO{J|X?4z~6$8EBXKI0Jl$j|NNKvk}-xrMW>$EQ!$yLPTRcR<T&{YIjY3PuHnrYn9Nz4Wra;G9=KnbYIKvIWNS*F)Gp4;rsPb&4_r?vaZ z_D4_X4L~4T1v)oDhcfVV`LLJ`OYxN|th|}S5gIM-GyU#Qbbh4z<(dB7cMCR~sm^;l zZv8vtthuzK{l+Jk6%X!2Y~N|6e4g18u(&%?qc>s-Ghz}1Pl7OQ@SC4uJk4tplBI&Q zPF3ictlX?Lt!Ari&)a3WZ|8s6huG{?__o%^<)di=m9 zEV8<-Yvbk(2ls3Yw7tVDcs&@hwZ&I2>D{Eu+Pc`$tsOx-E z?)^sO-KH6zc5C_E(atrU4nZ1%Ks6{aDvi65Um93kmTPw9rq`?2B`Lm7a;ODwi8x8J+jV&4MiyZh3e zzoiEThXy88)?*nX9Y8J*7}$mszeuY`p6@QZn_N#1w(Y3M&WK2=FKK1bMrXfJ_<~+U z9JH^?LD#pqz!=qs8==h#N7n+YGAejI*!qOnIP15p@)TTNQeR&F?X&mvbl2(3TR&&< zvUz&S>zB+pwr$1NGeI!bjYT_)-9%Yp}oz?x9j9H2Cz$K8|Yf7_| zK*ZzxUI7)uK0X^=tQ_^76!JzEM@clg_>qo*p{R&xUfUqh_~ zCu&TDs!a1n`#ORVl`1|*f;h)u-&l;r|D^b<&gA@qjrSkJJ`nx$g5cjUpWu(*1^C@T z{{IE`3IE*%n_ysw^6jCg}FDCyyoqtpauY3DVYbzXjJF;40h>*f99K7o?Z%+(16V3zDzFL4*zT41zU#$V#x2S#x?ks4o)}`j#xiX`Tc3GndOspE+kDuJ%cW~?eqia7pJp3B*MtR3% zli0ghDD=*_cJJ9rYtz@5x?Z$P4RpQ;%Yr_-sH?j#za}@LHa9LQAviTRp|-O+G%1h= z06QxmddQ}fgPVq$&;1$7J$HFe~~#NPCEQ{mI} zMPvn_EW*@lwAU!%F-x(S-Bix=)GFhgS~q`Rs~`_|gH8Kiy|DW35OVSQ)vH$){&di* zZJ0}8Ytw}a01Cj+fPPsZDgbkafTPVOsM6XdGYb%i83#ei0y{m1(>njFNTRu1Pp}3h zdIr^ZDmy}nQ#u!0H>u)+Up93O{RZx zRu1yk%6Lrd_n%n3zSo!+V19OGUy>uK zGUJxfajkj!oA>Q8dvmw0Fad;gJia@%ILXTX>FWy%P4=sN-81X+wwaa(^&`zLwr7L^ zg-EAEa&|*(Y7LD!a=mkaixm(W2ix-od&-1B5&#d^ODoK5tf?QOa)Hhs5vBw5VllI(qKZYe}af62=?=iOAO$nFU26y$sS&jCZ-O$dMh2v(}_n%VxzaURa|xizKTlkWyuhQyP#`8`^#l+`YxHkBu@=(UmG32hT-kVF%F3 zQ~?9eBvQoq31E;xCejG`St&jq_R)#KA~7DsVk&a-7#x(d%VMG)jC4$Mi`$PvJ>8+H zQ>kLirywPin#x+Umg)7AoD?2==t$nOb)D+kLsO;?OU=cvJKFZ)U0s4lcDhe-X?$Z* zw!5wE0EN(t8w4t4(9WrL?h1^wGI2b@kdy10KCfl-u>pr)faz<=$>^x97N-=BKuDD= zhVmtSUg1c3^aBz&-(tj+Y3=n!IB%7XKsS-{bcEN23`3_Q8J(4gZ~1@BiWM3IEv1zi;9c zDb8dP0nvUD_;cYTTQrv~(ByGe^)wv^iaWJ8vbiky_0yxbuN{7A{3<`Qlsb&fh>b4E z$U$prKCY$Kt-IT}t!IBHW`h7Ov-l>J<)+0sL9`*<$eZvaY2q_cYFJt~iq%^PsaM3x{u&G_q>} zawQBxH-YdSiGH{Xvlz!*H$*v-mUXeX)Gab1*v#zfiE}3m_OE~a`ihg2@tY4vXDvm3 zhAr-38FeuYIteG--S!5!zpu|2vbXrTOu# zjTz}#-dWi`r0z0qZ(~PxdS+C3RC;V)dr2$4o(|O1lIo)R4)I-|POQufyo)WngelR% zR!-?Glwl6c<&tI%cPg?67m<1mxFW0U@=vyo@1Gez^>ua;Fa@n0!~@6oKYq11Gj}bS zpvz%PF=3(z`8ooY90(MEI*Hkekn*G`5i;N@3*H(cpV|5EC&yXL3jd^=^j@LG zOSd*eDfAnX;jtvqStr*Y!IU6CQ3KlIzT)LcPD`Edt$Mac``Ib=x3_dZy;>IGvaT*z zYal|g)kC4pO_dm|$BLXy^ONC5N%7MsGQtpW8xJ1ZOwY~X547*^pKx&|60eN(t10(s zs$o{C^_`eR{~!l(b0~4T$u8>gU+0(K-Prizg15<;!M@b2Y{yGiSKhq6_Qk8C$4_my z{{DG*u;<~8i$6`DxV^uY+%?Z*wYvYx z*+E;kyjd{+)$#4ofgkSNUV7`+yxZsI8ST(`X0*-f`kt=9M}o`?-2u}Y?2&}4NXSWL zm(!~vO>ViETrN%jj%l@{_d6BFK4lN3gOI|aQSHBdv3z&$=ey$(-;M;Cu8;qIY@jeE zJv`#WC%ctvcRaa!In4iCL4gm2QQh5F80KQ`dUcQ0UhQ`VlfRtO_;`ASr`hfDyjY$X zJ_yi`V}BblF(7%kX#glp2@Qh56_V*(JWnWLxH3RM;3nr}=hT+7MpT?QqckxViv9?E 
z)aM#6nQavV5fo`Lxg0_-rZNL%#eFn++cwZjLp#G94)C(mvdXfHU4A&NTd``(_6-|W z&)lGj{8*=)KToGoZhF6*5?NMJB%{cXRcX_n1LBVp12a5Qo1tCiL4Y9?Zi;wh4xPYf zaV5}d0JwPIvjnup+M@SwAN3Emk*K)bvSRc?Wl21B#i*9gz$AucuUOVULzO8tQ8;B1 zbD9F2G~xw&1Gabg#p{M;tA^#KF~*MrB~9Mi^?4_^6gxfbs0webOEEoj%Kf#efWxJ6 z7~*$^ilUzF`+BD+QYTMf%1U?1&uHJex9gjY_+8Q?;)N^`&?Mtj66W+@q&n_sj^iaDv6GePAA#EqnZr9}>ZYa6&;<2Xr+dVWKMtn~q^^tOuGDv+x zXdNqH7i0y*$G>Xt`OJcE0ImS3IDBqJL0)lo)o|}HNK!C`FO*PifJ4QgElw`#+%(ef z7T?Qal0WodZV6xy+Kr`)^P3uvwFT(5Vi<>7nG0AT!U0f)l5PnT1C7N+Wm%t_tBknp zolKf0fWC;a3m``1Hf(r<;qryIFYP`SP5TMX75WOHZ^_+m{!G3M6H?5S_Li zlE6MEk>I{iU?_s6BACq-sBwhbYO5{|c7?T6m%V)Q^vIDVw%<+_M!m&X+WT4U^t0FR zXkE*NwRG`*!m65} zxgdpNK2R3%rVBWVJdo!LrT}{?^vW{Q6xl(l;4>XO!78V5ncL>-#9-b5z?w%9-CEMk|aMGzX8@)7}H60P3MJ<&; zV|+JxdFR*)_s@r&zwIncd0LxmQypbp@WnXvmf@R&ijNM;TU_1d{_#OX@XwZ>dMty) zAHzbOMX$n-rW^St!0Vc|`Oo;*WY*Q2Brb z{;-fs5{zmUqp|>XrDZK8<&A|UwpL#;G#m|hR8*;s-_z1mn3&XG-(>dk)#lyXx9nVe zZnvt{CWT^6MWT`#U0s7TO_qv`??T46Eng1dfxyP0rWSoT$S>h01@ZG@p``(xsE|zG zkK_vRm5)PV6&-Lr>2}{@znI55*cQdbqMhfF(GxDTG}gq~eW|{7o_p+Yr>u0R)FjgC zHGLP(GgIP$La4fWD@8?PSaBLl3h6{`#?BF#8n4a1hAAq zTvk@TY-qq7CFhBuO+jr%bql&oC_sB1E^fi$F`*<*lK?8%kjsMx0SuEU+kns)BU^g-LYxO zCT4H1p4)5l;i~8NOZ^SuLv3YsRr%r_L+@F<0bmr1Sk?m8IX+`0m#HS?$pVU#)F{No zAaDhGZKR#Uy`wt<9k;b*?juBPgY2UvR*L=cy6pDZz}BU5rcpt!hp;`j%*f`)MGNzX zc2-aOdqT<7F97#Nj8(v2N*3;>!>c;HoqN=Wyo&t9l+4t$jHafRn#u-`Uw+Ou-YK!E zC8g;-Lj@E@Zc^&!y6PYvA)8QcIT&pO=|>@I0pB0t$05vuNkGy8P-nm-4om})Dz8@s zx|VmRFRMz~-q-W2s`8z$&BI?NrviU0Nr_gj>6Px{NfAJS%$f>}X~2`=!ZZp@2DS$D z&SuBW;QGixAOhY9C0e>WX??Qm%^wf1yx6wp<-R!t%(sk^gB6V@qvPhf+9T0!2&P0$ zRKKChX|eS|rTY&n1~G+pE~mFYQS*iWl1Uac=vnVT#P5eNh! zoS=!Mgt-0C+pJ-=Zf^F{1vxV(;clvlzNmq<3fL>t{O3fttFQTjOuvs@ zzponVu&X@(Qgy-6{-Rxtf!f$0y&_A^$nS?mfwO|KFd}G8t(sfalg$SgV(3D9Uiaqd3Q_< z>`pGy_kD1q`s-Dk>t14@5-)gy&_-J5wwhA##^U)t5vf~T+i-zX-dS7J+DPS$x@*wq9gK6#G85XrmAI*GrJ?V_GH|mS37pa*p_)% zxV$j>cE`}-+O9NrvyO_y=Gt8U5Z}D=G7^~~F_925V+*1)cBmj5|KoGn9h|f8Jr&4Y)CY^GGy{m!uey3LQ)>u{}qT&3SA5hv-lPkC3_E4oI2n4 z@e3m<8F2XFlKQ}3o>7TWTyj5T$5TJP>e#Wle$(3S*N@5Z;kgmP)v>Y7AFTS%Tx`=& z!N^Gqbr&(`uB30;0(Wmv4({*RusK(Ke$mb&{DNFD+Kx{T;58M&+xLPsn_-?lY}>@q zoe5`8Lw#*ma!Pws2X`dN@x@?|AwtDL(RDxc5mXd`j~0zWDxXi5SVT%tpF~VSoP8*R zy^+7{c#RcpA>I{smNnL&p(r2vdQdM90ZJq0(wTqj!2AjO_+zkdEXHCi{w3p|=7|5j ze@7Po1|-8A-vn@!FIprLXoEl*M2aAi0Z_qG5dwVyz*io%jL<(Emk>|kGFZS7XT8e0 z!FOgZ4XtEJZ-qjGJ~}GS#~5)`uWhf1PLC|9uO1;Xb2*YbmWaWZ%yqCiJV^;oO~|w? zsJJjhI)tMv6~ammY~sQdHrtcMs1yp=Bg%i`*N;lfPDswdVg)RiEg zgHIo-hlgCx$Z%tF2Dw0tNb@(kZglIx9Vf@12~klQF)`xfBlWRL3Tx=>A&#IGMU$*x z5#uuO*8=DO$b&$N$DKl^N&y&(NX}6X@6C*^eL8Dc9=3y3yo%`~FZ7%$3RUR$LAYg8 zfj@`KlH!3nj=hw^yT###`?wo>I6jCBxZK@+o=RQE<0^_@lL4B2tSx~#haP-76qAvX zo!8jcPOK?zX{sNNODwUlc6ar1jt+nC<#I4EaCds@$*RITY5oQ^0UHLr=YhXAI3VDK zKo|mfQ(#ydL~2Z!LIFe|m5&z9SM-sAm2C&68&rELa* zHHS)D+1{v$?V81=&!)h1rWn*>)u2Ly6QMzJk)pdGv>>F=4XI8xbpCYZ!?nvcCk<`R zF6*gz)ZTfD%sfS=?`dh)FN~M&j#C$AZooLMh&DoEuT5}2jQre&*sVu0E+ZTd9SE7t z_SPWT>2y2mqZSs_l z^5CxszN+A^P6}Ec6tHBWtLns$6BnOHUcZpbja=TAFqfRHMGuz|1ISR?U za9+xSAVpr7PEYtIV!`vCR2yzxE~~PbSDp_8)zFwx?{3u`?N8`xY-p_U4hhaKD8_YT zfX*ZK_Gcu-r6k6XF@wOB;HqIJ3Mgn~w2dp`q$MTh=all9zt@-83}BOiEB+A>L@KBtm+8N!yXoGmsJ)D-sB^%8G8jzqjS=s>@q6D(BAu zHC>Lh98E!~OG7hRe`eG7XN^`bNaxOx7jGn}%pIICV|aoFUVCN1lC@Ziuh7$to({=0 z3Ku23QK3A4`2KD7{)4?b`k19_`}Q8HxN)`kt3^X>GM7XXjF6t$!_Zj+Vg8Vn44ri# zpwsEhuG+Tbk5<(W9uFVehuyTAf8aD6zFeoILz=y)Pg=1JnZ#0GG_-L~)7Ou{9p

      mf&jFC)+GLBj$4znKst^n^4@HPP~Vl$`Fnc{?_fTt_7IHv##^V35b+EVz?NrfsF zw1vd@yLbdLnCK0QEgHEoiSJPC5qb?(NbbUQM5e?PSC$LVec?-N*689S%%YCGGq1D1 z^{x2@&%mR-Lr;b<7YBz=aNsUYWX={Qi*JCa7%T_sTJgzQHD$H^cp8r_qzeFBQW9|D z$YLwAhmUTY^K!Et!t@D+FodOi`2POHwcW42+>T81p>T)02RbV%#Wx%1Zh^9Vf`phu zL`nZ2#3>ArLD z+m_D$z79@zapyo~Z)@#vd2MfAV|_rpWkkqkHt|eD?Ls8jQ4e2-O*!&q38ooLr^!B7CVj`s@iKmrTiPx95Fk>Z-tNlNv9uo zvVA_3ecn@VsYgs(1f;F3K3zURmy6S8G-_}YXA8Y`c|VkiX48rvA|a=dpkqkVRjHnD zYE-XzC7)GNjb*a0O4q#C$oJ4pa+Aq-9M|tPo$sm3v(^zK&(DYZ4a6aak_lJ$q+h?<2eN) z7B|?gZuP$G|9rc@`MDIYuMTEU4sKXvcJ26^L#r(f=erna1#OZ~SS6jlczn(rBu;Of z^HN2R!|OcG8#-UuZ+Cpd*S&M>4$pkMS@zrURUeJ^J~({T`sK5_+(2f#7cuutZ_M%# zbEMdN2{!EkKf+?@@^!4?b;60qowv=Pto8Sp7BeHtj8guD6sipo6VOo=7X+Rt!WGS; zJqXr9AEM_&j3VC#fuO0Z_!T{ICv!aR45WoYT?cgIiJxAA<417#PXFmp;p~WQsv96< zYI@rYfz=bG3@)WJ(dFPeovOr3ng(#7vwd-cg9J+P| zs!LHkL`7jAM_j-0$Pg3Ux_u*AN1HHy((pK`A)VQ+#}5XdJ0ADTad@DAsHdl+wi44^ z2E#2-T?CC)%#Ox^{@$KGa%&SN*v;4d@q_eBM+mPT*57)7z5j;1_hiouUE<_vAU}hG zOl7RtP-lFv(CY`TJ|9|Y_?PbyROT=y$$+W~eIkOJuhaARZg)f|@P#bi2%`@5L+1la zVmvY;k|0Ex#HdXgZv_7G=sY$D{V5Y9<7HGF`1tm(*h11+FswAQTTPR`ZwLSLdl<(2 zO^rdo-mjR*x;MoK36@U?!wTLG~ zRqnX3h{72qBmb_86K~NYt&~{w$F|;$k4&#^8l@u z1A}ZJ6g3nVRMl`r(Ah37>`812Fv+bgHD>t1V_dE_mxr(!2!|yr6s-n0HOx3|{Zsd+ z{e?((`$SuhjtDPq(Ze?H8MHuYNK|aKML4O-!g4LZ>jOug#~lZt*^iwo;#=}4@xW18j4R5QG8k;mrA%u>eXe_v_YT? zf_bza4Qi4{gGN-*}-r!i|z*W-ALD76N7&3@HL$8EO@K z9m>Od6*|wT6y8(!*n`AvpICWh0{_EgaGpeQpNjLEAq-qs?78B|*Qq+j$W=2W!D|LR zTpdE?1$JTxHI3^cO>v)D`%%yDqEgU-(1rl(lp-C zNaGGcf(1z+ge1g>ySux)yH+KYa<5%g^VF_{{`YzB?RVS{=RMGCx|<3r%aQ^Dpt7>`-tCUt*NIQ>)IPil+0ii6 zO^u7{fAfKGs#YNHK5z#^60v1ms9U2wrP0yazh6dLZl;ovWWTjTOw)WQ*I2xylSF@+P;3J?>ohrJiiwq&UnOCLR}NJOOMz? zMTrFa=3A-Zd9*EWsyeB(L*HKfbv zq{tYjGBn8~f7x89tPrrIvow5xSb#~@2L6*f{|otzVc%Gc#aR6RA^zFDCyoEtdEtKy zm;EYWNY&(d4B+$vKNlFU$;54B`m_P6=@4U8Pv3<%Pmb2*2Z`BE80V-^Y)hgos;^&; zEBs-FAR^;~x@^9f&G}ocMkvG`I;0*XyuzZ~u<*eC{$|N4oJ)tkc5zBl!N*TuJ>3I* ze0)SgVrP3^U1=JK7$Z6_4EAt+Yh`v(LSa=pfifUDp4$4p(9qadukYWvd+7dyy~*+4 zC3m3uvhJ@QA%0=0+wA{1e*8{MR4Rec&&7r@T*RNAUXj*8B}i@q4K2MXNvT!E$$6-$ntu1!z+{Pv_- zAr7cCa1yu42AC@VOabHp)Odm!BJO>FeBgC5Db0ZIn@cQ6MHF{UbrRRm%c7DK0}#+X=WJe-865eVWHi9U!B zih9H4m?^WNcr~O?f_%f_e9e+%t-wI@3m*(k?u|o_5pqK43EJ`)`G3=A#+mecXxI4f zzJ1#I)vNn;ZI$m`E_yg^$on*nedhCt)@sCzG9yE42HNW81`Oe!HI~FZ%Qde)#MpaW#$u_OzCFAOh<(gFyY_sH@ zv8cH_q5bVNaNQvOv@+zS;Y){cey$45F$NH$1_1~>5%{ZtpYgD>Nw@R-ue)U)+oBIg zEbeSIe{*8hsYS-ut<7Jpu=H3qF>0=M#zfg_9n`OhIGW?zOq3(mP2FdSb}!SpdSLF4 z^XtCYTYa`Od9q47^uh7KclX1B0=FGL{MW|^ZKOP`)i?j)I^U%#X)`rD%uU$SXEYkB zSLn($8p?Ido!E1D2ZZ^ymSm-6MgDkyEz;>wckGb?uZ3g>J$N(~R_K(gqx2<)q>o3a z;Ro8h7WI9cC=9WN?Bh+IJ6k`UO54Atbh&l=jCn1}^24gCEX@hrMe7NF`~@uyG~Qr% zS(<-jV0~Q`#$i%fl-!o8fSiQzM9@cG<`ui-k4{@0M-SzXIC$=44Yx(rp3NObK51fuhdE72svx=y1$(C2a6lKZEh^UIlnu^0Z z>QqBL?BGu5D8mc8JbV*vSRle^!_+|cQro3nI_jXOhbe0jCRx-R*(1tM03M6SVy34h zHRok>;}eGOKkZ+$qkq#r##c8|QYH?LlBC8g9=f7BP9!1LO## zA}u29*BGyKSC7ZSw+^?gZGk+G; zRR1R}3ja5U%zt8^@IO-{|2JX``^I7{#^T@NpMo;~E%yC-MXJZomn!J}%9prE3bmo^ zQ$@+y(u%bJkJ$W24fPIjVPAUdBZgZJ1FQv+kyxk&LMx1E512QI8-Pz8@gN#mSpTg) z$QO)=8e(Ct$ip>dT3ZRJ z>1pMa*(7RHX+`qe_jg>JALr!!;0coR3UB=k+|OfY@P`N7KYH)je`w#y)8{VUZLA}V zW@5<^iCA3VM`k2;kcYa+eUWkA-adEhD&P0DzsSox)ZDa_1ml_^P8Rhs7rJQS(b%U8;NTG~p;%`i+pi@@RvuraF8h6&<}Q77Y^~Ah%@dz)ocMH!`X^gGmz`4| z?wI%S)|p`cpZm_7-gsvJXAg(GuQx*0StS^1^y*Jwjo0l$I6+zBmwbb};fx zPvqsC$6JP;oyc7~kY@M#EMH>tNQFDEM|rnaJV zh|d@lu}Ukeie0~#t+DMgG!f_-4#|z{&{gKzuZG8$;HL}hwQtwZVk#(!4Gl}Ql`A%^ zfVePVl1aT?5pgjw3Gu0^=?!(wBiFrvD;{~mX9*;ip+F!kE-x#ut!4=%lmvAxP2U1s zcb~Gm_TXqx;>QR4k~lq=87pp$rdYWvA>qSTSokck;qY 
zq*k$Xb>HJx&_IAesoWq=U^J$f)=*cU7ZpQFi7mf>IeYVR@|gpoM>oOGQ&?FR_}V}x?jNt6J?A&;AY7~6`MzP+=HJjJ3W=t@(Xr48#V(-rCpn!p%UciI` z4i_))N|TsKB>HD6{lCn7Fou0&F&1O-@A0qLCt)ekf9yeEq)Azb>m9Iocux#n2rMp- zCSfKam)TgEmmKpgJm^_Njjw>0&t?@QBz)wsT7jPd+>1b+CZxy#ZyX5Jh3v&ZbpTcl zkb6YKoDmmdu>jX+5r}@tT#cHNiz=%No;`c==7qbrM|f#bGLPE?(3+I=xxYU%I`X^Z z8wCuYk$Kr!MVYB(wPhXg5eZ+M9K!Xda$N!Noas72(MQc zUEq>af_&WFzkB@%{QSZ1bTji#H+Fb{yQ!mRA;9mzx#}Ne^UY)D-d)D_cFGA6TQTk_ z4u3KR+H8@&0M_Og?DX(B`ryGSH!tVlgr5O%o)2DJEG>?~2yN*B-bvx12}!YZU{Zj| z1ia5($i>U*Jv|xWkw;oPx3#vKfWQK{#tg0%2aW&)u&9Z{?E##&U?3&~F)Gi0UrE4@ zo`_SBa~jIlLAW|ZY7o7&Q@j>kah$Dn9O=H)lYfm&pQ0dHzZeh4nsxwIQ4`c2PxtRC~5T+$BRf_Zwa%4(o#LPPkzQI(}k`Xtn7u3K%SRcNl2Z)@(o zOW${$T)QP2Rzv+JXmBm!JXV4rZ3ssqf5h>{L6h!j5OE5<*@+%)LH9SK$2(LW?9jZk zMdQAm)~l@sA2#T^uG9!yshhc6KWDN^p}u^Bo@$AyPQXkf$EC(s*BD+|XZUKh-se@i z&Ne#l)*8RtyZrVZ+Yk@;YuEmAaC&*;?4c`D3=`)}ZB*CgD(Hf|UYp#wm|60H8#O!@ z$#~9J@m^qA|M*dTd{X!izsDy|^=8L+m-qxe+h6niGX2G~f%CU04?ld~zB}s8QxNtF zsb%@G?`x8r1+{VgZcnQ>Ea|l}q3e$msv%4c!FU9+ngi-4Y1z3s-QB&oW(ia9w+KAO7Yl{>rA}i|gMtF6EE-M7ldQwq zhy%a>dH#dzo7eAer6#&WU9@jDHRB*v0aB8XA4^t_p(w*rMPwx;oM^<{y#*RGdBAKN zpvI(E`p4(BbrK~PD%2DTethb^evxqXy7-4Lw6x+&NRal`arnc2Aq9)pHt9?d>KjqU z$q(z8@OB@C$RwcR=qbT9wip-0lYE$(-saFmFE^K4P4TWks)4=++!ce5OCJ-HOT%`a zDxbT$T}zj#hIm@2TN!n(o*@kP0E|kLY7pSfd?U+Np>XuTC2LuRh(I8;cebRaCZ{JS zEmVhsDnVl2ku-$L|jIQPF|A71qNwF4(vyb6T^ zsRoNc!de3?0uP5jItFYKrJu$iL`VDgbX9+N|D>g{O2}i^)>LI?CISqL1*yRP3s|c_ zUaeaAHWko}4M?Z@XR6gEnBgu+13SGb#*Ecrg6=uXI z#yCFz9N_CiA!h?*0K^uS-lDR6D{I^^KDoPt5EBy@8XV#CJ;dGFJ2vQNUuznOQvkjQ z1ZVl;bz-n$fz1skNA+1qPdrg338nYR+)R{zku$mqvSZTTtLDUr!3 zZoYvb;R!FEzw>eTN=S@({OReQdnbO>$JYU~0l0(G0(NV6qhH`NcaMX$b(aLJOTe}V zI3N)2Y3V-J-RC0`w1T(~Fd{Izf!)?y<=)%zgVv zf9rW<`%up2IlAbk5$?OBKVmcG%LIs88-2gbMVk}-~B{Wv08_Mxks>93a z;HE2ZMi7j6;fNor)A>~=^fY?29XV`KcxI>Quvz)}78M6O)fZb;-)z%(Z>RR!dYsEL zCEq0)0aKNtE!E2AS=QPt44OT`Wr^v7wMO^W>%U#A>%3N5@_4(>=*5nC@6PRu^ZFha z67=Eiv#__%KP_97IAuz=zA;@xho-MrZlvtHQ1SBy4cE;^ac9^ zJ`Q0r2$XQ_6OUKz)iCKfc?!B)FktqIsbUcZECH}YKxGfMw3no2ltm=*LXzO*Rl50H zqOu;(#G*xAtI}d3^Y#^R`$RjqkGgQtpn@V*Sz*B3kg|U>#QOuKr!XQoDkz|~rXo5z z5)+C?cm%h@=8o=1;a5Ly<$;yw=jNm*^Kltxj7DZrh>S~@E~P{TM8CV9e|&4Jh9(~= z3XulL=?dl4h4Km_CB#rft+M0b(gk*9T5@4ha8y!7ErG(43LoS3R3=b*p|c*^+F_Uu z(#4)|1ia`hieFIGf;Al~x*`oVjtuITfb6DC5EB6`1}0Va8(9_-nN$`C$_K0Nf4-TJ z?9ttvhYLjWx!8yeaW@YUE6G^BzAJJvyV5^(7(5D$)wIP>tSXAzT5 z>LE0iHx;FpWyEHuB&DXMr3`d;&$blcmh6`&y#v>O7pAa{0fEmtN#xK z{67_A*f$nqF&6(8|B8KoGasI#a5p2i2v@q|F)-Zemc+y*__-JmhM39eX?0B{&7HZy zk8J6C0nNl^7WlSYBFIS)Nc>e5<49f)K9tk@vI3muuV3 zrDi+4cy-Oe!NK{Xds1}LaDP2O83?{{n5O}D2>5fEY&{WZ0BCT;YD}I*Tl0coKPxZK zjbu`U)Oe#8=nQfzX{ec$7M@d+*AW+x&K_hFx^goz4>fgK11uP3%Tp$ef|E{(e0*F{G&~8*G+!3XsO3Z8=rgk zi!wYKTivn>f4_hK$-75A{Z%-sitB*ljr$-A9@f?zt*v;-9ts0~7I59-Pxpb~Wt_*Oed<(^seDDJuFRoAOO2#2IA=YnA*oY4NjYdZUVcnP0hc zV!~pz(AB8x)%5NkGlxQF40`Jk-^jyjbKzslwr4ZAnK!xJF(IzEZf=`Z<8@qIVb>Ui z+6oc5M>jJW{d7h54$HkfyYSQj^TP+HZCXF&#;KJr_i6Wguiyn5Lxc(Zu%O>C&apr( zQ#A;)OzP&y58TkDeb?dmDL|4bxEo`aG+7IkDN3?Dq{$nv*Q2b_Y@l9eu2#KNCdLMJ zyhN<9CG|JWv&_f$%$gRfu3WcnI=r<5&jt8?WB{W1qTw+07B_?VYwAw6<36p`{A8o% zv`W!wwbDmh6{n3V?=~vC?lyRDr}4>7&&|%nb%nm?V#DC&mf_Y`ev8aptxcZW8a=Vo zdu6Bd)=uZmCLM>3Mv;$>ef)Euo5Rig_@EcpPrp04@5M~Z@Y$2|E%nn3#z$Hz`pr`Q zu~hfV3Pb1B=Aq|ym-xBlCq&-7fB)j`hqvxN3-J#vNK38A$j*<=ZOE&u&MHceiSYIQ zv}yCAhxg7ANi9SnjSHflimJ2=f7MS~jA>0|OPEhWDhLTAPgHb>rdGnI4$$8vVGqVg zqnYHAqx12;PYEk=d3}|2ZGq7Zf8MQJyS>zELC?yK?b~*h9XwR>=uwHo)8uWtDwo;x z8kk9NP%vjMck54gcA zbW$tsc}QSN2q-fq+2P#pJ#KHZmd)rzW+*02!D8aby=^OnyQ=pd%#WGakr)6(rd zdl4#X0TZzV0_xB}Z*x^?W^7SvOn!Q5MPWr(OCO!alQ#K@#25#J{E;p_q4d*HKt#gp 
z8$7Y(6o}~{qyn1)92#!CA;P6zB%H@$N%#<`RBB2}N?crALqh|H!{PCG61@9ow*B8W z0>xsHH2x>xZw&j!Vl2ku-{N1f?^hp+WWmZ8i8)e+laL&T%@>S1cnI;_n48y-lV64b zH!j^ft2m^tr7W+gxS*(BS~6mCg>}V6j)McQMetAnx4GCGfOs5VMwwD_4;bb`H&fI@ z5%lzs+uHluVxtlt-h1)wtB<>v??Z>Xk+J?1%76%~>+QeOH?)NhwiN#SF7mAM)>YA| zhh06dR8<$#$Rqm+z0x)EbsWZX;QP~v z$pwii5kHcfi+jbA6+J~zln_^x=$9OIg-ARlgpC6{O%^C|L7vK*$Y5Xm>blw0`PQZ1 z54$A`qEv}QCLIeYBE&W}xi2XpztPE2%y= zLwv8gyWDd9=#dzakdu{@luN0v0CEKIoPhf$nRJ-YaiXv4B(dNWyZq0Fn7wV;SM!q` z+`rwue)IHScMn7-pDr)nRuEy>8LA9{TKJenycUFxrw7WD!Ze66roDa>hTdCduSShi zbsHzD_H9=jx}nJRFyMyikv!4h3xr+O>)4{6z0tV+$#2kE6?*f+%>|RXe9zDeZMrKo zcmiV@XbQlhr*Ed`hsoA6kfI_oGBaAR!usfLJD2lo8(lUEf@Z=GBk_By_I)bF=BUw3 zrp{QUd9gPAt|bJ|7X4Hf`N@L2%D`y^2PY02$Z?f*Y2$U8lSE-P<4T_{YyT*Pfi)_2-V|4)!bj_pbdq(;#7yaljmv&uevG zZ#4O`VY2hu>F+l#@VS06%;!T|VPZv3b75=C{r8_X+aC_}iYUpfDl6}nTGj%AJwU;_ zL!w-6Tt58h@wMRm#9+RYk0<%9m4PA|)Jq%6qM0kN1SZRj1fk{i)b?(K$8-}w8F2v;qpEVLl)St0g#3WHT* z=_u`}lLlV|xMVvmHaHODF+m_Aws&Xe6lUcXQ3k0nK!%h&aQA^5S77ymVL1c}2xJvI z$E%ervu$|ty8LHIL3mVSbVOHtn4lsXmja7PcGNb+AXhFh^oPa26t9!ZoE$m8NtR4_a|oS2vx6BAQiUCm~*M_!e@#KXbz zKW%@bx%L$pcX*BE3 zwnLdIw<_~W#Ej9Yl(zDuxA(V{rT!&iJ^?r|j7{z4Yf(Ul2FhJR6R$LjyUx~sy*j=3 z==SRu?0&vpKbUF@#b!`~fFK9FiNIdOr0lJzIujav*ZJMELwgR`th9}ej^tqs$=$N8 zzwT#vNH>utxl?0&7$OKl0^%tIE-t7+=aD%5Uvl2A_^?#R*#f=NGIg78A9mpS_oGp{ zUw&qN^C<|-W3_bxW2n2YzdRHhx*=e$;|XR0%mV&=TJ5BPSR-nJd1r_!y>w-i=TgV# z)}isYq6=?)44vxm6$SWcB)KRu0wzI#0R$k93xZHpR*>#+#3YXY0{CEGtS3uUHt$t4 zfHQO8(@f#diHsl>MhFsnjOQO8S3e2m>7ZgOi%jdq-z;>$S&l3E!=%znMnE$Kupomx zoq>QgxjJi>T@)c^2KSlP(57(>T4+#9o@%N?SfV#;H(OKF$Wh*()}rU zM=XdhuEv}`>2>|`&3&r_9&apqzfj~e75l0P5k>=UDt$kuM4eG~U!~-`!q|V6vG+>D zAFB<&uQUCy-t@(Kj@uf;S0`rOJGJ2L z@l_vA*u6fz%j3*jS`HfxG!9p ze(Mor=Rpr>76}_5Zz==JWY~Zvl#>@jsE)$FOfK#$qh~ zE&gSsW8_bhF5yB83`bA>SsLTNnF>W}B-wSwGKyU{e{y?by){`4q9hV@Yu=BGD zgF_;vN`5>ln}DyhMdgEo=LFa)fyfZRlm*5F`m*x6(|ILM^=&B~eGP+zAr`qszzX0p zPVsoSV2W6PKsX*i6Tm?75QsE6Vr2m+Qz(eZGy=HD<+)=#s^nfWG}zzL6cg;bt2}o% zaIayUy#O|KM78)D)FhgmO;w8M(RrL`HvgQ$Y3Fk`$7};a9f(yg2NRY+ng(Pbs8@i# znb5xtz>^}3e&Kg*+tn>|cg?apv$3S7WEeMT$M1auBymts7NHN{N|5}};XzhZSa@$| z2ZPMasmS}w@2^Ssr=i=*=&k1VWSeUti*u89R>mFi@I2xY=it$t;+z+m812)P^@K6B zi6=7Ug0fI72V6~PQ|BhCaYE!F6*c`tDQ>!PK`WoVGItA`o7=gthqpQ>cT$XpV$oL( zN`OA*g#=#_MCvm9bXvUhI^J9K?Vr|Qq|>ix&}XDgI%O>QIvM=U*xm}f2nii(z!CWY z1Jtjs(kQ1+Ubtvz^}LRy29;Y7#ak6<)2S4BE~W^e4}Dra)#hK{=lnG% zHOFOR-b1~*rz%w6S$R*iQlC$E|IP5r25r~X`d?QXd|6}iam|DmHWMDMv$(x^+VPb} zju-6`zFd!fd-~(C1ur%jecGV=ah>+t^@gw38#r#zbg)ysyG!NHF7=07w4SavxV=>8 z&PwC+OSGQswDLT$sx$?VpIjw89<-30X7e}G>h9RU*8YjnB zQN(1CP!@?*jQ$39>37&@aCRw{Dh2nXyB_pdXW^6ru0z#OI`Y=N62n|k|+PHI9;@$(k_ItzZ_mphf*06e2@yuy$b{mnbNKb6ISNF0GU8MC|c_5-BmlcJV6 z+L%0?rg~O(!bR0RA2!#Cs%an?pt2-4f}G4E@9zO*0=>SrDK0+p-jlNjj&88Gw_mZz zcG8YnbDrC(IWIE(G5J#b)XcmY#P8!EVtRYhu{UnV_qg3Y^YgX8-@UGqbAy9ZIH1S{ zSw1EUG(FstBW5z+TbuI}Nf@-@r&_v~iN{A}0&yEtI9~+Qg)FP;TosQGC_g}*8zo^r z1QGhfeukZq)63sm`cFp%)8r~FQKKztek#X{G6OGDaMPf=kKxNT{XHQq&ci znhvOsuS2L@2f-74$eRTXrofvA?Gsr=YUH*Fejei;ZdqN}yZq6KGdVsU`Qg_pqPLT? 
[GIT binary patch data omitted: base85-encoded binary blob, not reproducible as readable text]
z{x*zmH)3r_*c-GW=$|Hf!s4#5-7+luPMm#hM+gSU;}E{l@<_clT=#Mr-c2Q7Pcrob zPxU_&{lo&>#x-hO!5p`K)Z)!wti49>!4N1+KM{bgtIQw0`897@Bh4}PvcQciw&w!Q zPd)&j9c-{s4|rP-ljGLj@6w47SG|x5qVZYWx;JQ&@!Zr6PNi-|j_#W|h#fqj)8o+n zARyLf?2p_{=|WNcbrH*5jDh84y*~WrMfrSaV{MH^@N^F@ET|PCdG&iEH`Co84RHs< z`bEflyhZ2|lF3lHWq(tbsTSmqYEGh1QA7n)TBeQUVC@s^_iowHl z$ra=u!sX>fOmi#T-_dN`YmK@gb%B)mM(l9r5lA>Nk6pEzuE90CvTVD&X*8zd7 z5ngMFXK4h&VUmS{a&H`YqbBf8I5_V(vhu5IoatZM3I4RB z(DY15d!%S=&r7Zk>WtG;vQLz*9_Qu|C6_Zvj5CJIn?)!Pftr6jhF@Pkh=rG-4zvSS z_iXF+8#%66($d_K&AESgkIL$~6?w{IVeU~{`u7M@%}VY4qP3$1c>LknuSo8aZ_yD9 zy4hvzabo0}Aibo|Y*|9WfM2ytQrtC|R@tXgFfC&mf}hNl-ObrtHymaZS8>a%9T$YZ z=;5v{Bx}bY;emfNa;{qX9!0`cUJZASXSLs2*h8pnR&!>qZsi&Twf zP`yTmbJNGA?HueP=(x%*d)7OJYo7;T=*vNq+gJCJA~8c(LS)!cLf%i{J9DxF&dZ>4 zJZM!6c1HOI}Ja-Gr353YhpC{8JHwK`nH3*;Y?8q#FQ_( zXt;8d8e+p6i$-Hf`v7QZt6+6fnI`K))`@UapjJ!aekW7kNve*2GkiGLe-kJ9OsqUg zXH!i1*x7l?*`>?fdq9y6rz4qLSq8;|Dlsu|kf39Nrh&Pxsx(rdqrnaED}oP-(nLsY zk!027hur<~wXK2fTsCtxAJgZP`J$QE{Dp;@EM?i`%O&Tt(AwVBaUT4MfnYE_P3bOa z$}!}oUlbW$yvzHZA$Wyo%|=t)yjG9L(Dvps=r`yHMxa#Ko(C57H@`9>27-9|eFT}- z*hUyB^HNKSzj9D=WIqu2F0$)sBz zrG>!}w5S`AVflR_iW1i##C)FgPqtNfke66Uzi}BINoRjS=LNSJSHn~t8dM*u3~j;J zm>60R=B&sf8H?%aaN5k+E0f3cKK($2RA#%w?z)$ug+*bAdN}p;HHbya$SNiI5ku`H1kX#I4ib@Y%ByKO2w0J#z3!u$BB>NvZB}JW8fV&Ze|5~ZG#{B8 z+#fiQBA>~qrzILt%GiFb{eB}8J|ByBElO}dH$GY#KZGcmSA0ERuC_=5nhH=Sd)nbM zxK%2@<*YyHs(s|7Z1*cTPN?rs71CIbvesd~C@AEQH(Ndg7c-xn`N&+~QEpi@0RCb& zBk!ubcGb&nDH(IQ#x7)8{{j8Q4O$U=D{du4w4g0+^D?dzBx6_{^Cll9q4_0DPCKdR z2CX1YC1#R4LQ14QRsRp|^95%dYQd>v`g9`d8Sr^hx?9K#m%2U&-uz4ryk*D{tYcey zGfWy0$|O)T1?JO1Gw!hYasJ+}qmY}+%Yo7<9KH1?rWE+x!{7QwIrsCVReW{mO)TrW zA+yihzteEQL22F=IuLyioH7Dkvfy@7SN9-sv1Nh>&H6~TX2^D=WD8~bI!^e&kO{6u_S zUa2{^5Z$k`<*07*4+fdt50AzHRRuu)-8Dx09IOX*_k);Hfh`8(8?A}2XW!GX`AjK7 z4OHLsNOn>be)(%E{hVe04fS%q+aHv12S3w5ExYZh$gGH#fuSCuCidzVRkO{YsMHtj=z%@do zOcZ0UY4vs_y%qOS?M@9_6EHve*s*z!G1Op8CaAJMIsF>e(Rs8&cjQB!-bt-+Hvd;);He?RRA7U7?9-Ch%yuTOIA{Z1r6#-PC5QcyHiFJj)YicW zS3o$Kld3q($s;6@QUxr3;u?B!gJ^YAqavgw|a@F%}4C zp&sdu_%K&!G+b`X9@sU*fCItgd2D@YqUUEEoT6WhUslU)FadDLZ6 zy6iACI}#W105?o(dQ|Ob5I-nnahh&6w6VLaOP88CSnMR(IvyeQpy0mXa*PZd7>c znF&0<({W)-1}oL(>_p7X-;w*1ga&>G)B#(PrFmO8F7-Uy*2bF~{-=WdjOo46ivfhJ zI#-{d5#MIVKF{)3<0Hz=0<0m_MlHkSrTP`Bo%8m;*3&!JQ@(GI+Q&f?F!TUQ7$F4D z=%W2l28O;F8o(qpQPa_$*m)%J`#mZu*Yjib-?a)<7PzCbsE+}q8+{-TFmFHs`^mtv zvsGkz|I*~l7DA25tXf!9SRo=|^S_|agyPOqa-P3%#oj)h&4q{?%s?U(LjE2V742Xb zvFssGK%jZ(<=Yy4EqKJCgBsmDM+!q~GwMkn1qG)gIxW5{IMt8AsC!w6YwON(Ch}9q z@Mkvv{D`{Oi(A78|H-*k-B+oG6ZWE<_&a|fA`3$evrC9Nbx5f*!+~H2Sz?7a3(`Uq zA{@SrjgL@N;@eKO6?Pr+yS70+h!JuNhfIKkl{swqtZ5K~pPZdZRCpug%JEq>-%6*g zfy`s__`L3t3hlxjU{_k^qB$JQcelXDE(WJRpV9_I$dNoeH7~Dd81^fh?9bWDCWu}t zS4FuZ{?Ef1Q9bUT^M=;LUx7Y(w=|c(xqXs&2A!V&z3>5@8>In-m^oXJ^NRjY|?UrBbJG)?gbBfX+e= zpK7k;jNpF=e@$ONIMdVl-<6H)VU*f-@Rz}@r|U(;87n0|5y^m5bSK}^^VqJZ#44|b zf0Tn!rOipht7cymLt#RxB()+obsl*zr@wB49Yetv0tM%_^zWveUOPyz)a1;xy3ii{ zW__cGU0S^{0dhmFrxb23)2z>I~tDZi(E>DJgX0C?U1{!=3oVtT41R5Ujy4>1*Y7oRyqx zLdk_Z8eXu+;?k;3=#EpC)zrv{v(L=voVwdM<3&F}{l2!0b?xc&3F532I?2|VCimnt zgM3ng6#8lK2og&>sc@3C$V6|+ScxuEIQtXa^KU!UQ+u~%U}&R5u#8wO%s<$E)8P24 zq}_i$O-`uaE8L&pvuQldtxr~xmo}IsvTI3e1h+AbJR&$mK2=w!&5KrK=2O(0pc+^5 zEq2vwK|ePyA~CgAU zK%#?x{P<1(Es)}td#1o=VAJZ__J$9o0iwNYgh{^8n4OTIHc@m(+QoBh;=AV>EtHO1? zH5t{|1wC- zY{wh;gs>%@9fr5BeiMgdS;wI8LHVvUR*`*PZ1oFlHOlUVQ6_g+@kj`f+ZuWq#p@-8 zqC=_(augZLDLonmAyMW{Rj^=cmX?xI=d2x^RMrf~P*vlQlGoH z@j4JG-Gzmf5vuOfZ0RUs8nusZKJle#G-`D@a_yh)a%L}!6cTx}+ zaTcwFm1La8=;iRRd8vDwh_mPi=9oCSkh%&=U#Bg;UeYJG46-nLh6#9N!d0kzROjeb z7ZrdvVIII|6M>nbQ~}cYmP?Y87i`2C&P>c6YcmQ&#uGfcRR)BaheWm-Ig@4q9)Jl! 
z4;aXan7-1-yxS1gypIXpMYaDc9+AtBvt#zG4IB)XL>c+ z3asPANt14fdz`qS_Cj4R%TlhAaDd-wiV7}bl72aU?bQVhqylErXxb9wF@j?!A(7OO z>c(6?zuNv(#kgy9-Fjo*fbB4r@tcEBPC|=87ckwzBE_fM4$ZCTm?ZsN^9!$MEf`gv zOzjYh`jVj?#JQoC!u2gh=eVZbd{-qmt?MjoXlD~N`!SCeV5RyDF!=tzbQ1kaMSYHc zV;;O}jl``9NmBshiSVTx?1b7;HJUu~0SPUagUjo2ImQCr!q?pF72V*07E7MoF$3Ym z?P~WO`skz#++e{U0 zB#ndVCpX{0aQAWM1*|}B`p0XvP7fQiPN@j5X%Ot+s=Cf5nTS(;A+%jlMa<2B`0!Wb zB+H9ohm^}mJn&6U2tevT9*A^y3YIu9_#*qZ6CwXIHCVA+-SV#AoiD>jPo<;xX`Tex z0UCzUTHyXBGV3^*Us3ylH9hZf4}2%d{9LhW+Q%ptZ@G)X@o18!QX>8n#X2T zP^PGPXoQHnN&z&;Syj6tNVo+99cTxug1aBh$IiTBxn)#6mR{G!JuddqIduCnYzOoK zrgxLFJ7UwX!VW|%eM5oZ*f*8+hU}5a;_<~Sn)4VeA;&zAA8~Vw zCdJj^La(JmZ#sRsFkve&kMnF}Dq7T{1F`2>OhhT#LWWGVC5Oq9vcuBt6cfx71kw!c zHVW4Ye{8-NSrX`i2?6Fb27#kY+ai_AAK3Z=+u=`ZL#$Ke{%I>8`{Ub#dUWH(t5#Og zw|A*R5mS@21cLQIbN9EszXoy$wBOrccn`ipB|vg4BS)a?|Ag>d3IP%C$Q`W5?Rzzx|aVemN;Ez;ecvS8a12b^|8Q ziHPu-m*LXrGkKOX5G&^I33n|1g1%nt`~SZW(6zdP*xvcEv02UJ*k5&&rP&^1n)ZV#vzmNjF10DV;mHWM|n%uU_PbJbP&`>{-Py*w7YKkb>OW{#i@4@=d zD4uVuI~seRPW?KzA^K$ol*XCeMui2Bi6vc7KUboulQy|APFUtxbPfukyOFLT4q-z0 z*{mhWm{5@>Hnl&r%GPlK_3K2&0$JG_yi8ra&{)Dqcjbj_BtwtC)CTf_)5~=91FCw`5g*ktl50Bc^gH`5qtgSMOCpG7Vc`1&ZB4Q&s)GaLHgrc+&3gY z4ceu@ff67Dg#dwSOMiK3a<=y~u0JZ?vJS6!)>`0rtXA__v!7=tzFCr7cyXQGPSaQhzF+0`$}NP@H?yyVK>ioY z3E7HhY*4S&)`(nEvsL70GhHE!wMwfAkieV__K!%-JtJTFf^`wxG69XKaEdO;pWbl$ z=N*t!J3utz?4rRq05$5knN8L#N|Wb7scfY{;@rWpUk43K6GDp>MY+OVM(Aj9mebcI zuuVi`i7pw`YkFI-9)L*lNUi=(Qoq%zIVmMvslVgb&jNU2WMX3@8|zc17-Sfq{?u27 zl0QC#>1W6Wks|R62*o^I8gm7*iLuq!qBcscs4G1}F^}F>;DT|_rS>_nQ?0MBM6hOh z>U955HjCoZx`(g`lrq?s3t`DDG`@}t&E&S<->8ll0M{jBshFgscHzR7rkYNHQEW@@ zD68wA%AzZ`kG~@|)ElOuJ-w7n>?iR`Bix0cq`Gzb=J6JzFu^$Z?N4kjq6YDMm`GeM zZyb@@4JBQqoPO=1%rG9VJwGp@OhM_)*rj;2Unf#K(5} z%OL22e}tg!JXJQRf6HCO^q+r9>DF~{yOGYV=$W03m`y~dpC>(*yl$aRZd$xZ+H``A zZbHlbt~f2E;?vs6pnkoVzJj;hCDNo~-@%5)0^QmkK8}p^{D!TuyT-dd94XHY??-;G z71sT*CBrk}aZgj05wrD!CHn8#?j96ZX%#dKk=G(>LDKfh#SS$901kH-=K0+_oa`OQ>395Q1Ztgz?%)aTIdVrC;2XmS~i)*!JFtM}GFV9U^ zs4sSo5`(i8-t+~|aVzkZVKVmZ0pSFzh!Bd(@|WK4a!`%>>cBNmvz5Fila{zbDTzb0 zb&O|h3;JZ5j*NZUt%pBZgKhkA-3~ZzjMH{sKnkgAx~j|UKNhg8lDfEwS=`!rT-T~N z*_7l@AUrXR^VU3asnd9#S39}|puQ-s7N%yYB3+gw> zoRs^ew|?gqbZbq}@GY)Rrqi2cctAKv-n#*wNQaaI!Djq*U!7(33=sGqDEo#{vYnEw1rAj>EG|3M5ozN1s2OENb$F;!xN?bF&DvQD7 z+B=|QwVl0l;&X6j$kmVYmgVT^Pbgxj!Thza=>+_A?MV z(Q$H_q)n)s-21Sq6g3;yfuc)pGin6ylAPQg~phXg(ZA41w(+=RSR6{^! 
zD)2`{o(0qskViFa9LveSwdO{7m_S$z2jF9CHRuDMWAx&to???$oqaA1Cj8*JErGcu zwj$|4dW#EQf`i`ff8s#xhhGBVGW2mp1mNjX@sIev&s23EHT^!ny{BUDh7uwr%i1eB z_O0Hh}%+xnGOj{p&WhIrNjk9!E#@k!$wf1pvK(KGa}qI*tgr2 z{#0H5x66003yfJs14cj+Kzz`V5+Jznr+h%s0Hp`dfDYhh&_zJ3fcC@?2zhZFUZj5Sydg3M@eXFvwd$9)%SCoH~n?o;Uc z#}lvd@&0?ihf@#3AV2NOtI=K|g8Ir|OLDgVwEr2qv*)m#3XS^wkN)INe*Xs%r^mde z3SGt~8xEF4N-33uTSy}Tgj-yB z!%jt*p9;*xwId(kmq%pYWplxj@&cDDu&1`yY_Io0vm3)iQb7XEq0VL%R1(=vCUAan z_3u8ZevX=0nvn;RcB0Ui*Gj^md#=<^WbcKWXYIzwIBxut^v{gT!eN ze1OU+98Q9SE3{V_@qy7x1C(w1q3c?ozxqp@@`aMbKgh20{}uI~@o;cY*l;2dLA2;y zNDys_-j=A*dy5{?dvCi$bb>^0A&B0hv&-s4Z&4Sk*VS2_XWjqject_WK5>3CbIn{c zXZGwIq%NAzqkoBo1_PGHzk#TH-!0h*|eyO8AFMQDU)Y6~( zgjSK_CLOrAkj^VOKcitXXQU6D8}Cf)V<02nOHVs^%b_CZtrH)HzjeOb(G3atFILTt zB?DIZC6J>)T_3{@uKu(~@JH_+?L1QZG$#6P%s@S1&z5az8}oZsX47(HE0-!i6Mrw% z!&~NV=i5PT@%p)v3nW4n>q2BS#Rnriu5ZG_u;zVgYUdw2PS@UdqF9S9Tum3`lGujk z`U%k|)m5)kOSx8lmIFBP{mCB@mrxGg&QLiqs6PGs7+3TrrXNJ%T~W#7YgtxMF2uAt z8IvAI>zcmC&Y_6k+bkSvFo^$*e&%30G}`l_mhjn>Op7W1>S!DK*;I~Vk4ZTjxA@t7 zX-D~UC;1-foTlk#bDFr(=gJ(apCUN069#C@-aXWM;Gt|(LYI6kx%)?a0;>(K8} zH4=8Jm|uQWxF$XEGB;>>dGa}ydR)Y+%A=AodpV4#=|WGkX+0|BcT+y4n$FvSNsH6}Q-Ci~6svWPdfEAv$|LONIF@%|`I6CHCKoV+XJvE=F~8bz(|F zv^M9DfDLYm!Imq0hsE$FWY%mxdD~icXpeo1EIqw)i$m@OKH=QsNKQ}VPK&$s?YvPD zl}{Fa^5!$m5%&{vuDg4NkmgH*4LclS&S&c5n$yBOG@V7wFemfbR=e*@_ejip!!H-l zIKy1d<^J7pR$W|tP#r(5IFBt1c!}o}GAb!=+~WP7f+F4CZr7u98x2MF|DfQ@LGVA{ z;;Yy(`xM)e>eU++5QZ-w<)SYmO#3&`+*UeavpZ(w4hDTo`t9QvN0K}Ah{B!i&`Qz` zW5d)-rTfou#x+{#!Df`YmZ2;jILPuB39Wl{v}-2?dP-X zLu~r4L*Xyk)4s4q@JZ4ya*Vfmb5;g-V{7xX8mlkTOemrjK8!JcFHF_dv#bY*5d8DZ z%_ji@|5mHadK1SOudV$dVsN7R5zYU|LU|iF(EW?yZ;vLzX17gW&s}d~^fYz&M99P6 zHY0jr1@ww5IAW&b_z8(_o62Bmofui)b#CqE^YrAWv*9?dG$alq0dry zgmsx`^tF}sDxE1B1?&HzfWAq<5f$9SyL?$jILh{gPr>i`62YVMp&?^Y;mhu-Dt3~~ zRx)xJ=Aj<{Rl@c*?_=QCWuf;Hy6q{C#a_q}M07!VN+7u}$Btxq?{ssy3`kp*rpyV3 zyIYBmih-Tn9Cz~SLbYp@2MfuN)jK`G#e`%O2Q})63MQGuhv4f+C5c>(h9-FW#$jnRXq zjfX;YEM^3I*%aejdWy!I4u}E;NxS}Uq-OBT5&6{+0%0oj>!?Q%V-s?FNu$Xe+YKs< zZixer6Y3FDgw9PKr&@s1?ulT5!~Ny)Cp<}Rs}Meu;QA>>gb@QU#O2osLs$Q%N0uB_ z((<(QdTTm7fb+9yOu`7$c5k@Xn?B24pXd?&q9*JR8cEm_QwXx7Tjamw;JzeB_G)mh z(g7Lyg8)x>k9`^py4#|dR@BUr#lxtPY>MoI3j7{b?IT$k0< z^9c53UjAccWM;)_d~ZZ3NP;=5UH_i&N02$CRVsKdZQH=FJ_}r6ot;&Fo)0Ophk7>D zG~fFVx9Av)r+)bQ8@k`zT4+;JTI%n&vAyHBn|T68{C(yW&#~I6NLVWi!-HK( z-G_f=h^XwkboTLS^xdbO2=JWCk==C2KFL}b;Id6~_nnyE{qkf?&ByFI&j09ZcEuf^ zz(o#*?X#_jC`zUHpw{etyS+vU+%a`ZIn5a(Fp`QWqB9W&oSo`E2(MM`z-1DZQ(N5_6w%${a%kOxn zovGom1p8CxXC`DF-{xueC4}MN

      pJA@WD}7)CiQrfI0*9O5|Bn$?$ApB)d=O9GSe z95+gbG3X+}vOPe*jTKqoj3;c%QB7?`+B>kWVLi_=jS42eJ9Eg+7ucSC<0mHkUL5#-DE+(d z4rwSU{ac4SN;bosXp388;xX25;8TYXRF`H?mJUEKZs^hr1Dt;)qet6a@N2NFAoGCs z&$+~k4zvIAWE&QZZ}o0=K{G+#4edq4BZ=`v7PdR=He6$2^Tvyn`BCA3quu9pUox== zxp%+LV0f_Hn#NmCRNqzy9J9pIENGp1Q{4{LIMa(Wo$)sx*&#+pM?24rJR7&~X*2KQ z#d%eM)hjpoaX;_>w%osPATQCAj6t6C$WzI5R-$H!tIXB8je0lPFm}O=8Zt=26yukXgmHpckjkQTZrL-2|%1$&>ZG9SM`5 z4awmapEVGKQH|t6DaQoUlWcy3@doG6cS%eQ_IpX~@Q~c{ zzLIvcra$|66LvLL!DP)gD${|i^kc3&6-+a}r&I{lokVj4m&*C$Fudna9^i716x{!L zAG;6@@VQ7iuro#_j4o90(DVXZA$_Its^omtTVtHYJ|?FTC$z|UV6FA;;}FWz;p3@* zF+-1f!BxxSVu|xCP|!)&kzc!L)upJqe9u(|bFK#36Ad(rXpLXy`K`X)#(h;wRfpP; z9ie1fbdA|xcnWeH1t$fQH?GJm85Kn*1!@pxkuG%+_WJtdgcFOM#VOtrraGsbuQ;Sk zZWx)ldLY?(QDg3}W5IPLv8OW=~Td@L~Q} z`lZZx%SeMnHr`?b;A$fY*Aibzh5#|Si#k|vr(naX!wWvYER_%zI^*o#4%G75sa@<{ z)YU&6neoTcM(78Q$nfEy5fSzCNORDXC9-U1hyvHOv0x&97pTL3WE}-HcZSn1R-be% zu59C2Hy+cxy(7I4I2@Avd9b8SUrBHnRxJikNx>j^?8tmF%VHW+&58HcUi zD`SaTm4xK7S;)OhA$Yz{nsWkLL2u-uVYP^vc1eCV)Y=6HiHBwUsz^H!&tJg|+BRff%HV$=Sy(W1J7_(eHGPV?6c)6M`^6sPz;DUbOypF> zcXOreKK8EPAb%-dauKKO-GY;2qqbGOEvw40d;CnHT~%Bu_%u_94W*NEdvj(;wlDtg zWy^CSlrTS)O^<8=n7Kfdq$Sae(T0o_78Ee9%F0Nlhg&v zH43Q}+<8JJhW#6X>tW8}N~=0F*LJiStL&kQM?ZwM{BDd?p{l2yj#}oPK>0m~;Of0A zn(Iyjr$d#+MmGtLr&fJ1+uJ!*&VC!m!saXJ5D)AZZN9)-LtZs`?gZD_(a{G!7&mI{ z-ef*pjltA#@lQ&#j9e<2U&V`MJvjmMn$Q-1(dqjWYYEVuyhJ0&w+755_KBKbZR!}` z?B(bj)Ozfum$|}FL_wQOmVSIAPjpB;NnG8@BDL5c4~x;PjMe4^AAUXl8kuEQjk zz1}jOk3d1UpEw49rqR~IWu?!U)qUyoD|^#MB-e?>5N`R_(3CD%#s0zxnYhOt=E2>D zcZGvNT@MfwG7c~3vY--FFj~Dvux;+oG@NBQvKQw5BS^wyzy9*1ev)1$f}7hus)M75 z)%y zBZFXQ)E;_VG38*t;QG0N9gW@B;!9{^t2t>)}>%|MOy7b z(~FK52$ifu6cXrhQZ4~G%sn>_?DRBjK$cAfeh#w4K~SL(d&`cNo_>9|E+luq3NniA zJ9wh~nSmQC<}l5)TZ?O&lHbNkf2jK>>qq;-=Jf9q)D}Bf>5ufOg>#Mp;j@)|4 zRfXIo;?;zVgkB3OJ_W7Q@v_vds7ZU+?E-rscypxQ8DVE7mb8?C)5>xBE%xsGiM8(G z_)8P*e=NQiMdCZtc7oq>uM^)y^=TDWh{r_PA#oEVjPpNX#q?D92w|0I&7nUZr=4jG zI-&#b@=}=J0i%D#)9YR*6z{Q<6;t1^XzTU9VE3`KHaLlwGS|1`Kc3br=wgcn17*za z#*M%%RHhFkJ?dF**fGbO9qD&u;=mJ*6~MbXU<%pjmALCWpo3UXXW=PEt{M4YqPosM zf5Q_{bk`$G*KgKBKJCm1h4lAh&megOR@ z5J0`nPft(3^5@-Pc_(HkA*dRVv+ZmxVa1*uPapt7mg<&iAFn6p4}c2a9mkKDl*DPh z8uX3(UKMB4Y>v#cey^u?B4e&fR{J;AQ!J_We6EGxdjgMO-sP6I^=f^AK>X&q-JvqZ zK1Yiyz3%&#g2pD1(;-`f$0-caO=;QO-fsH}h$&`cU1^!4k`yM;+bInlCzr6tF&2E@ zbz=^3YLD9z*0P_TpPi-b29maJV3z>=$y8OavC1Tx)S(u=9xH;|d%X*N95?d8|4($rCxX zOi1#c9Xnf{Wy_Ui&(0d6$3!XTh-+~%rna^gIa6iv&+fb^3eVEC=tAJ6+p=82)jqhF zA?bzj3-lwjB-c~S{;F?=U8LsjQoh)>_3ygp+Vl>MNW_R&$>HB z;dE*IXWrAM*C+CHsduBnaeM&7e`75GZKu=BR8GGj+i9aj3YTCQsgmxZ=Wf^KL{_2+ z^gq~u=Ik>+P`96f)?k0%MB;(*mWi| zycHmShaGrUX+kB0vON)e7tSTMY<2C-viU0@G{%~@U4`Vmmr}q_bR$%$RsK|d+@n$km8#(HXT>>ddeZ50k`9~OI8ul z%ReKR_%&%*Tr#_kNHb>zJ+`xG8_H67C&aFEj41ZYe4f|(R=##ED)nAZpHx!?+uePY zmB;t)9l=@uRJ^}sQuq2W4ogdn6Z@q&%Ze6QvG`L}qswx~^78V9v0>MDqu=E-<*na7 zXK6^(#;{#c3oBmLSFC=J&oB1%u83FesIT#H;EaAgB->j#ccJ*rX5JjBR40TgzDjU$ za+`zHV`xMh)|D4s{LZ|r&E*5^tBmfF`(g#0vH{hN+Z_JxGDwsS1jZcJ5duO%CSN4j zi#PES)kMZ`BcJ#?eP!J-I)z9;+#3WYa0gLpYOuz9NAy^Wla1k@{!49-UxifnTxCaY z#TDHq?$6d(cZT6*ZFUbC#qvYuel*pzxKH%#jqP@9k;4DnlA71ogi8Gy#3;fS4oUXjuA*=b-WX=rFj)C(Drfq3?Q z39?UdcS0yGee@Nv(j5(jLN)Xz{W)BRw(-3MaSZ(%#3mcu>297#PRX{%5odjSm;G*8 zty3xaQKStBZOInM!pM~^nf!Do#8aGErkbz_M=@y#79+`^?xOrQn`J7c(>_RT?WdmI zEshV%v*FT(!_dbjJ4;=;LgW`fZakZ3WTb8q11SM7ltpI12gh}-qBBjFwkI98*XPsn zEv)yxS%S>>d@}b;O-4?O&AxSM%@((NH;92Jj$6Jj!uR(Kj)#tuO@D2=MwCky?iD-P z@8w6;nB08)@xSz-B&f5O!*6F(P=T6gSm>Vr=*{DZUkJxD;g#h3QjzM7v~Bz45L*U+ zG@CYnsJB1}w;7P0yY$uelzW&zJbUrlpw%m;jtlp;G61 zU``OQpJbR3N(AePS@U;T@BU*MgLs#Skhjq#LyZ%qQWYCc0uUEd3d6H-}`0j 
zxWdgxQ3F;*V)7>t5w|e{E7#nQ#xT7ZqRlAGD|#<0QGxaXV`Jl@yDd6@fZkPCkmv$` zHa=OF_{HA=r3ux+5#hq-BasUYlHu7XGSTemFp1)!4tn}!nN>UsQ%K~FYs-%xheu@| zaEHMpRwbeFXTY?1!>f9RG2LUjcS8x?x<1e^PAn4|l&AYGd(-D?&AIv)w~H-$dyDSv zOKn*FN$xbBQ-6e@48WaLyQ0YFf!H9-3rVi~SN`H8TT>g-kEhRPx#-DsI zB{QN-a`ExwN4A1m+9MG})j1@;^3PuKN;U4u#Qj{Fo6&j|IeE57UEmnGhRGBE=Q9G% zx(d|HlZSZr5t@-zfCXfexcIxe`t(cf#LK7s9bG_PiO#b7*-43!#9at#o1;H^An4lD zCC{{p=qfT8N`JXH-6~>noZ#d-y@A(i09y;k-7pcDxqiYK@CYQ>H3@?e`H<*86P_&v zjUO#?xMCbK%BvPVbq;N$bbG>0qi=BwkUo`|n45#38KLp_xjv`F2+Fyccj6}Wjs@{@ zP-HB_4dlC#CU13*6*aax7vt?2+$c2o&G9pm>7ioKEZ-NSFWJ0Y@dTQi{M_8LGcBq$ zp2Jl+K3Q)3J2$fTVjfO=z!-E-zLeP#KJ4`I<__$r^-L61@A#41dBeD-;I?GfjCU=- ztOhS)2-R;=gnzxSlLKVx8VDkP6iBoqsBm8T+$w$bz09gUMcC1&EZYG+#W>`)H8g_mxDJ0ZXKx1iZ)oRDu4_mY zVA9Tzb24RN`~^H>`Z9MgrGZ$W^K)v^-nfGV-)~t3{|wN*VNO6pmp$9X!2Zp^zzU)| zSdAm|?7EL&vB_#Z_pT!NknMTyJjHDX{nM))r2-t8x2Y^p$=ar*SurGazA$;3v1Y*P^ECF^Q2U7wwKa{&doUA(P@D%-s~ zjnp+h&qOQP8t053J`-9M*Hbt5%xPYgMqsVnu)zrd1J{vI0M2Ort*^_tgO_zE(u0-@ z)lHtcKCSO>{iJtJ*b5}>22S4rg$2AjBYOZ3xpqZlCstjbL= zBflNOBHRz2!S-au@r6wtQG7b7v|xTDtZr!|`ZOL*vzZSWb>5hbh9<8WuA*95A9QZWbZ=Q7zUP@)|lqNjMS9saDGWf3R zCaE_>3VBmL$xg?X4ey6TKt|8pW&dan{Ip`CD1+y_at?c)6<&#mi0D=rVMG6tBTPhZ z1icU&6cKkDPV?Ukt_eHUg$>(EkNazH`3R6cki6$PAULr$H;O1m@Ef?ez&;JIzxIjh zU9Tr}Qix(9;Hmy2&kPdNQE%InH~4q6kBZ~A(!T}N$ixMDI@~@X;>SQkUFz7J zp0aX8gAe_uCz?PW%I}VU@M=jh2eI9kKy&10+LEs=Wc*z`YG1JSw{Y}O|PDr!}j?bN1Si(5PxF88}x6K3ovBqPms=;7W;?@n|TYQ z67=(q6U}wMSN61kW9-OU+9b@l4cxYgsp@)UofSam(hM*`h5??MByz&sew||&MvalT zj>l!;3wAC#D+Nfme6Ep%oVwS5Z+p z;yn5RYi#$qHQKfai=t~~^IOWuQrkG975{fqk5l$iV%{E0g5+i{Musno8LNxBYC0E*kqsB!L1 zZG|HVQ4G^Ju&wTpIggG?z%1+KTc4!1^UgLH^y6wh4p0+E4L!cLJI&2BhuOHjb&@jAMwDQhKS%$30Ff$^1AwCQ9BM9&FJNc1^+ zDFU>g*@<&gHd+>4yEnDRksfc7vK???9UM(|x9zn_{emYR6wehA44dS7lw>zREqY`3 z;0^8$d_?)rEKJj9K?U(MdZs>x1CBT_`)hV~dME!XXH-2uf(qHJiz$(~1Fg8<<8H=F zwH^Q-*Lwl(s*n1ddVq>Kjh~qOsNb~aE2W@L|FjDcRmT={CC z-)e7lILP!WO%@gwVl1j<>51}5(RI$-3E^28K1AkH!mHmAap#v62AjLI9%LK_u;0E% zhb}2^w{;A*vDk{}ZgClCcs0Nt$Qn~ly83ob6UCH|yUFNAI{KK~C`_*;62Z#g)Zo$OYYtvuEUF{*6*xkE^7K7U`sZ{D_$GWGP7P9hoEk%85obL6Q6@IXSb<6Z!(I+SEC#GUiDG{ug=-q|{d(XPuBrdYp zN6wF!LldMkb$(=5pG?)KG7oxQ;)~Qyz>k?ko0#X5_9zg|<+#O$JG|j{po$BPbk_Xb z?}CAMH=4!TP#%*LCPu}_JunY<6}<4&)o@dGtox-My-*>O6-TK+sz!RhpF{4G!G0Zd zTfEsXnW-BU>e=TsBxmSJDpXJ3(_HwJ#6@U^JygE4 zKpNc{#(&4^{Maqp#k%?D+J>j%j^`y)nU-tVn%eMl+OsaoV+Z;eYY|%!Cy|8RvHTCN zTFL6E*B4oe`z2Mbw_(OmFVg7l3w9SE7E%5SbZCSgk(JBoZndjhN+l&hiU!AxJ(Ugd z8fsXLYllEbA1KRr(|#d<>fJ{`qwdm#o+QX#qBdGP90Or zJ>5;RF`Ay46(ck9#f2)@x!hj>_WX(>ToT^=G}tVX2I+S)=G~k!4Fr~cWnU&V|fJ>zBEyurd#`Y5oBEmLhD zNEYr4`ux+7>C{sp9kaEXDJ*w}oK28$xg%7irKRP_dP?rdjTu0ckNrRx<(74{(QO|@ zVLxCXnQc<%Z4uOpeF8>8{BLy}(V$V+8)c)kf3rM)e8$);m4re_1F?I6jEck$OQqeY z5}Qb^quSdc*)+LTjOtG&wAi&3a}A~C0N@UxCG2A_(CdIHm(Lt2Dv%mW%0!-P@s-Y2 zg3{ZDNCTNrUm?xFfo_rob4fQrRsiWukVGEuit>|5NJCeGZ)Ak#7j!$}lpbT{Qb3et#VH_KlDtbUXbhlLjR71p{wngLrb6GaaItEiI~`4W6s-3 z=@Z!*6#ItUfB*N9G-Ga04_cDm%;e-`_KL<&Ha4t~3hnBSO$g0j zblO5UG-Z0p@a91SjtJ;J*3rjb^br>~*=x_xF-U8H&fKZ9<4Kx52Qqq6IXK>iS$qL9666jjBNJ! 
zBmM_voc_~DV*~dIZ={}m!WR0vMupK`5&+@4xo|0^052l1Jn6eqpp%h#`(VGf1&MsY zf_Cdv*XIYVTe;Ec85ygz#>Mek;4#DQQF$5L2l2P{qQU34Rx3Js0r{(cU%}f5j;dN4 zQw2?kUNb84pd}O0C5i3^n4k{47mrG#w4zGI6Hjw~JI>Ka4OHDdi;T!J{7{jyEy4qm+i|h@YAJznI-Nj+axE;+^+}IC&i2{h+)dVsUmoQ$_u^xDUB`J ziLfv=V|R1348vV#5c}$QZq}DSY*FZ|Wug(_A>}$3a??!AM$IL8|K$619zg$xOH$fk zsYlxbBoDaYb$VG9M}yJd>g0K@&ae#WkVBp)1>49YEcE?#dMyrSu16I@y9>`bMZhk5 zy|N=ig!VxWzf_R8%F|Xh%c*L7*{bLe{ZF0}@KtVVfrCDEB>1SCY%MvbMqT_nTs@Vf z?Dm{CVG*`{Zf$n2*V2M`2|jqC)#U7Hg`U9}U;f$0EFk z?|!ErV~B~hgHO-&{UU2tyk6`Dv#YB{1?*LuQe`l+I{ZxLS>|T4&+q>lPEEkGfkEoB+I=3dLdPZewI4NVDyQX@SnL8KrC4C zU+mQbHA5bjRSMGU%#TRikVqG=!l_VXNDbMo+R_rBLF^Fdyf;0W16}4zy4?dF<13dt z&)0T49H+0b3~IhADSQ$cF&_P3hcLS01|6nlPRQ$pjL_mu*0Sy(tW3}87XaAjA=>*d z3xW*#PmwRi3_~W$6*1eQTf@qn_S0v`?fwMe66MpNDVRU(V;-3??N|ucg%e z+B9nD;!Dl4+-Jj_fiZFRjhEmkWBba@^mgdqM+T{7&j4BF)C)8(m?q-wo19j`ffCJYU+4P)!f+>9#HXjm z8bU8>Bf%K;@EUyiPua6b(xBoUGX-=xPWivBOhaC+z`#_DQH~}yJ7S5xfDky;v4=B* zIZ)gNiWN@RqfXr6wZ+O@BAGTa<%-nGv0eC-g6;Y4NV+N`Z4?N`*=S>AVoyH3P8|LCk&)Psy}$i0z%r81qoRpUW&miTkz)t+wUEe_hO z9vo&5MDVJ1cG6S2CdnRu1=Do`c;fV)k%>p0OnhP!$m2Q1VJ`2jO)bhYWEwvV=oi@G z<(<9pDM3Gh;QRmRh{s0jPX_s4OqVAU{*}66rN0fv2iQMXcRy*EKbt7ugX4bGTRDS4 zl#G$+O0x&HHPCZ=jWN9TCKp}nGUfIVuGc&gMi|i`NEYXu*h@q#hEmj*lVdmq7p{VH zhhJFy4LX36N};<5e0q*K6^g@{bC^Iq_z1}v%RQP#9ttk1V9ts13m0$*-REC>A$30a z!%w219=YpY`6) zBiGrxM_;`f7eDw*BpkVU{g&6z>%E$bN|&!aU^HTLdOB@3ak)c71Cd9{a_p}ovSwj;f{HJ!=quAiT24};`h zu4*s1#Qo0An;U;)ApCS!IAV+9A%Dhn^Z*Sar9Kv|CUg=LD?ji=9 z7s3{dz3|*hzhn|?Q>;Zq>^#sn|Hi;cJ%W*Yse^d?GgvXjB331#GyEly zhWLLtqi+gESu6S^!M5< z85JMgDOepRND{&20W|pN?uou9c}?d_P=EJ=wuZqk@52^+A|Oj8fcYd8iIDp5wkMjb zISG?peQbTtCZ%~7lfY}F(gdyV9IpP%A4`X1ym==ZA}vY8#;xU#8S+R}fS72~{6BfX z+J05eepYvdI1`8!RbSu+toD3)TBH;Z+Nzve`kbg6_Vw*LsU#)DA)b+lNaw-jg1>V$ zJ2f3O*iGua(^B)af|cD@VRy?GZhQn{I=_{vEF5C4v=D8yS^HAfq9`Nt&_S>x)Igp< zT|dXdkF#f+HoaAZg&UTJX7e{8@YctHq+j2^9cl-TScyl9p|Kf5U%!ngdT&F|_@rTI z7I-w;Jn$?XxL($xjl99XqW?OBpF}Lu2lBF>7QAlG9DgsP9sWRTvNpH^kogKrYO5Ox zTD8C8=Xuo_?aW)TThbT`xDwdc*>ATgcG2fZ8?gt?dl+&+7j#jy;SG)^Jh+PtVfc)L zl2+)(%JS3taHa8bKo-0m4gUYO4*tT%KqJ@K>}hQ3&0M??fpwUk)WUjl3Hv#dJa}RT zwPMU2`_KQLO5!h@R|7`%p2i{vgctdIRGMio&`a0_MDS0#t zT&lwQB0l=1m6&0X@XweCn}-ObinFUyQ!LibT-0`UKHuSIXH#9&_TD07ut}^-9tRG@oWnmy6a~i{8+zI1RduDzATkpLyfKe}hg^ zYfs(0+T$W3UZS0xkIuF{4zfZLHLwYA34caJhI|?ed;?KV3+Kp~VM$mpF&seqOPOTy z{}*L3_O8eTO_Y~cxzk4{FVB4YD*-0z2q!2~k7trv0M%`M$p?xAd7$z5A2+S0)aSCV zyEQq&V_V>JuYRdG!&qlX!8I|4^>3Q{p8^fnhy?AYHq7Y5Yl!~GCabEB7J3F_NjW(= zqyu{7)dljVgsFUbV;zD+*J8GdTAsK8{q>$l2|^!aWON=>@QUB}6R0g-Ti8-5iqCL- zvcaLv1z&*q++|+iJDH_7G>&SNv14VGf!|v4ntJfi6A<;jXJjn-gmxL8mstEYob1ZF zxbl7gf63yPm9y*L4Y2|z_SX~qP!(13S2V=~UW^&)-%XYz%7trY_v^sdXt#)vdvG|O zT>HW!2V^vq6BnLL=`!@ygwB|~YmR;=X1X5GJiPny5tyO?C|KyxzQ4@;Gt+(`WHS2u ze}#*79NL~e5fzh*b8g>!>eOWKh3RNw9?wpYIKj-#dZ5s3(C+f(rXK#{%I5SVX-Sj6 z%R;C(Z|KqL%8Wch2UH9Zw9j62Z+6By%;7owyQO3v!OV|Y52b+Z)+DQYodb)&MTvVA z`Q*Ur`!;PNIeIolgVDJdTu*e(5|Q=l!MUiZ>I@+Ld@~6+9a!%qSzHL0cd|(JtgTzg zPh#lRl%zAW2TC!yKeyi O{wT<*%2Y_1ef>YfYz1Hd diff --git a/docs/source/_static/thumbnails/create_gnn.png b/docs/source/_static/thumbnails/create_gnn.png index 9986c09c1d2abfebd3a6230946d74b4632418c3e..717ccf2a1cb21c5dd6e53d51a50041b0bdc14cdc 100644 GIT binary patch literal 21717 zcmX_n1yEIAw6`cq=cPeFgiCjKcS@Ha-QB4)m+o%q?(XhxkWNA1(#?1N@4fk)!5D{g zH)rp?)~^;sNkI|?kpS`CyLTwkQew*Q-oexWf4_i#5Bv_(Gm-w^A5O}WA}}i>bU%TA zAlOT3IlX&_1p4m_=3QC_-n)0Ip3-8%s_q&8GU2>cmTrc6R5)VL(}c|>K($iv)CdUp z#xc_o9KS8pSyz!|)N@C=reo^mNfBb+O&Bs7acm5F8SV0uk6p&`QLNR{kBQdOk2KP) zy=6O3hz{2>FV0=R`0Cwr)p~EnY@BC0O|=}#;pZt)VU21PE^f^b;aleqR-2x%DQ$`-9C9ngH5;8T*<*KeyCbw~ 
zcUUM@YiVhjR`Md@3Nl101}iY6rKe|mopGavu+jw)XPq@MTt3{xO|AxHR7E*0W^Q@A zL|mn2-|6VPkGK5{OXu}Su8KYnA@MC%FZ<_nj}^86d_H7&ax#n4E=zB(u(d`1f$+S` z=Hm5BkDJ|wIiZ?(Xfr z5QDSh4r4q0^RDOQI#wrScD4=g#sUxiHp9{)&ez){9a~6PSnJjqX{KIQmeb*6MOj&0 z&&oF{IuUjU<(S080qA1*s%o}{K$$P}PbR;DFwusqnOD~fb`8$qc9)fViYCXDc>~_SebX#Vv z$qBR{B4FBwhK3HILnQh&hC%&;KMe3p ze!f}KPafOsyVDLVN{fY+^+tkhD7o$o_9SiiVNC+r_W?T@7aKcry+J%plM+wv zj&tJz5h*!91fK->;gdj=3P1N%-la0~7$>%Q~1VT!S_F+u_Y9$wyw$}(0{;37Td zaS>+;q?0XY$USd8l?01~*{@BUEs242(1_Vqf`R7|VV6Q~VaSS=OGxnx3JTDHD=!4X z=%$rFba$9%c-5WV6Y0{eO=jS%AxvKKLDeLSMsC#0c>91Wo zXZruou51oD~xj6C3+YS65eGKiX~Q1*b8a1P^by z)+8L*pw67rWJxXb{_k2DYJ!RmPFGOqO1sAu9_4?>IbTV>o`GeN)F^YSl zzx$B^oMQ?~%IF%&|Gmg$Rzh&RJUpdkWmlJ%JG;B0qN1sE_;>+8srMGpC&OrJYP!3< z6=5%TBY%0(PLw$>k|mFei^FXGyXpHxZ$Yr{vB|ozDgJOa(qWF|;FI%YbJu5eV9zx( zGxK|boFxWf=;zzhCLxFQ{hmg0y0ts>%H{20PVm*Ry`$s6r@a$u$7t(qz38#Fski>) z-D3R>13{2TPTKH$%Lc^1@#*P=#KgANn886oz=k3OiL~gV4%FWo2qD(|243Lvv-h$? zPfyPt)9C1EYg-$M*C{huIPn$9jx~Ijxo|tQ=}|f2Y-4KTrL0>~FMG8hDbhtkrTm?o zwUxdoVvywytlvAJDr4i|^qR9Q_NWvon3$TXtE>0AHI_0L&mI3r>b;<&p^=f6?(v-f zGwobRA%3B2>kC19y1$oWuJ5?hz9c&=+r?GyYw*oNBFWx4gLJjdn=KwT{o6bq`*ZgF z7m8p4ajccv!IfW>4or=vb;pTxE_yLo4|bZ1O4iA#PKcCLE6}(S{Z>|0RaIRbhsf+G zVTH}q)$b472_rwh4;!`dFH6?h^{SJ*ySuLKV8ZK{Wa#E8VMkKOfL0BLY0h0C{Ny56 zcDC~Mn{C*hw?{;sVf^9OpN?&(mWIFoxGa5_^o1AjwwpnntOgtMy2s}*G>olM%H}R( zmO^$vqOx35aBy&tl6DXCqV7AkR8&+DL=Kj!Z%bA4hBA^j*VnT#FhmidMNoqdefDH9 zQ=0zU%?>_({OtJ&x7IZZjaGe!%qRAy9sQxm2FSXPm@f2u?;IqNq*`Mlg$*7pF z0*JZcrA*C&Vzfxj7gf;2+}av#@)*QGilxkeSVB_L%+z#oX-V`K-FTsC^6l;I>8UkQ zZg%!&)Mwrl_a@~MqTMRewzx0H8nsCV7B=>F z!cb_UihO=EjVfpghB9_+KR7UusgqLoV02Vw%~)OW9iGF*3H~+?Vh|Iuin8*s99hH) zLbTigBJfMX>r;M3C1qMlW2M|il%gxO-t&;E%BAn^} z&gwBlLWANffd@Stt!KZPWkr1<%~fI;CSMR+4hCKg0Altg(GU^+6Ql~13ClTZ&?6<{ zv$GZbx26T|CkJmyXZkZTGLR}e2m{K?>2?E&`B)`J^OV;KFY{xhRoOP)lhE5qWjC)W zR>%q^u>O5D3IRTYf``t)`&Bq_^$YL0b7Z%*e>d z*SaWA9%qB8aMTU*nvaPi7_1cW`jpjqtRpt4SI5_xBg| z{DyF762pdeWA7C;HO-}^Au9_kN^H-gajgE6ENI9^5lekc=4w3RYKkhtZfeGoBf=gB zt-&32ND#Wk>_P>$0qRt&H24B^c-V1RB3;wDf*>-SOnK9b> z6y|1ULrqqfm#Huzn3$OM_|To@ywjg27d%Iwv+8ugv7M<@Jv>?_@`StvNTSgN^hp%_7$_+y zw(%x2xSc9jHM?*$+#^ipeY>d1(x{>ctxDK~cB-30!8?UEHZ~JeQU2YiY&v%EJSirQlTQbc!#7xye41kn%xw3v%H(jPm3)mBoPN-9Bk$LZFJSt z2FBYc$jRsD=eaToN*8D}8}_hrAS){?u3DCVjJof!b)_%!W2O3ciHvi8#yrVh%>+@H zCd?$SY3&+c;U!3i)>)F-YQzPg(0<~1-&fk)ysU*;jsN)65R;zYFSbJm@Pa8$GVI{@a4-ul7atMvXj zIL_oi$0Uo5VBSAZ5qbhx=ZFY@#>Z2!7G2x(0G4P?IxrfsY|;zX^Ryg*hUmEB!KDk9 zDfIo-{yE*M&Xv`dF}b^jkf@@WM^mpy>RCSn;=8Mtt$H%3hva9Ws42gqKb;1 zGbf{?21C&W>l+%lYZFomYHFCdxl>CP+#jzk;23pyjYADTe*Cy?{^0!@2#4U^oB^g+ z&iF_U~ATgh=&?O@l8gVbwU4d@2~v%l0=b ztl6Aw`(+92{Ph|z0|pMd@~Tu=z+%XoiSY5YzJ1#kyRps*zBmJzz(^|rUJC*O0w$y% z=Ia=4eqCKWU(#^zA%ar7e&>lYL+rpGR*%;vME`6Ia0S*A{0yO>1BZm49XU0i1>C-`-X#dPo2f7Bt&=$CB($G zU@1A?2Ud|4^B+lX=RWxVsdFD3)%<+o-BN&sJBXSF|9i@smg+uTrT}wLN=XTwu77@B zwSA7-5zRn)WZ#h?d90izkggYMXJ_XYw%bJ-)BElNP7+-Ji>r*MPMB(M_ifHl2qk`9 zS7NP820%oYwL$Cz@&*RETJi}p1sBQEbUW9_a|+Bip9mX&|Nd|}< zUNF~XZ7s{tISM~>SXkKbI)lzXz26(fWNE{j6hD! znFMzs|D5ZgL~m3Cq?i6;hdDvhcLaoEQPaCtqg4|%jLX)Y>S=! 
zJwm#6&sDO9$g|hv=fmD5)_@lm7vn<3e*E|$E*=y>X(WpB@GL^aWJK8eJ8C#9F_&R#YASgGh7CKO=46Pdhsm$VFbH_#41`XYAU@=Mn9 zv;ZinhlfYhvGsQ$kQxm(YNqjsF1kP$`Z-C-XMnuST0Bo?^18*@!d#?5b$CZn1o)(K zf#T=J`+=MnXtde1i6TbB{y3W9pFf|%hM&Z>VhR@XH#R3&AV<aQMTH++%*sO z_JnQ0!u}^X6C&w%1nPZ<>*uE(SVx(fac*yyqSy!ktJ>l^)6|vD1Pp=HwM=k zf(}X6E&`Tno7U}81PA`axA*+{<8wS)01(Jqfm^4PB{2Z>WoG&;8qVIy1j9!aVgNjH zo#vtlFL|z6lF8drY6EYPf*e~TKPTtiEcf<){jONi+_n)29u`1xgM*Awd<+bg?d?Ra zwCu=3zeaGn>jG~eTWMSr3u1>9?Ck$RbV`{+*O97rech3V%arpCzPj`&G>DRPG%s-) z%L2-Ih)##eSdxS9?+Xt1{Y9klv>CA!sr+7Si#CB*e+G9-TkJe`fUoA;VW~{}35hhJ z=R`yx$4Z6zXz2vlf2>JQPWZ#ZTn9tJb9FC%VJC&n$BEEBG(n=Nunhpc zDP(il{9A2M*CsP4g~Xr@uONtuTe7k*TR6`UQXiWb(v4cCPtvK-!Q8C^ipVuO*vA|8E-(YDna@@~Z0ySQT==KnzP^bw21ds7+z{ekVuNpbp2r2@IP^TOr+6on zE()OZw6q*jN@QyGq$#LyMYC?D!7WolVkwMha63NNKsAp;G1Di5if&YXm%mtFgYIG4 zWTm9Kt9~5zI%$-tG3X}u=w08T4NW*YIxc%I4_~T@f!rqcu>aXu&@N3gg$H~fv^n4C z@JwEDYXu~5O{!2&D+iVQwNpaoE&jXp-^!_}JCsdtj9t&Y&|Ba0j=PJxl_srWZh4@w zlYaZr=xE5p!vkDqroy0xTInLnWiXSGL9vls{5Wx5JAiTBKj^wN53;#h+lY~UK~QdJ zYN})TQKj7U6E)i*JNl1KT5D@1b(7ALD2W9sVn$utS*z^%#l{`7nPl*OT5+`G+Y4M= zq0{xz%*<`u&o^K59Z3=&77g0VZkQ0D zEQ=QI?(9gIM#PT@;{^Z&z8GM*m#B@5rxyiRW9#I1>eGoTk+B9gF7N}RHy-9`}VucD$;;W6Ew1L43;0^qes zF8XA%a!KHl#6Mv#zbV`1QENjootmc}zc2WTi8n5NBa)+&^j6zg_nTm)F>}vfM!D)` zNetf+p_iTq{FArNfauK_{>IN^QkveBtf8O~weo#U1e6<)U;AthXMkXDp@_Fi82dKR zN&05Y!35NeD5iD2Ix9G7PUBE&ISnrJdt}|;O^~tanHgbXjdIREXU9i& z6Ak87odMyAhM{Z6(VWa|LJ#L-thH_O$u7t5k2yBEkbB2?;{&Ky^R6U0X52eAWJJ)( z6dut#NMu4%5}Y#um3^x|RWTClJp_)Xgy@*qSZ|wp|hm#={xsVLS#`x*FmF4m| zE3&!ZpaD9Pu-#u~#MGUcv9Ucn?IEQtjE^opb#s|>%^wAMh3(cx2HVa%%d!>PABcfZ zoRaA<68QP*UH8&YKhxjyl?1K4ZNi=Kfw%)4$B_uAl8aPbUz5jxv&Y=|)h#zq4)2JR z_<$1m;|TQG$ZEN=b)(ws1}4d;?t!h_5V!TO*N~K-_RGwn=jKE3$H{%(Xbn)V`eJwu zpq(fu4bGGvD=&-uo{4D>>J?eMY{KWoNPn|=-{ad-n}icwjS*IwHJlt0dEIvv?y;60 zxfZgJd4}xXd^+ZJcaqFKm12S|@9o0=>FrN^7b~Glbr6_9d*&pd7f?Nt^d>Q|aB;KQ znDEm`6}2y~u11Q+4Jk(dnhz-GGk{E2*aOm^YnoMNd2@cJBBKQw=KE8Q3<@Vg8`wszIj@Eqwa)sn`3*Oj#ZmSeGH$J7`(H(T;X* z&(@ZYOT%6P2N%n2ac983B z?usK0D|3qczxCIePU*94EUYbe1l^7clKSv%s5>IrYRbyO%Q>iNXljo<31Q+vllKUs z>70$DLEngyZf{*@E5->}nY0rt=eu0aPq)RVBOiAEg$Of$GW~JZ6bXJ`f_(&Mhk0>#gp~VsJ*Q}EYpc@1oslS5LlG(mr%AV8V~dn|Uum|Qj>(LVwUu1J3=J2h3B*l#hks3h$v zih7(3DYA{SPy$;8q@6d0^ zdyj$KTB%+6!o{-b0gIg&;rkIcf?D&DRo+YmNx4sWSr=&~v0NU%6ooEZH5Dc0UxzMU zh@h4fyNll<@Q;=xr6eX_GCfObd1}AfxbQ~^x4&<(-p*qE&_?(K`?P-%6}_%S)GoJd zUwD$oaDFu=i%7ygeG2q>W+cTkN7 zpcSbw>pj#L={jPt7&<$W4FQL6XK>B|)%a_p>@p7L?sQ^CKmi5hbq7(0748hTuB;R~ z$HmO+<{JTI7Qio_j;s+6Feh{M1PI-me0t+}=c@aYNy+QY`R((?Ytq8tT4jJ2^bv}d z&Gou-devAryx;eSOx`OM5Ij?HZU88cODGx`kp(c6>rcZYBe_Au;4gqi+%NuPMwO0t zRYsTZKHxKUeCt0$#gIy{%$KNdDReByrROMKwp>-{5+uGlpDW-!<;)E^9;dL)S63{Y zf2)@b7&=&5S~@#le|B51bj#y!Z})xM5J%df8y6KM#}mn&l#Xy(lOO}g|e123n!?}#9;BMUs%^~lPomT(Dme=N=6~HyJ^Y0+`um}t3gbA;TW~A$f z5`BnhOr+KD?dAY%DPXy2T*gdyGBPq!2+J`s=zNG5aR2`O0~{L7gA3MUhUm>5A;?_d z&j;^Lf*sU=_vGp#>RBW$ z=E5?)Fim(JSn?+X3G=p{%F3rNb`vLFLwRXFmF@N~07h^v_b4n~#>Ag&?%5pPdaD9$ zU_~i8IFg(}22_lUl$8wG`uDeMKX0z58EqZX0S7xyP#B+kGw}X7#rRa|>DhbrsZDCq z%y*+p|IGK#Kb7@$=&O!x%STJr$pTv?%HeQ-n;#3Y&ye)-bVjOD7Tibafs z&a0aVC_B%{;lGRvFmkdkQa+)elMtcBhlav07tmD#Mljd?HI(8@AsKDZmz-ZU=ks_~ zS4cmF=hb;yGLG{2>DRdXihf?K=EYhBlT&S~PfnWjpy+)2=EqBLSG96R3cLHHU7);; zJsbb)or80_Q{B^g5HS)lRP#Av$mXt1pLgnCZCPQn7t{H}BcMgbVo#5=MFth8J;kYqMQRB3 ziD8~5*0D72&S}jN-n(;iLnGei(NK|lgcTkHM_|4>taGO8e;yEVWIh#0l~Y$zipj{3 z%jlZm~E;}ob*4@9K={XJ8?FStT24UBX1nz6#1^kS?@P1begn{ zYCt*>8X$R6R`_XlmKG&7HTFyX=7jv-MlBIhQ3DQ4$+bMz94?xRQmNzdz(CkWe|phi zcI?QOKXS!itU&2E9sgt>po{!nJWb8+hipDF@_z+SO8c;V%{08t%B{MwtD%0(C(bX= z&!kR!IKU+)c?-mouuq~(K(Hk9gD|DpZLU`ZJ=l~7Jl&lO`aT0KcpXee_Pz-*Vq#~< 
zu($}YUK}*H@G;P|$qiS;!(LA89UV8K8I*$vH=vUig$p=xUuo(Pp&W!Hcv)sln2-^) zwTt1{uY0bhH5~q;2C?AY{(vXBLhNy#-ux%*uc7Y4)ta(lJNM>n1hshx(Ly-1IBpBs z@zL5EP#~sto6YTBmor|)ew&#l+JqGG(c#>>^PeKlE90rKBhs$v`Pmtm{*W|k(R}d; zUNaveYNmq~scwwS7&nB}y&JS=7CK#toV~&2b93)P_Egd56=je9*GS}1GR#sX%d?i=55J&1K zYBE(FUXTVaWqD`56xyut&Ne}pfs#2jB?T|7lFI{#yAXdGqL@A#b9$_BKNL)My|g z@?NigcUy}Phm9E!iUPmrtex74lVN1BR^x;qFE)#jA)ulLgMeZKn^^CCj#ZRXQUbx6 z8y_95W%@MnQjqjjB7d0C`?-Mm;BL@N?>gHn z1OB&|51d%rX7Mi8sIqP<`g>)Gd&7!!)JS^ZbuZs-5o@!~;wGQ0NM?6kAh8jTC#0>N zwsJX(F1D2QWGDyuU)%`;I=p$iN!+igOg6dz6hjEyAFU-X zV}p?v5ymD28Pu_Vv=qC-8TsVl_EFD`&D4^cyf8y_@=H_nrZx$tzH6k?^=9urHyHq| ztY+U}6F#VS1~cS)rx+VjTCpmPbW)xs9mmrrJb1YY1b^)cFfnJPue^NZ{*aIZA^{=c zNWG7#=|Lc60-Q?#O6nh&=mbKF608GjOXD)wq{QJBQVL1J2hT)^|HN17FI}eS0BLX zc`=TAVB21!VgDty%&G^V*HI9a|dl( zE-`UMkTu+p@JCim8;~Z@R_3h?DkD2%ecgXGG|N_{KD)^_lLFuEOLXXBzgR9*UbPLt z@#!9r`Y&&6OqMUpN=w)9Sv0mRV?qKGcXPS8+;MuP zYA&>vs}_R+58h1zZUM)okO)Y8}nozPobTZjG0SRhlQ%CbuV z?DFMRRg{}{1&(AE>dGYv*Gy8fvQ7&P`&gi@+St<;G4&GbP&z+`M*v-B-VY4_` zqW@P0G0ok%7LUCb{%3Fe05=py2Le!})Fcr?NZQ0uA#zPIkPe6nUG0C?Pq`*1vM6q# zG(0J$LXDClCug0}Ww`b%BR$pnrWV+6Fr2WnF;{jXl7;n= zpC-b)08{YluF?LdCD_E)?a^^f>u+c;j{7YWN*ENKhMqoiZ-v9CFAeMu*e}Y)Hikb@ ztGN03KY#6xwlz2Z0qv%WLe?Tg+_Kcw-SD?X*ZW&-(P*>Gc58rycVa)yffM|8Pr(%t}01gXU@E3Wp4QS0B=b+^#-=9ZQyV?!Mbw%vtM0KxB~i|vmMUv&wD>1_Ce zK0~%UX9JL2+zAjhIPTvehnf6b!j+ts#$y>{V_(c8gK-%c(l;;RV1t8m%E z!Tx@%Gy=RK0D);gn@}QHTawZ7DzEg!^-^AB{ikX4=Z(-e)OpgOUb$5T>UD!_HnqWV`VUM~z9((t4aS_NfMA9KHKj zdT)@?&_a&o&CnPTIoL5`g5p70%H-VvU{LtIxkYO?ISeOj_7tVb6W}UJAcM0@jwedvpgnD_;-VzYU<8Qd|7~`!iig>qOI+Dq|lA z6RmR@-=g0|36bXf*#y5EOOvwPi$$zWdS@1vpO|J-D(@ie>E$)TitToT2MRAe5ZKM* z5<_>p&q`n7Z7g=~9a18!+IwNM6bt~&5|wu|G*^kBkCn{3bi28j*w_oRvmykjDhxPD z>=@b-#41V2t$yuskO3gn`FN~=3Ha?LHFy{pFlA=-O*d4|lq3v<^p{=-n>QeVU&9(@ zaT=+L;1kFAZG)6c8WRVFj)VGCLd*)1_J_0qpeg`7gF0z;i>xwiMEY9|P!;IlWMpM$*z} z{};v8S0K$dtLS<8PBx_6ZUl(_isA&p*nq`xsy}yi)i)UX>$le$Rp4#UmmRw3jBQZ; zF`6fTr{KA2=E{NTVAi7oj`fTS{;sSnf;&+A@hMHJHGRjaww;HV8Z6;RT%b%%8zSl8 zEfoZh8GHbB|2?4d=@HIj%@~}9_o9B&zewOF<0<;Hs)`EZw%uH)j6`^09{1(YG9GHQ zQ5p=gnK{<_B~#G*(ctj|92{I9Y_7%fX>TWZ2TA_aBUj1%!Cbo{DdJOdhG~WZv3)iW zPly#eV^(gf|I>QJSsKAkYc>Z9!ruBEKkPCcFH#{x$*4T^R*?kT+~kB9^+%tesXeJn zwy?aRPx<{0^)65U&`|uck(3mCWwadlH+Oq^Unkh#-~WBw&#_V6D!`Vckh;-Y$JM9) zwUd8+a}%2+eZf7LqNF}nPc9y7y|V{NAoa7re^Nlf-`?vpSt#NG#J^uyaT;X$CCV#R zv2629oSe!12?#}s*|v6eMOq>1y)9{Ewm+b<%qcXB5NmCEb{05* z;)O*+nnVlQrC&<^OY!_gmp5jq0ZVLix@l^O0>Ds~bi#XDXluBJLOm0xH+-;me40@kL`a9T|r=htyg+a20VsTcic`Dz8BHS+)7_o?7p=izVsf|re zmeiFZjy7BT26&2L$l;HB3vb@nGm=&9m!rFYls?WnMW^5S9M0Hzadkz=yRLz;E};F$ znZUdidpW=0BL1X9z}iOHZftKoZ>grHhCQd0P;x?n2{DHsxYd*e861M_k_T{T0o@>< z;<~@JQz-)%kg#Y1-nqxh)mnB{3ewV^loVxMUSFiJfH^n4uWF2jBsjQ_Y)>QITO1=i zLnEN>7hvIG*1XY}C>V}M#O|#iEWl%+q@ajZVyOvoSlW_1R@saKJ_}kwQKkfFj9M8m zbc~EccVGZP7=A{jnnY4@#LMoKA`g7Sz{b8gNVfxgPn5XB?PG`I#N-M6+KX6?tX3Q7 z`*s$e1B&SO=e<0$r16%0PrIn5SN=0%BpkZs4zDJM^w!afs94A*`J3rx)dM9lL%2^> zaMO47pM|pD7K}XJ%$I%aV+IVF&!UEhrQ!k*MA>*7Z%RTZ#La?sGgOy_S!bf zd&8a1vzk~B$zSiy&a$8f+8lGEf;9+i9~~Ie?UkU(~7sxwA!~!@*-|fdb?f4 zP0(~)@C;^00aui(f5`w@w0RuxP3qH&cYlb>_b){q;nI$*Ap=pQe$AZl9Lg^-i5C|a zXae`Yir4B>S@m?T7y_r#KbX;UO(S1mcDe|CyPa>fNK7Yb35a)61Ss;X%4cV4jieE%0wwwOIob-I;&yJMqc zFSSDdU?icBkM|c3H^GPQs0RJ270)CpSc2&adzSa zUr=XF)zKM?A;vnL+aOGLKSEmwD4=T~pan?Ye9EbDlgnDMH~E1w9mYNx6BP0J$dfuK-mw^-T_*Nx*hw|A(b#;k4uYkMg9<450A#&LFH*I-(T1ZaZ zbpQ!5Jtjsd$$O@e@D=M6-;Q0Hw6O}VqrK| z7M3mec*D!VjsK)haq*ANF_ru-uH}`Gmsa1}Ng#L_{>MPKWcbGaZt_6(~&7l4ZJvEi;DO6ZCs=v5pNeGBcs%D%RzEE85sbI+E`lV z)df}tF0f;SiiwEkQ;f%TPJR7WqIypgpJ!ZOpJ(N?)eVEGtwr)Yx6I=9Ev=bX@S@P` 
z`t=@H-`lSWr29Qcr00kf8Xk$88W<9?E8aZFga9=e%${DZIWa(z4-^#^Ap^pbe`29G z6zu)x2JlM<4NH0fjy(&XQ$36zU_&p5g>?5TU>GUK$HBn?$Qh6Ck$&jx0J?lN91yoE zmOi>%76EMwTs=I4f@SniCwX?MM+8cH&p7d3wZQq|LR zoSaRJim%Zw^6PjFC!>UwgHM7_r>ys50LAzu4d&*U6~l8jh9a~9WDY};Pu|fE1PQIOLZm?iKL19Hry%w%SRkNU_g!fKFfn+x__Ze zW9>jCVha=`lkQb z{$PKQRy1;rj+E+Hr6oK`Y zLOy@~oKNu;IJ%N&XJ-N)m&U*id8TufEYu`C`k|qrp`xM!hy|T^KqnsB9KnenBlf+A(K1}q7l{_p^ z;+jD~Ok`zZQ@uo$oVo~T=^P)M7-K~EQi13FKoB{&(2CY2@|QGnc4p(-&&wlgR{_VD zEc|9jT&pn(X-f7rxLhO`#9_xk^jeyIx;W^1dpWPyrwaN3CvRrwRRFH5A z%jR&iabq<9D@RV@GzS9gK|UpnEwSqovXD*13`{T!=6&6C&%uFdtdu?jfQ?W!HZ&1| zNgp7*oWrim&;+pRf`X61czgkniQ66Ct3Pg=@|ip?s}yrfr*CQ4^6OJ*9VW4z@A`pY z&e3}T#DLFi4#P}kQeky<>EGa!Ba3a6l`+518KUIKBL)G{^zQtmQiqm6K)@IrlqgVM znVah~Y?r0O(f`>YVDU+Wqp^gGSbkjt@ABa1b!26sVtOMGENMiI+j+k%334dg>SWoY z39fA`7YD~!~-hQWj(k~tATum#v*AQ8CO*!cYN!oLm(#JGY_ zRONiKY|wu$B=H*`Xf-PH*&)<}z%xUTS(W-#$H&HuX3~Bs;$mX@dnL9`CB6F$STnYk zmYDGW?eMp6-{c07eSfKB050_*t_YmLr4<`N*?}O+-(WWS)hHQxm(u4y8n1)UTURHy zpAXsR-HYkJz>mpd&nMlS$@*^C(6sRI@Cvs*EVF*wGW!Pa+nsmkT6hA zRaF&0ueiBA@~wzIe@5}DZfL*`#?COeB7vwEltrrVd9UH!crHMnWM2+&9=_|(&y?$S zdUH!}cb=m~r?}(|E%x7azVy$@>JwQHM&NUOw#xoEa9mvk?p+}Bu+-AhqC=&P4TXFh zJwp`1{VzmX5MYv-kYXOkjYmQNRXcJ9d zxJ-nv8hFKul;(DJW}X}RF1uBNuMYpX*rDmw`oumr7H$D|FaHe!u2?Lv=5bNq&Wrte zy2yuV{$U+ivS4X&u$9C>m%q?A_#PMT@x9K7`3noK4if^Kh zdy|Gn53Fq;>F7gjS$@90+yS@R9o?k;?6#%su3|IIc}wV9GX&hLvXSL-dNQ3&;`<+fYKNKi-Ts?OvGN$@mHRydZORcv3!-GT8jkVM_nFbG_u;Fq3@Mv52 ziTnoiXYyeVD=t!Xp;*0=3ewUB-g}{;p}?FD6)@FckIo*rY_a+Xgb>;@YXh3nF#10L6izL| z>dO`|bZKT*1Y|0t$dya54yDq|lo)VArp2W2q;vef{8pvW!UvtP;@dkWjrg$v!4pD8 z{=q3-hNT%pXls_=DwoO56T5IUB0k1CdqCTXGVy~4ToLI$2-1NT&jPWB2g*KTV|TRa z$%^madFsVvG#)PPipGwP7a$%k7sgH}c0UYM|62jJtBi+s-w=mMl%j4s+gPd`{^tV3 z%h8&F0>{^jOT0)4qkwj)QSza0Z2tisWo?WzV%p=yi+*WS$k-iqh_bVr+ioimTEpPv z=HkK=+{JF5Le=I5G%K(~jn`r|fAibY^Y3t(S%ES;>Ul~LiPx4eDdUYKFiZUM++3aO zKVX1H6VBm*x-#s8MV@wrfYSl;*A*&*DTQdMv;%dVUZik`zoNxfbH z!;xFx>*Xu=bJ$iCPNmEe=QCY|r)U)<#1?n>p|52J7~lABHow;RzhnIpmA3Q96$3`a z=QB4v=pL-`W($2k$ZZfL5{c?Bk~=&#WwyRoU0n^BTV~RoR>OZ+*+tLUN!q2SGwfQW zz>gJ{&!u}xC%+V`;xHk`Kwy`=>>!eJBceuFRTk>{#mi%rIy?3u{{3*BvPa^ zZE6*`X=p|dhGbLexHA5mHG??dl8|gJESPMY1Bi+|kT+H{0wrRzw^yXl6xX-7wA8R; zI~hglyNVR_q(t#xT3EP7sz8b8iG;|6~n^Y<~G4RGXO8~u2Q;3F+Pa&0k3?0q5JiW6k0M)VEjkRAVVYF)4G#wXgp2C!gNXK&4ZN^m@I7U!C-e7CJpcJL_$Jp zn8xbu5VPD7)mrA^ONUABs;oqXQ&g@RQ6}-Cm&h+^ z|E9e^0ka`W`ud8-%#@TvaW2y|iGKrg{aWR~M$ilEUz&6ZeCY-tfcNY|%fI)>eBUAs z;rmLUm+kidIymoms=q&uSKmZd%80B&H{#m&y4i^+*?W(SxVR|Wr5~Z|nq_2l5s9)z z+{?{YLb6@++Up`K`*-U9`}jQW=bX=ZpZ9pZp6-dKTJKkO<3t4Y2IKZ#e^zCtrrHc& zI%y*%{QA~F&yq3y1@zcD$dEaAPbJeU;c?N?-nmpzL|3OaXgx!c==yQq$X5(7WN2h;wsi0A@7-dwn=VrmRm@e1*ZxmtFqO&bzsHYm{Ct`J5{?pXoZjCZ+=HINYK%?TU z?3%Jx=I@{G%}*ol#?#a)!8M=OIm)vdknTSVi}0p#0*4K}3zIJ0T()L-c{a%IGE#Vleceas#hFAV*pF=|hoc8XX z=fE*!!2n?nKhInXgbi3zJ4V?DAO67L~k(37X4o^bVmIDW%=AHM1=^^Q+zK7gx8MDpt8+@ui^M)#wttG&|dP@L@n2J_* zq{AI*LO1rnqS%=CM9m% zN|NBT|CK6G!;JCYazemz8b&ls_z8xmkg)#5>Qen>S^63ngd_~ z3u!7CoGD#}SKqI4b1O*d%9&r|w?tLwmz5`GX)GdKfYSg=*9fWs7TO5tp2v-_N4BF1rVM9m#BT?t!ZE($!=C6B+ zKk~dIEp12?Z7tMySKg?YP7zwNS(H0b_vkQQ_;+|Sjlkh$Y!MO_kFs2}muFGtg7l1Y)kQs*K4$iS|icUup;Vfmf zMH?NtuUhty7F4cQleVMj2X@o}CC`^WIE(#z31)D*Z!||rqyD#DRbA~WeQo%8OAb6^ zb7*kze&CxJCyhAX53YuW{TjEd^7SPg8}#+{XT1ksr4*kPQJmPFPrSk6_Sb2O-tqPP z5{BMYqRYiRo&RBm_gxz-wlJ}QclsZaCk{8rshmSZ!U6ZpTAvSYdZ)N_0>6XljH!Xz z1lJ8H-@W_4`kolFDWV2!Q=LkCTiYwgP`*S#Ny%47Zi81|V70#QSs?Oh8e5^V-sCGM zl8+F_sdosrl5C7F4jlE+&{Kt=-9P)> z*Na-_yt^7W*!Wy}?7#7N@h1j6dPR3J^p2QVd>(acd%IjZajePUX0R^jt|iq8D_4Ra zj6Q0~s0*#Wu7GE9JPl|cgRUXFjUM#jO@&+kLmh*ft_l1 z(#u4RuPaV71Sk}$N`P4AuvOcEGJ6F32-5sKkH8<t`!TIs#e& 
z;~>RPG|?6n3XgcQ&JRrA=N9@}ZiHJw5;dkyCA;Ff?BRlA0|ROfhJ2Ur5&Kshn%`cPK>m(dpanN=B2#q3JW?0Wzy=sg+W!M?*|X=) zb3&DraBp1rFJoKzPal39Xj(Y#2|cOwnrT}no`!(@Bm?dq7K_;fGn~78?GuIC1&@7e z(1*U{qGrp_o1^T^fvmDx860=IPjfH=>?%y1aaHqf9^1r`s(=ZQuJ+%3ugjik%FHoE z1*|Qgjgt^4MK8YirbFyO?k=btzIf(<;^m26$5P*Ws7(VG+Xpl3rEq2ESif?=b?f06 z(D`THZGox|$(U|D-g+y%T|U#i6-`&unFMRL7Ehu~F*MV@*)&qvI*IS(D-lShD&fx`$;L}(X`Wl^^S>+ zjYzxb;()rX zQT-};x{u20Ai(j;>E{~7n39@$FVD8IF~7K^#Ja-aTN(G_^ir*7A)rEeDaw5vxw3$l zLHU!?zU?cf13<=Kx0;jngNc>7rF~$=Vvo19u#jupzIj7hnq{}pomQdB)98`cwbxz# zb>dr^n#}z|N#DYKV0yQcy9sXd;Dh{Bm)iUM+istZQsZJDDbiZey7Q!IAHJH&7}wwh zXiL6b#Vt-pKx@yB{RU=qMOAGHqc%XB_t(P(8}03F^f1rOExNTu+=-fJ#RX@J>@#De zD>a9tEPHH zMztYaT=T9M%t!WMpq=`kC;rGB7azJYk^<+s+m8zJMy#A8@B}$VhH@TwDBz?lMx=H27 z$^gH~s2wR3sRSR8l^o(Qg^8$m-QZ1>2s zox-dbnt7DVZKV;m-&IfY6yN%1$Fw~VWML~$fi%4V)}qN86mP%7NQEO%J%Y$oZFY`+ z&b-z;-kw z?|h*@$sw#Bs{3HD1Tc6croz(wdldE-Z=Egu!|>M(7KY&R-nXq(d>3@E&95FOF6Rn0 zmRTq~7;v&2dM`h4a%Z7fastTXhQ)@emT&QKc0c2`7XK9ZfEP1;WSq5(2(c-|ASl~& z-Zd~1rAbeaWvH)ZbkrPe*xroYDm`AMA&+#erDur$UI&nw8~0S-q|v_EP3CqVU^ila zA>=n((`+V*clsSOBLJjjM(RG|M1?>Di)FI{cLw9g7i_D$*^dYs_pZ?C|Wwv?-8dlANKc6hE zI~Uf$v;DqO!P8~r_o5&no6kMUG)GAZ6Gnk~tf#*|NJQ8NSw3WDrELX%*le4UiVZIW zK?F&zJ9A7X3K>*S;yV6R%N1m;tX&K*A1;M$eZ27AA+&db&D*w2xKc(VwzAOff{Lj7 zQ!@bR`{N~~#e!{(m;YU+SogcYXqA=^j^kw>FB!!5d@P+*6No3O+u&f7`mJ9@Y0!hN z+pk5^-mg+-iP-;AQm7U6MCqSYcENIV1+PY|N$ZhB47RW_GK3}y1@MqO2%Gm;6SpXV zgRsO(sXjDCg}&kFVCPn9(wv6}A7;=qMYvhE>BQ8IOFb_5bM%*A5PC-Xo!dTWt7ACw z>_c$GXh|;iD~;ih<6E1wc>K&3owGHZIcVE#A7H^=cFj~DO8i+^5AHKA;#gGD8IWBG z0s^-1)Qj;a)dF*;0#-iKnvp7A55Zyq%7FqsQX7FZWt6>yBz9gT2cxU=mX8 z3D$t-d}Zm|;-MDSjNze{@jE;0*yUt=CM3eC!!{i~s3N;_!tfZuN`<6Rg6RLN-Cfnh((T zbiX7)!nU`cq17^7Qau|k+LuPJ!C;JP=_4eS;HhclfFVN7K1cqrqA6&?=_01I~ z<^?M3m}GW6OV551BIOCROYtrT}Q^Q^05N zqhs)+^C2PB8p6({?bbWvX)ZZKt8gS^WE~jda$b94pbM6#JOa`H=47BrA9&b3Mw;h9 z`BRlfvE8D4S8yR1!3W4bYO5u2pJg~v{%Us0AM2H|g0c52?OSQlSJ`F!)|Eoxc4o6~ zG&tGEY=w{v91b;rR0I2S*5U~{hC_-CYQ9ZEme!7wNdC>j{%A^8jrG%ZhBi_de!Xc0 z>ruP=R=7S^hEUO=tgruz%5S{4x7>5O$<8d8a*UxN?@4%6qQ0I+#U=;FjGsOLKMyX=R+R6-_PG;+T zK3Lqu@)F1>w<*>$_uE*mH3x6-+by4j9;tcoo63gi1;==gUox_7`xkpHb*4{5Q#qi;G|;M;EV|qicx; z0HX8B4_Udmj>y+RWC2#ps;Z>+nTzMoC29yHY5;};HMNV*8CpkuDGpeoN{;URnbWfg5#;Np T*eY>=COz{&O0+P}V(jXzyA|Ksd(xK9E591FaND$K{vhz%P6Tl-}PPZ;M1zvpf8o_WQRx~FKu>00v z(bwk_r8Lyu%aFd&&mv^^&Wo2cMn&O`Lxo`vnSovv@=wE^!_3NHk~r@by6VQ$@Amf@ zRl36wW=jOVsk_`M=D}rh&sxk`hSlqI$C8gLw=~n@c>9KYuF}%7V^EFJ8`Jk>B`@@5 zsH#piVDE?R(v3yFAATAzuv011WRQ^j^3LMTu~_pzmh7LABjK!L`8s{QeZ|irF%?$P zoZn%tJ;r`YOE~f4R-`eqNYLa$KI?U{XPJlU_n^18k<-_##_!%lahvP(j_?@vgx=^q zRke75un-peqXiUmtJ>oBzA>*@YoBr3>&%*RV34)UbrdX=l|jtFXH*bE zm^BCjd_n-;q`(^lLQD(=p#Z<}fwy!H_&-;{b2*6r`HYbAaHE8Vl!5~ATjQ0pxw*ZI zm4j>5lx8td)ReWRj;oIHbHP^*aQ2sG4yNYpo^Z#9M?k`!g1|?(x$8@)C*027MbJ~^ z$?rP^fzJY{0s5@u!Od0V$&-hQ{`>Q%o#vj_|EtN~_^&5*{`Vw5_tSqr`L8Sg?~^bWb7v_B zI8ddl=>KNs@5BGT`1e6!j)$KAYb5>*^Y3qgaTY}v=J@ZNiK0iKvnGN-;vfZS2~AIg z{Y*4Z0`rz5<0%MOVFpirraK*~G-Hk;VS-Dj_MXC0I@9~``!f$$i$gn)t^Tj0F7KP= zPK zE)`6%rJ{yF1m6uIrA-r{&IyMesnk=rSme6*6*jnp; zpf}s*_3^#1*H~2=Hl?r*7dN;0+5Rk-)j!aVZ>ti2>%qkW*S~Y4C7tn7#t(l1mW!i= zCGQm|r7{n?6%(ZMSU*|sjY--WFBiFmmam-)A!;>D0+t1!j*?IaMICv6V%%#W?%B&Ae# zFJYC|4_XOwWS?nw*J_MBOmSSH~=CIV~+s{C(lnHRD^PC~ysD;|TFlV|v<*ozuJ zoCBQga~g4T947X2*Bh%qu;v=7&`-+bHr9F#aUCzPGoJsbReE|RKe-zbU%N8xJ7WlX?i_1LHoG$_S6=mX= zQ5+?BYAuCAV951pt@ftYs?(x*j@|v;c}FTkmZxN+$OQ7%LcOhaOegfr#eC0D`S{2SC^t`;V_|Ne@ZVU2Fk`DzsDF@KatY4kEoYLY_g5$*QniK|P zb6)M@-06H! 
z?BXI-D@bN28=YE+5s`p8|FFn$vmsWau_M>tbd$t&P@%2iY6GEk^Vo`f^g8$7ILEBU z&fQ#OL>-!%#dWc@R71w}jgF~qzsphoqS|lrr2M_?0zRRtjy6o+u2{);aza+3Ftsr= zn-=)gsr;FC-`X9#!w*&AZcgY-y_vPlo}}D5Z%G*nKQUumb*1K1f0^;HW7gt8hH`61 zSuVY_xI|!&W#7hfgXY>64#rvbY&(DS5zTB(7u(e3t4tB);Xh- zDSZc~k~_g73Vlw{gH82zVPcPmJ_fMgTj!}|JoDjAixyKtD0xi;&wxF z-bqSKDvmWd*k&{s%85D%6Ig9Bh+RfB47mMS?f9r<2+VUYr3PZxV4@>;NuVY?dE^e3sgL@%ka z!z|CF0xsivi7Yl6VkeVRSEtFJr56$73jaHnz=-|?dc5aVE%UF!#lbJB#btz8iD%LN zu9QE9{X5#zPuW>0`am;`A2Bj9shwIpe*73i)Uefk6856a=pBnW->B)}v1%yD58~CJxebQ}x0v{jWHN)QUQy$Q8r@hZ4i`5x`!~56p zKCB9Kkt`)J_LCguvBw-n1Uh9ROcPm!nlRX=_2b5{gid1*jwx-N`a_P0f79F&)XSP*Y#P&Ba$vmuYh>UcxQ~##9@7bYsYn!dww4 zd9339)#3-YR}AqIMU##I5o}{TW@n7#Nw)UM(Fq$m#neuin9x*EQF%+RoG!1cn`U3g ze|k1FT&vEhr87Zcwk~`5HqPyXl9JN+|CYEan(8Qa3Uq`akQC~8d*vqJariz@UaC1% zGNx^+(S8yAayN#I|7~(I!6sr4DHY?CY_^?2G2o}2k5?qld8-a|SH#{b`HfzXn8-4w!n)AYe^PLQM)ZjoW<$XqDVdP4jr* zbDC;-2{MR-JahH7VOf0s4?b}%ilfESz|E}4dr^S}=H2lRmdDM-$>(SqL0Y>%tL9B0 zuz;V6n?0so;Xp9Edggsrek$O9*M^@@Tz)OL*y2XWkE2rz3u*ECvGtan>MnL<+6k4| zZn`q_J<#g2Ty4dT-P-B=)#N^(Oq-0`V-}9(w&(m=2{dxMbE`J{yQA|}l~DA#fPN-JfPL>jC`scpq zaoKvF32XJ)LsuZbt$r2sgk`FoX6|No<(cKT_p0&727fJJHLbYJJU#}0qWB7=(x7=w zt2|ee#`xE820!AVJghWVM=CeAP#w;Dv7P0uvNfDhl#io%)>=NNYLsa*sZ~B(iGo=1 zHA^JJo<Ax)mr5MHnP5FnMLuU7GpS zvv$II>Dw;Byj?e9U=t;#W*{#a4mQEx_Juj zq)s*|Ggv>}!aHxUs_Nuu-KbdJHIRK;*c0_w&fBU@^p9DHegw3Co&VkV50#v@HUqVC z*rs57IaIYgSGCc4Tvo5qz5>Gbx-W23V5aE!2~xn&=#By*;pD8w$u>~Y7{ zE5o;V3n2hF(v2^T|BZwIbC^J@I%Hmh@hQ}WR<6HY0U8ZCc9RKpoY} zbh_!jMBGj0u1H%DKJ7+9$kD+D0DdSog^hEC^|Q^j>VOHIj$b`P;K#kt>b8k- zt5Dz6X22HhS1jSH-J5T$`nfm#;vpIdM#145Zt&~iOys%spYn`_UjXB9Qa)e^A%tpI z8d#HBR+Ama6U9bmbQy_}Zyzjo7{5N-CyA|`LHXT+0Kfpi5pnrm1%tTWY;13{4Dy0W za_z2Z1_lPk8sPd0N=h1URkZVN(zs)r-4D{$Rpsi{jayt5RMD4Gb3Bzl$i^gh8+mOK z8an2`wy$WQpxYGcD65(9-?UwFhn4wAeQNU-$p1my+_BH^T(K5fH>8-L^(m3oSF^N^ zYF-mR1L;U+`)61cA_}&A9Hq#@`#%7d7FQIoncaPfA~ITq(+RCdEJIxXaH4}d{TUyygS53a#cS}P8FVzJon zZdI0CahJN#&z;JBe|FKAKho}?2)lG!=gIdAZgE~-_Ero-Q&zsdnKdyVINug-B@gC_ zxj5AL18Y9N036OmT5TUGEht3K4daCWma=?GwfrRTm*7;zlxpeATv;MQ&9W~Ha*pE}ecd5B%(c;4SgofoQVCr|iNeX=Vt`Ea!S!e4o}yy7h5%=MGPh^2hC z09^QB%l)sVKz#=o<$#2fH&%}Vo_OrfaQ3{8*^O?8RGr~9p?bu}QW)A}GBLHQINkW^ zoUaOgbXacBVb;0=o0tzzWd(clSbifPVftx+?r|G^r{3X0Rg%ifY{J5yyf^sXP2I`r zBh!o;1xl&t-Llv%O!0okNHL0ea+or;+Roo$5oA3^H@eQb@E;Hr_~7MH*(k|)!K&oI z1eG$*-04)lEC^LXHHz@~{*MGHpB-_1jokI;YO`)-Nc*l1DI#ew$#R|b_>H0^YRAEL zwtss|P<~icS6r*#^^$sP7u^5owc%AuT$|VE<}3e`=&|-YSeAhc)9f7A?ja&C3y$z{ zfU}`6t|ozDDu;gpg8)@lQYre!z3?rbT$sL6@ZKM!mHS|km?g5PIKik^Z$@~_rycQ5 z=#Cq;d@FKI01PoQ&E(W<{A)yu(1Gnu2EW6VYe5&{S72||7hM|2mHYran(meTwXi?{ zVm#UPi2vQkKJwYrSS|Uk-6&`Kb3&z_)*satT(5L3K-wF*f6?7a8n9hP?3x(RA(h4* zl}*Cp07O9T8OflfrDbHNuGRw(_@-j?(lbaa&aA8|(j$BN;$u`>kMqODPwxEIW3+UO zxzoX{?w>OuQ*g*Kb)45TBq{??4k z8o1)3hvY4jB?l&gnAC{6evc;?^E1e?^M0gLIzB#L;IjQK()r-$Pn)GJ(j8xu2=r9d z1|hdy+T!R4ezb^)DZjyL3z{O&r(@;1++M38kCSm~lUViYhASBWHt>iPBV zj^;mkH}22X58ZBo98gs|Arc?7{0=_7{&m{n!Xfv$2a$?#soK?HzCqdPQBWDK8Q?b# zKjM8UEzFj3dVhNAoF?Mk;4P;8H%jTO1We=nuWw(Dn{eY%_IMWv+Xxc_3rOikURw8{ zC_?FkA;$0s<@T8z)`}o;^ z+%PiWhDUQK!=~RT1=)OJXJR^0%uz_9kK81!q*gs-T%`+mKbP%yRboCTc44{fwVer8 zYE-*AKXO>mT!))wODM|etY+}RN%49*dKr2!5`WW2Us!=sIlXNUI63pz=094-t$7~| zzs9l!+%O{&mP`gd3Ankrxz>JBP-DA)hRgH|YL7W&%JjoZV3F-~W$!TWl58OXto`Ox ziLNI{4{Wq}cyziix-2krdl1!^@Q0xaAOz~r(rjX*1vC2kt$ji3nSN~lBnwhWjaoj` z->=G=RAZDo`Y0@Y23tEpIfD<40RDl;enE%+`8z1;)1S2N)fTVp9QoCeNYvw*%m20N8lZnI-ifUA&MfTQi(o=`7F^ z>;6oQl(Vz*g09R&*%MogTE9+MUn+Dw9Ojr`_2cs=Dnj;(QoTG=MliGY`60zA-3Zku zW+f^C#(R~ZhUY%&>>dGNG}2&+T4GXC3i-s{#pmwy%rxsb#Rw#%t2M4UFKP~cXCHot zC4<)4z1O|BY|2QsrGr&Ncbk?eac3JFL}-aVa(VeUN-5DhZ24|y2kIwtubNc;4zVn- 
zqhh3I&J;oDype-Y7NlBjPyCB(fEUzIt4;ec_o$%XPR_`gG3$7U51p$SlS*`mJvHp#k+c0UYp^bBxm7t#W-RFI)^%cPfn);^^j`qCo#N8 zXUCkzVVrSXMw8w*9qxS>b2r=T)mdRSTWikcd*!;$9(Ukvv_p?!armogc^sQ@!9Tm& zu&`rJqh;7`XXmN?AN(Bd0(5l!24ltc3K%L)X8zA3(;5LZ$&~)5R3TjoUDn zn_4?Ep{sp=s-&~^^s%^k)KK`J{T%|>XFohvH0U-jSGagr!=P4R(uzOU9xs7iE2G)l zq=?lRPh81M=ZitI+l|%Q;KWT`U0n&hY9&Q6%%#-%aQ7e-Y;skf`rF}atjCEAvDgVB zRYZziE%*Y>^)n6yWQVj2TE_V>eQ(6Sv$;YKgdH(KEYN^*>g%%u4T8qVNOUDeEK(j- z@Wx88^1ZE+G;b{)yKuJeSz1Aru+K(cG|3~)6Do-K=X44?>UZ+bAL>0x2V9ia7NxwS z82|<#R4{gmiulB~GIg(=miLQpGK%QT=J$~wckRz>P2ik_aDJ@HKM|Z2uwAFd!q?q& zWOGf97F)Gd*%uY#?@2{l`~8R4xU}Bb*pn^s*)6tACad82oyyOKP55>#(X^)Ug&heP zNP_V$Y`asC@8-JN;ehK`I}R-Nn~8AQ?T-x!X~}&r;3)II=<^|rh2cB3ylL&ql?qE{ zeUfD$t*)+~{~}v7GUZVuIo)f>G49XH?Q9Hn7E&Y!Qq?B93E@p3oha2C` zI=XyDdFxpLEd-VK#!*!*l8?QXWWF~Gg~T%egmk?4d%Tq@+4qZ%2d+O(Pm_b_`BaBCHAk0l)4E5PmC9g1e?HDH;O^io`B%p)gm~TZuM3mmrE42!sKS&B& z4Hf$ftsakfZ}07InUPg3B}q^|O(J8QHP);@fdOt6;9&YzGv%)M8L6~-o(FsYVlYJz zMKZDCPqfYo6t9|9c2i8xH|o)tWOvASGX8vlah=S#_&9#W$Z5&R8(Bi8$nimAIwH=~ zm4^829>gsC2@~#E;B11Czhp7S!|Gq-Th7`52xTM*Q=lOq-}8d|;tQUzu?;(m@lOc{ zuC1BUt>7sEj=*Z7SgiqFk0+LM;w*(lJHNdA>jc(TOqdK@jgoN=5gAjJYZHnQrL}r@ zI>eFkoHhaV__@N1r!jG4e?2DXp+8;cAVpH}b~$>Q$yL8E77`f@k?9NyV&N-wc=C+! zu1`-Qp2pzyafDkcCCj}XuceTIY~8Z88NEafd;1XToJ`1b$c3a@*w`nVN#-;zb4B?> zURdg{b4hRG|Hpcn7GoJ3G0u2q+PeyL@;rL`hCNrhp%Ez>uBv+7AwBDk483H*$O!7r-X8Jw_Tf#&!4 zBU+(D5dEUg~U<_6<&4uR7|H$XSIZfyP9Yln?t(#`9y6m|H`tx8T>s*ueJ-A50U9#r@Sirm2<^kXVZBdW&`*VyP~ke_G^1) z*4JEJ?e%b(twI#G0++Y8`$EMN{eo}0S^AT;_j@h z6L6f~P3dWbzadX7z;n+RHljBNHp1r%8F7261{UBql2KNJc@{yp@{($zHJtyn5Cds0I1R=ffv+e&};d2rMZtm>>M zahKmRh1h~A5->>FMMb>NqFvMzGDMSYV57Y-lgWnQJi|n7yk>=umVbcEJiv#+lk_1h zjODrV))vV^hfPv7ru;TvZdzseM(qKvi1B=wnSrl2bdBk_M;3upaRar}7h=8-lc*sx zbBbt4-pC+?L!|y@eb#~jPJwaRGI_6Nv23r7M~#2q>vQpy!prPgilR|kin&se{a^ao(`%ANE+VL>XN z>BV2bbdO$BK5^O^8&luR(rrFp(S!({?M^BoV-n|-Uw@Bn?Un}h_yJjk{Myq7(uXnO!XTJb905|c3*3>idFBm182|qYL6qM*+z%#ovf1WE;z!@ zm0q1yuKgMpI>eExE|QPZEbWyjI)6|q?Qcus2+YudEZ!`kCh854(?-(b`_Q?|_xh(* zp)Yq(geh5?G&Y4`z8QWX@4TXF!Xu*nx;mb+#RaWyNOX!C2F9-tQyb4`lpNaCCDN*2 zk-ZxJ2HZ8u0M~@IV9aQz-)PUPE4?$Kk9Zv11i7hFJERnC*9sEv^SixL0LZR#cNhI+ z)Rh>pI+GR*#zfQE%@(Bf;j$3nZl=gZ(M|dx44AIvS8|SmSf`E3>sjz7{v0hC`x_Ds z2 zeqd{Iy6CxQVpU}64~cpH1V|AP3KQ$1ssA#!wB&|m6Ov~* z=+~Hrd^5yI5jY`YngjDE!ursJV_$O=78OU zh2UCBr~Ij>`b{hew`_FE-o&lJ6nP-G0E^SOwp78b`ydL_W{R)2m zbj{3*N%nVFtrL1~#eDI=s68s<7@RfK-uTjMDv<3BD`l?q-l)F*B}sB)*V1?Um-jO< z20K+*Z3A?08hQ|{4)3@LAX)p z+*XFn`n;TVG|OpmDVXXI;7JAmE3X{3hT;b>4GpZfcBFQg_QmDpn#p?rDUCWMqF`fF z`txv6AI;tD?D31__pKBT^qMkcAH|8FLM(I4JCW)iF%-&{6n5?ZrnJdrp_POOK;o6D z5o6FjgYT|j^mE)fkm^*{QZGD+I^?I)vb`{->BKW17a5@NSjc6Zam724X5?#FzdM}$ z*e62rqJ(S_WyWmtCV_>I1TE|j8Hs1g6OYK&3~%7;SAq>#A%L6n|B()){#Ajz(0Y8ocz7?TG?KCWxo&r! 
z@yvnPQvLz_awxqS1i@>c^8;LyU}BnhC(E~Hp!=*jnHvseyibOU^E`sK> z$<4(@%fh0x5~Uo$0rqSfYw=hWdSxj?wsT)jTcGjNIY2)qZ2r%H-!R~&M)HkacJ*{p zNlc1w^ztE3aITC}yKB%`qHte>=eRXi8)Q*oFW<_o-^2U1bWA(rWWZ``(BQ9Q-G%S z&JO1L*f>-co`c#iQJrON#9%9TAz_?CiUlEgi!BTlv+ix$T2w@W#*ZU&(aX54rve3oc{>jI;S9Oilgm7awhTB>^McBb6AetFWAS|tyKWBwp+zltEr=_kU)ej)_aV|hkhdr zkQ=Xts4UiXGN(k{c9Zi=x(BVEr&+yM8ZT5X034cPAc+vOLhnf}90x4_DT1F82#Ph3 z-`wx@iw1@9wnh~3_{kX#@{7h3Q2wj*s@2J310cRa!Ima`6H_BQD#vi8Q_bRXY7`ce;uLh$z$v z?6x}2A|r)9;m!bbwH0Cxo@HQ|rO;qo`}~HFUz)L$l6?a76O|PkqJN^SQls&gKe~kp z;lOZ~IaJX%NN6E6?8fb^=H`w5(vgvo1O<;%uAtTGe6~~1b>$p0I#Nhe9^C-Ual3D= z`4G`~0x?eHv=!n`JSDdIgnqq^xnLF3un+;nh@zS)2UTtQd+kubu8xgU zF96IUE(0WIo7JdICygz#xuhflVBAUk#Mz-MXl#KK!J1eVZ zvh7Aih%-fWXL^JvrEYm7-l(3Bl)fIk+62(F{Ongyc6F&1odE`+=Q-8U!HBTd;l z2vkk7JVXvo7!o$3S4L%;O_+}Z0V$RCc_Zq5--$<0tU3@1z4hD988Duo*6C<<4tToV zz@fPC0$d{l3M~hyjlo1ZrDSRB_OA*&=g z4wUVYVG;Wd?JX2SQaq2RmI_k;9Pdve5xWGL($aZxqKRToHvOSBh~IR}*E1f$wkgD* z&B}07#hb`8*ahHZQi^Pa^oZ-%H(OlL?B?ne072bK7RXN%;KNt!=bbjkM$vWe%hsKp zZEKQ=(v&`u^8TGQZHIr%Wf}tI#IlGr(#)W4yBO{!o7BY zH06tyZ2L4sA5|0$4Xs^e)ej#v4P=V=mfdjo5Cz8#h%}tPX0S9RZ}_{bsZ??#Bnc@B zx4ac@fLHVEd~S3)BpW8g59b6|oBY*zhJ=qihc(@=i`~(zO-F8GyveufmVtyA`ZR}i z+MwO<0oxx#eN!SH4Ri=q{iccW3_U5Qs;d2J@Iwa5w-E3GAhe;Qs^mz;_b;FQK2Ftj zvD!$%?m^^VY2Elr75Nsh5RuMUNb}m_G`%6C8mZwmkwRmpTg zm1MDdw^>P&U#sb9y_9|mypg%G|CdIy^KF@Mv)0OBla|(Q0{s$5vN_Bt2>)4N4&gzj zkO#AtU71qp-DEWwWe=r9PZ3XkX8;R9bW)@6`N?j_?MWY1RMlXRmdkNH)^z9)V$hin z({=k*?X@4I_OmHg4=K~-3jIc0o!zXuTBVT3AuHjBiDGa7iG}S@8r{n@UYfh<`|#uvyaE^`L-aAuGGHOlb9a&1(T#49(3$XWxdQ z3JgnDoJ`JFq0l9|u~R@>fr?3O6BI3P%rSP0Qf?(RFxTYt!QAUqoOL+xIp$lJ3mttV z?zQwJdga=mMZI2t?i6)H{cYc8^tgS-rXAJl?o7O%(>%4Ma68W9`cUVX>BDNXYno%* zuuamz`m3|g**`on(WKKsL0LeCiek~jYj>g~2t~38%|PK!^ku8{3}!eF^(r#HBM*WZ z-X)+xt%~AQV}KwDSx-nMK6OoRVPYLB8!}2dWgB9vSODU{w}AFU<~drg6jJoN^JV{z z2Z_VWv=?iCCRlL>3n+I23|Bc~gc>fl#VSwbf8yr^ID^NHHm&RmIL#T}L!F zo$QgBcVMmjKuTXe>4?#V7@3aWKWzZZwrxULJwQsaHU82hmJSiU-sh$p&KE>(OCS~X zX_|BW>ilB?e0dY+Rd^!mZJlgQ#t$$I&yql_t&Y=A&GDU@!+6MVb9jh<$bX@CAKz3| zuQ{Vo3Td?*$vWN2aFC^?g9c4#SLhXd#Rye5lXPmisIclzK}^!REt!n){>WI?_r_&s zI8PLaA{%QD;&{nkg@uq#?Ru)0I*F+kp>7t#dPvWwjmvnOa!u(Cf>&Az_bRzpZ|-WV zzTP!^gi>P1`H}j3f!*|*1;*ry?x9|)4JUZ~m?e@o+4iDh zz5R*>6%wM3HNNPs zNI3P5sC<8_^|i(WP2@0cHyD8A(HT6dd@{GETd|Hz^Gw{d{+XvwjS@Xz6fO3% ze&pj$*oOGJ7O=Jf&M;6(i`*x|%I=T*7leUkR{;&!Rz?Z+Z#*u!@A4BWNsU zEa-o6A@|4pKyqF$^ErQ)W4C@}{dCejw+ia=o<9>wR+@J8qL;sYwY@M&OV~!^c$s&} zkI13lf@T zykqI3(=D?0R2-(Kyi8Gg7+<>gvjz6dATb~Y4Vpr=U|64cy0~ne(V!nzn|agEk^j>{%q0$s z$ZO*+7HM-vq~vM24)Yu%`UgVD07+Dr)%mIW_oB)4%gM^= zS2+ekI=3FK02ZsWu?`VD-KBDj!E%XyvH+W_xI_p3dsOaHvgN95GAzNOuV@$p&6_7uU#B}LLpbVKryraWE=v=U8YO2fz zdtgMRPaTnbH^na#WD9Y8;=Azd{&^KeT5G!c5S_=E1>$U?YG?HPt{7XY$D z)dljPt8FD_?MCHtO`lLWVh?g^RjijfcBcwKM8P86s4I~+YZH{wzkU^5wu;h!7$}id zoMYFdHOi;A`!aKid0a=E(55Nqjg%@e;#(WdsxSCdjFXr;m?#g2W>%>|WesNzRFs2i zZ3EUNPghmO1cjtjRi49IJXl$IBm`?}X6j-da<(>EAZ7>K0;6qK__*5Muhw7FDVF~0 zG5p0={=O(~kqzmz2A^8xake!%_10dNgmKVk)#a$JKT-jt?d~k&rIO0oyN>u4#m+w2E8=QN(;}-jFimbUtQp`Y>N#8nR7$G5X+B9YzNR7_N>jw{0RNBGd5g@G2D zaY4<1pz1s)!Ji6do_RP1)7=+giSYhxK#hQ3tOoA?nwz3GY`Eh^5kK}q$l~DjVkKgo za?(+=z2$pe*T@D^;bJbw78xS+8m-Z@X>&xews%54B`-^4O$gpPB3RE<=SGGp;X?z7 zsX;Q{z`=<_NwsQwnm%_xcStVMy}&C|c+yBUC~o_Rz#baN_^3E@fpw4u_24wpp}F@3 z*(d6HPRxW8c0n1TDHAItFH`&8_v6R8S}9hHlRAC)R7Lu=rUypOqO`aQ;0yggO@Btm z0`&5^#a0jg+eOv$d0#ZKgRvi5o=IB1X-e~l=dtTizF1lW(iLnKHZiAcRdPt!N(O_e0h{$jw%+) zIC&^7fs79@D6z076-|z0XcM#uAM+MV={ZJhC!vAwzA;()=yc*Rlxr22%hRJWun1^NbcBYf z-e_!u&=&;&Ku-_{0*Qhh1DW^&#<3K08cu;1_B13K-#JLomiCh%V7h>JxD^xdY1q8$ z#_LSYPq3^=fXtE~52fk&M$Q8)7+O>)D(UQ7AfI)3=m#hm6_pXrkx|OuFTTudubao* 
zz77-wiG|67MbSE8${9;XS0x4|{@cXa+q0Je8JpU!-Q4Ze69;up>s$A`+tc4|?n?wU zQW_tb&gH?&op(0cR+j9aoCD&vH*w+sTjG|P%i7B4onT+2Oh$J}SeQ^h{Jd~JJOce8 z7lI3hLL0G=%P zT-|6)=Ro^hO9TMdXmPm$BlGqWJ9ZNd1a16sel`BUj%nS&=G6;!6Pg8rq{hAQnu*t} z%q8;Ma===CU>J|}v@;@hCQ39EVY-Z~DkoxC1Egt9eTEtVhQYXK)}o`xU{+{} zDH+J<42{WQD+2kJ1`-YwNw7OZ0BF6j?_i8+mYCall4?ek_zC(CTj5VZ5;i4kV4?m%N^KrzImZpTkpen_v=lYK>pP|LGnJ~2*oI6 zUOCQ=ux#VOTvmNcLlP;h`HJ7~5rc~B*fEWuR%RZ&&Ps@F%t&eQJD;gf_&=0Gzt7iI zKZ#FEE18dOtJba(Tqktc+m}7PWt$DMqiKh{71U|U#Z{GuPN8;YB5GOq93!LHL+{WR zM1b_$$+_dXlV><=nN*i)_L-ye)bsNbosxwPwP|1?n^&BCDqd`H$HE6ODxa-H+b*bfjKS@IC z`XnTlFH*q=^ygRk+uZrEd$ZXyV+KHDw!0#Kj$m9RzS@A_e194wWctO3IX2)+kBAQQ2op@3TrfFRPbtf2QYGEm;U(4=rz#@z*GWV`&3& zj9KW&lGGLp+j^LMgs*cnQ}%o-oa}+%HGkwiUr?j0#`d}Do$;mf_-C)ui5KN8=WK(? zFV|6`zNKC&MrhjKsET~$96GJXaC=1|=GRt9afFo}$v22&J8mF=F!Pp8-?f{k<;!>X z+TV#4BUFHWl)}d^_06m@?qCN*d_SB+eLbjei3x)H8nE8x#uZ@r9mfRm1Q6gm3I@Vl zV&8GS(HT~NH7%1iD?Kh+88C;S|1$Ldp1+5FUzPPtP}*d(;YzGPg!tOh*R@Efr*Jg2 zdH)AeEPCV{-d5*vt^Yg5;7PN7ielijLRjbbqPrOKZpQmChbODmZ0*RQlhvS8q{qwHpIdM&p|tc ziqEI?@rwbq(_vfgH&!EL*J0l%S;{{T-hL4s!Kn#P4M@aNYGMYo;wiu>x%!eO6U}np zzQu;?!O0g(6IggeSHBM{vn`QXxP(TbD&kh@)O;RB4h*TbAuB6r#;G2S)5v z54#-oF`^yP zE!5a`AWgBP^l58wv1|MzeZFG0+gdwV{<0RFGxQNiq0n|nj`ij1^@>8L zf#cpEH+vo%yL#xiyM@rk`svs(TwXYL6O>jD?RZu^Z2br6fb<9eqfI%5G3^lhTZmVHY^m4=^FF z9|Fy@rtB#2AC8Cy0UM=16pO57MAiuYN;urR2vI9ut&171d!0L3RYUO<3!AeS71~2< z@T_iG`JtuJ?~iPb^J<`&PgjIK-DXd*(`YP}yWa<`VLZk$4(wvqyMWPfh?Gwz;Of+8 zdu@>zP6IrP{AGHh+x-4SXD}#o-)fV;<1(gU;8zuzr)FJl4(Toas^lp!Ei>3lAsCO% zfD?l)PSnb|ocaNWpx@_Mp&;<_`ijHv z1Vdt_UaOMbTPm9HasBm&dOx>WTlQ2uU%Gx2N>B0Vz|2w7?8VBxUniJaa0rnN#x%Le zYfUG%9U$w^wiT4vujlWKiN13KaO4cXb|*jf5DTCJU}^>$&^C8{ev}IQON3^IIj4RAkS+-+X{0-(kyJvuySuxQMp96^ySux-ef$2tKUvG=%ssPb?<>ypxa`gd z_V_B2<8T)nJc#~?CPeuOPqlViM*6{^2YB9&MW|HW=YYO+^!mrGA&O76T5^p9EL5h0 z(-E)-ptcrmY-T3s;85*3vvu=AS*)-ioqlv(%~$5jZ}d_jikfrFjUfB2 zq4;Bh^%G1+NtgY}!M2fOCVB&-crv$6Bby^3rwBGl1lKQiGYBGj}_Kw2P|^I z4R-X43N4GHk>yKzu?N!2r=YyOi~70wu|9OgObCb zTuuISxdBUJ<{HZtl%m{YRHx;uEiI;7l`vvnmDb|EY2|8co|$*F?>0az+S=FGg$iQF zUNXO};Y3uOJynavrs~zH;%lk*OJ@CBT)cIwy?Aq?OQ_pKNpMG;5N@6@}Vh2j@9L>f5df8(v zs8!{mrJr;A&vzM*b#JyhSES=h354*$Vbu3(>C{WB&Vf_5Hj;#YdNT)_2fHajKA9t5 za*6YFAHUKn9OhT1Y(q540yqxWXrUTgK!JT_iN*gH2C2ZCtH!U#(07`=&29Hnbspy1 zoNaiLOQST%w{lQ>hY`%YmBbnILZUJJTv6y7skZ`NPnDOV&ilwlK1+k?A5KQe>Y~eE zl*t!fly?)m%Z-s8!2W)t!4<%`Sw7ZFMiDX7h7ylvmlc^hJu0%~=4#nH*eJ*aefbpwxr zeP{1n`(l`{RJB>6X$hQVPDLM6t2;?i5GVU)0mx%9Nzb^&0rmgpI!!V`t-3O zMsyuafUT8LAN`0oyJ91S-{lZf^KZFvFVJk#5I5VNiS@^MtH(8k1DRhMTt&+x{A*Sn zdeB{Ua$P9b=8?j!7WMv|d)ZPLANC?QLx}pwp_gg1gROZVJd9`2{OJ?-XpydZh_UdT zdW)du(>2^(O?~m>(i+^mm*LS3{%PbaF_15|6 z<}k}Lw;zBB8{AR;AwJ8iE*z&1XAXGrSmEKP_p*rJu29Ix+uB)R zfRzI{)PB{QfNbnIt%1G)HlWq(;d(#&e=m9#Ig4*ILvX5#+wF+gd0*9HEJtFt5^og! 
[GIT binary patch data omitted]

diff --git a/docs/source/_static/thumbnails/explain.png b/docs/source/_static/thumbnails/explain.png
index 9004ddabe4d879b59f202b21256ccb890ca68ebc..6b0869a7b25e3fd5ae7befec85b036dff06f9065 100644
GIT binary patch
literal 14188
[binary patch data omitted]

diff --git a/docs/source/_static/thumbnails/heterogeneous.png b/docs/source/_static/thumbnails/heterogeneous.png
index ee6ff12dfd5901933b5a18d56e228b5d6a5fb881..86c8b2079548d95f9f0a62315aaa62fccf1ad320 100644
GIT binary patch
literal 33157
[binary patch data omitted]
ziuDS=0Dxs2&;sEaH|0Rx`y;RL zwXXjQ`!BrV`Ua3NzTV9INq{({jp$mZ`OVNeBG$i^pRIga?X`@RX%Ybt*nr z3%eQi;wgvJneqg%vIFw#)5^1rPwy8+!=cH^+GqX^DNQ9(ev-$e{|rRnf>R zO_c}l)qtY+5J0Ort+%AaN7PsONo*t4(z|vE;`}WOdg(eV7M>KR9C28AzNzaju8fi@W>xM8M%#` zaCH)BoR*nft&682X+Fy6Wd`SOA3XP-yC$?(^ZkAIsbp`M16x(yYw9+{M}NA|!7|^E zLB~p$8CNAc-|B@uy95}SDwUC!={XwfX5`*npEUL=6t|qRD{KKh6W@LF_hQe4%N# zL4#f2*$X4e2f!z<@=I-RLIV*u0QA?J)8k;JV~cY~F|Np@nhenhECyB#YpI~?FeSL$ z(t-G_@#bo`=Gwru`+YbVB=M{T0%!!+ZfN?+N1;Mn3jq58@q@0U$rfM_KJ5Tn$>CO6 z@3+muTAp~vqhamdtIZ^cdAc}L0HC0(RInC;*%}Vgb}jf?CLSqnRd1D|1`Y?+Ggg({ zphgE(3M&f5^UJ@t9RtM*$_JiMjDFVD>AWrdm(GJOPJ8``smX7Ra(KCoTAvVvOtPsr zjY8kK+FP}MG^E`#qW_#0Og!BjMdCTMygGd2|Af~$n~o8*Kadg_RPt~ijyX|UvF7t~ z)`50+UHgQ_eY|0V=#$OMSp>13L-$AO6)k+(d@e>FI16?iCGOh&q2b9publV0ncVho;MsiPpH2<;fc7Y-56Rn^&QMI zG#8B5uvpg@r^w)6zMP6rH+w!(RoX5c$XYEvA4akN@gX6lCEd9(ih>Cu_;*&x(l*1E z`_$|}6{6;Xl`kgk8|d&%eJ5C**cnDtLEXzcIYgsm2QJ*}l!;pwv!AGS3__@se@8*R zW*OVDvSjk=$f-y)92w&y9PGA5JCi;}k$8xJZ+OQu4RAcvQ-Bh5U`;?4jpZVm>hkN} zc#KS`tSMzeS8C>PNXzrhiCnNi$h7PR^&?@vtO_8?TV-0(UC%l!^gR=G;B&pkW+Gr_nTZ1@O7ov@ zpJNz-(bVg}H!I>xVi+mt8qI+y*q+#Z-vy{ZV3r~dldKU=G|!Q#BFU)IfDO!ruS;ms z&YOeCHDLB)29AqMK*JVNzKH~K#3XkJAPYUzb>maOg5ku*Q9K27xaz6G2ON{(i8w5> z|5zM_tUavL2@G>qA#Oc*?52yx@QH=tSkO{PE1yx#R+W!Wqt_>&?Y0Q2ADO;-evPWN zjWjJfq_hhngc*e>ngxz2zLCWVuX2Kz8KmTxm;&X?7h98 zz^Tj}t^k%hyf+(W$V%u@n~?ez!<0+K^bQWDDJ7EljPA(}fxpsLu9|XNZhT@}X_u-R zD4qIE3Bj%TU_M+&VbObJ{{fxv35grSYa;lLuf^^Yhifs>qbk1-_B7qJLfc&fVmx?X zIs4ZZ$iwMZbevRTD&t=}`-_##FY&;vOPK`8Xdu(E%Cr}))mBlrK%>I!j&|>Do=Juw z#mRukK4b6+b|#}oOmhT>*$i+u%JotF9fxM+Z4r}s!4h|U!c#G43(v`UDp+#>mpCvR0&H>8af%TOT&#uI?ROCLaqGHwNFW5i?3 zV2Ohvs4#qFSuf007ONLDX+Vxw_lWn|mrZ-#b;LrZ#>O6F!mSW|CO5g1a6i1l(MsmF z=8BEIQbzeb$+?$Jtr#1%TDf6}Iu=r?%RRre#TIE;D=n>nLUnSkNgb{-UAWxdOoFRy zh;mVjn1)B0MpYfWy7aJ@rM0f$t@EAW$2YR7`GbHkmv$hQBGpT<3RBUyvH^t2O_fh${wLE1J#^=T-`q!U0KSnn`L^{4Je=0O_A)s06)FG{edI6~T;Hy?Od#N4M52OWarpO2NLM@#`xRGxUvKTd!VhFpJFSxbjBbP+j1?p~f0EN3 zB>N8?$mfBOxGubGqtx9nU?N)->f?Q%TMPamuOW@AFc-Dh3OUhe1D!EM)DE^>+9MB_ z*+-2fqeR_nA5J=F1A;>+Q+~Szx>DYTY>=~&<3Qp)KA5=izo5Q`c^To?@jLkfuxOEI?p-f*}e_L z^PAWU$y9{BO8|!XnUcWz5@RyM$a0^zV1#iveL9&DEj)6{FV{D?s%Woo43p$!yDcrD zkk7GS_`K@fK$UIvIuS4Q8`Ak5pOBHUEKl_fTs|RAaKX#m@Tf~UCHT7TWI}}MO7s}$ zpI9v#FZ@ClNET#+aottcWC*JIB6>Hte$s?x8Y4AE1|a40=lnPPjg8kwMdM8z%K_ih` zC^S>Yg&xLAJd$W^;_?|X-5Si-``ny!FM*N!>3w3MOdG1d8nJ6povL;^RU2`cb}H=$ z6R8Yu#`Z>ex^@@skwaSnb$u)6MsVGau$;6p=+FZh4i(LqR1$-wnz>zovxwqJ^Lm7t zz+8tpnDIUPIH&?d$tH%$*!%H|aL#b`G7bIlv%wLJTc1Dh?R-o$&!i@&I~KkUXX~(> ztJVOU5?p!i&-wRN-?0|(!oYuK6W)h6PtYQB*w5h2?M2$r$XK3rsNeEJZ~ojvnWlm* zK2)@mgOB<@Dqm947$ZlkMm@FBca)|3_;((n7dN)2l2Z%knNcJh?xt}WjslUpf;HD~ zDMuUhjeFF4f*f|c&Sy76NHJR0Z(I7S~R{5xD_@p$s^cfkX zmYn50pGWQzAj7CL(&~O9t{_Aqn?(m(koRhuFwG6IPYw8q@r0QT1;vsgSd+04i)Flm zoZ@bFA1So32q47<>NNXYN&%dy!Br9BSb$)o-hi}j2|b@EtHo`WhO_{>$om)}d}bW5 zYa;hx>X0z|K?SGO~nAF`XfrPYY2rIr}A zNaAa_2&7cG?qpmz1HJ7PL1#P7{Vz&?a|ZsJD3UU5e*y>|`JHQBc;icdWoG*d5p~0F z1*jJfCj1K@i=nEhRd`!D8-}y&mm5DC+*DDHrcZ^J5Ql?lX4oq_#+#CwlRS`YtuI;> zL003<&DNbv1OzVFN)8<+R)_RXO_<|0)?iJ9_*2s;@EKJLgx?N3q1HV zYr&G*j>C)JNj}_oG@{SJ{`~MmKhN*10P3BegFz+TF-^(Im7%O&HtMnp$`sMe=PXej zUrny#bmfYWwtkj*s*m}fZ1TLzV!X#<+I-i`(LahvRzC{Yno|rnAuH4@9miv*lQ8t;;V8q{ob_#7< z#4T-XM{RE_!xi6)3AGPDRDHPs9R9?v+7s+wW%icR{$R~ZJ!zBoweyvnAq1s?Uovm6 z`3dJOOl<*6ZuYq?s4R^JpTL7^??bRkf)fe$)SwTtGUrC}fO4;V)>n)zsAMPQktJ6f zpieNI-4pM{9d(`_jo7U7OgkaIE~N>pw69l&&Jkd=A6XKoSLU@tkKJaDEQJZpM{*GI zr1YT5#uxFqS?aPu_PqW=lr2#{;>6`L7SAAivd}1|^A)ns^y(mxM1OdDPzi;F7psVR zms5dj7mM}6pahIit@Z{HNIf|nB5xG{B z@R-qtd;uZCSRcd%p-8<+w^byjS31HD;lJi zlX8rQx+8#{!#?4@El%wCTT

      3uJv6iPF#1u;z(zBN{ob(@gBx`pp8v_}5w~ZpiNr5Ug;l zVyP3%fa)N>Mly_|iMHoTq`@g|uyf`2|2#orKSnZfZzN$J`rc%mc@^JB)$GOsA&Y04 z!;dhn@@UBgDyUHzk$lN5xFp;4x^TQ74Slh#8~Y9Hqmg5KLf^y3G+J10XHv8fFVTWO z7Pk;`EotDHVo$C9+~%!WN*rG{-fFlcCiXqM#mZt3cy&}pbPcqX(;Z&6D^)R?k?N>ZGJP79-M8{RwVU$WC+};l3cgbzfj#;y;%P zp0~1CYYMS%sNxB9?B9{5rvSjl>gN7irnv9#Pn$b#Uc@}ABQbg{kbXnGptIL1v^}FG zc*~IBkDyB>AvTB*ko(x&b!x36a7us;vrs1Smg!h*l1?5C>aY8CKW_;ql-M(ay8E$? zi97{X6#hOW2rL6m2zn-kLs*1EfD|f`xyBA}G7;|~BS6M8WE>D}0pgb~jt-!PxqNb= zu+e5B1S@xi<6*XDq%<7_OEX2Yng(HH`kRG$MBj1Yf^hYRdxA=uNvS5@CJbESQ+h1T zxl4QT@!FP8yv|#mA~@$ankN+JUs5U7VjIsAbTg{mAh-L)w-9>H=FRrZ0_{?<$9?cu zUl^$KGIm~w*OCpNBKlW69!zjsh6=d@RP7hN9HjRU=-WK@rMjiz*(hYrL|EsEzt~)3 zKVg5ziX2@mh0H=(L(;1c+%Oh(u$08emB30c&Q22Qi}Z3|-to^l$HcEYNtOfWp3OnF z#^zEV$zsoyRi^tNk55Hv6Wm{pDjp0G+6BxXv>+zG;8mRF7H5UV^L<)Kkt+^nyJ5D3 zIV0tmZnNOevL-LKR5ucqbSNWC4j)&Y@ThAR5~j81R_q!ICaIOq)egfpXAu&=K3(*V zYa<;c`rt$tPt3$3D;)*(2yiD0zE#kG3O+y8F#o; z`K#Rz2*2FN^?#H&GMbOg(km^Fo^S4vFt>gQ^3`yW!1=Gm9{c`U0(r2pUr0JRf81zf zcs>5DUem*Vut(ZVj*M0)4>ZGOjzox0eM?h$WP=ST)%40jH`$>cP}87F51Z%0Z4p&y z6;=Jzo@$)1@XBV2r$j2f52f5kN{^F1C$Qv=|FC5|C%U=*rC%?pg&uh>K)1Dmiujes#6o^xhe12pfKkmS-K4SUy zhr1fV866M(=h)Sa{TiXgKk61yiA(@Shasaf3$~u7LJfA92Ce(hv$Hz+co$-Pd$58N znua-HJ6y}M8uad2ej%cAYLGV32z!n%mB4@{ON>v~7Ln#TBg$Y+OVbK?x6%%{y@oTy zNytTB%Z0M+M;J#ZaCUp<@24m$bFOFoL_xAvOfaIbQxk&RC(XM!o zP*MC&aD>dmdzq|ACg-o4VY(@eM-EeLp;0Wx5l7Cv6_yRMBISGv^{oF~OGvxt%k5Z` zh_kouXa0uCxu`66-9{eY;pJw`4I!>kK%X>ei_pZ!s*r+oK_{Vx$fY)BQ|ify@v$cc0ecTevyhY}ewaG$q917RU9&u@yF3rA#$hU?^Bk}kPe&hEC#}3y; z`h&(ytw~Qh^uP~T=ZA_a*zzIP>-tzvmHCE+Z=JjdG>82obA#}O899kZIv6^= zYzw?(x%*9LnS|<8Unen^!U@Pev88M&1j^ZG>q;=o2>cq;Pum?y0ZsNeov%w#*}Fe# zyxuOSQRH3i``e5amy+5BiY9L#UVi&}E#lxgy6w(jXLE8$IXW|LM!VexLLPf=>^Fgt z9>#_%)GdgI<_H9>R2bhO6?I%8N*L7d_$TI4m?uf^3x#8fKRA)EC!4`YFZ6x0vtHes z-sk(ehe%qVt^W%cTYXeQnabs}zdfRXPlnapr~y~~cN(wyI_mkQ!q5X`&J)=meamn$;#BZn=+$rHOD9C~q{^m*p5?Y;%lOVU_i=vH zk)kKJl>9jhIIqk8H+JRx#<%^89#?G@^QeVPa%Sl~xDDj@gGce@ zHidMui~TUX`uB9GQFQQ`!!QR#e6U&MiTA;FiRZ`EQZm~Ey(A&y+?jiOFmFJI59aI0 zr9|=SW{QIn?>zCS20FMFtcfvw+{%wIf6vXCE_n@lnJgsn}M+!F_1f3D7rB zjSqUp&6h1s1bdFGl^o`cD5NFaCsT6c#AKBl*#u0sr=;fkg&}A=_o`gqEWPj6+Y3gU zcg0u{o9v(L65E;He)$65msyij11VG$6GZTq$o8k#fh>h%lhh?eIdtENwTw}ptXTOK zq7BexrzXTBU&3D4B=HqVE>5SaB&v1_9&stN@W~kAp50k0ET&&sFi-wmx0P}@dX&ds_h?fne47xxR0gw2u?E?wNWqNSVa*gS3L;EFpXUw6uYxm)GbCz8i#F>w;-NAk zP{qy%_QS6nU|taof&-CLV2_YK_O~IU3Va_W&Q3mL_BL6|iy^Bk%>nakw2;KOXRP3T zBP@@sx!*Ew8lxZiE@PFml)PD45f42V&8p!Z-5Kq{f@SO@Q$DrnpG|k#Fj`{dKjL78 zb3|w}CsY($ZO}vtmhX({CaNz)S8zsiqSUC*HFKgs$O6^fY1;*?ei??HNO$*{kFVAJ z&7S;4tDx(AkP@a8*;njBP%P?xdCc;d>{a&OIs4?$Ou81rA^V>i`?!aZ{V1K?UX%6- z2J>A8wE-nw7eiv9Q5k~vx$qW&tmnTF`nBYegJT}TJ1TjSwNL-LdLIi=bV01><^_D} zQY?6W1`^8fRIgfUTRx5Nf8JI>kc}VGnetcUepUpOcVeY~+OKdVm;pzUv}P2^e|XIp z*T?`_1UnJ987U2JTd;_@H|IB!pc1!{IXnj2=qi5F` z5a|zpaTPrkU%dj_lkbfIm&r2P$k({&7l?Ky9sQMY zZ8vUmJHVc~4){P+&@LkMu)+X3p*R5c=<@Y%pT1okPx7V(UZ1_XIvQ06@V{@NFW{p1 zinh))dTyb62BUO<8dMW^&|LhNB8eXYjD|1?5l7n=*09rg%>};#{6*avRe-_8ZL6gH zTMQ3^lg@9i`D}{DvFwm?g_hNide>m{!V!Nt$v^H5MGYxGSa2N}UXa@=-h4(0_Y%flonZ zJ%cdNh97#IIs6ethq88n{jb3YZj81pUY7813@?8})Q#0S%b=t%eiOd$zmGy-0_0ie=-sGJSB zI0OtTU(n_{w3GGhBYJBx8Ub_L0NQcyw*}CW?w=J-sxo3ljn_WFEKK$QP+wExC4kAL z^~^~6UyJ>Xy?yP#EKe#=^<0R7I)f2I!0Ei-Gwfezbk>P9}V-AArm3#~D{4Jk#Tm0hiZ z6V9=cPU!I|^i-AvK#kXd4x}pJShJTbRNu_GWve~$qUg|4a;3hNRBVZ_+S76ex#O>U z%e@{kQ(@Ho_~IM^mwg2xb2(`q6Zakf zBI~QFX%7_g21q$6JrXBVZa_e;+8$4P&Xxn4v!XrfP}<4<>XUt?a$Z5DUPAz2ZT=M@ zjD3Rk)j1gJ!`#}b^zBN@0mq!R`Gg2?=nkgvoK1jWOCRIGo^}UvUO-{vz!NUj+q}S) zGyce2fJ2TAon&YyIT-;y6Tl%EM0XD^ 
z%1Ht6qYDh4i7mf-?xO#AOBWEB*yA=&8gKjy&FLHg?-loiThh4^*|^pZl$twWlocLY zu2H*YN4y~2?w|GiYxAP^(|`0HUF^JeQs~zg$BiH|e@9jWXoME6X_&uS=l^g*Mo_Pz z9VrKc9#AL&F>F|lRFstn7v>qkYZ~m;1bqoNMM2S?#xj{+6Dxdy%jjESxatR^z?0gQ z^KVd(Pq?JztilcjM|4d-$Nw;ZkFi{IE*nX;^dOnnN(;^nK(N637 z)QBU)+cfCyP3`OzPkg9nkVl0hy?hXQh#?+zmrU$XS-!+!?n(v&8sHi#=ppge6x}Ij zCT;$Ke*JaQllq>} z5A{o;KSyd(w!8ujwZ5Z5hoQ0Ap7ooC>&KIh43+L2NaL+Woht=eijX-1??Bpva-$Tv zTZ<z-nDzeq1esja3vTW!4L`JA!v(fq=M5GZ+k^SQHbcf$|nrC3JqW!(_1Sh;`^%A z=@?8?(&<>MzYM}?m44{#i@pm$jor>9kX40ccQH(mftW@^kH}n@++>yH$sB*>o!zx1 z3w@%49Z$}5M^@ZGdnevI`4QO4+^7eB#HwjC%jb@s8uiS#)pZp4w(F8{cl2{t4&I?U zxaPI+jS+}bY8}bo^VcU=+M7q@Ys1^~cxV&>Z?9_H4Ff3-u0lz5pf?7qltq(DA*X_| zS}5SW7Fq=-*vjmLBw~=eaMAj#Wda=}B2Ep(Q-H80Qogx>*M5d{C9-a4&x8Y3G!70< zw^$!yC!o)`|JWZz-NXq`(ahLqmo{rhx8`HW0I*|BUj+33%3G6s&zMqK?%HS@fg*(7 zhR-`1Y^=+@b$hGh1k9bR;Kc3I<#J^N?-AOA&0&h9IQ}%X;-qoFbBMwS@I%_j|v%(EIU@s>ph9c@H&xNQ)kE5(=pXCp4H>R;aCjWJd}1St6P{?YZJ*B!N072}(_8@;>K+^nF*fLD z>dy3Xc+Mpbm^pZ}yZxxIswNlJp{1^=puX9i=4`J`FiN2TYxUaVG(EvLixP^)7-@w> zbB+9TC-yZTjx15JQugB%GuwkPV!eajI;c>Xa59)7Sww7CPP&jrF*AdyqsYQAj5ua$ zd7&fd8D$Pz);O@#6w5-w7dRU{?8sHUexo(+?#O_jf%+cf7f7v8`DR_9c8eA*M@x0Y z*c|=cGIxH-EbQJmp263fs3YG!&!NUT4LJ7Z{=t10;eQ)XfjO+-B?voBcuw99jT~{w zV|Ps&%Rx^RVzGyJ7pwIxCa*#U-^?i?NMO`IgC;!A#fXM2F!W$*Z&_mQqp?RaC-PP2 z(OfDRvwfLjZzjq8aS9e6Mo%6~dFE1Lkaqi%S+@2N=i~q{;cDb*#b8RPj83Han{!#5 z)RTrVFQA=wPh_?FI8DIHysuId`8lq)PS5<>O98{7qo$Il@zuzyjl?lV?VzPFVhbirgT0z- zR9aJ726V1d+^_QDC%R(Cs!tjds?5#wO0&^J6NjlZn>gRINN35;Q}onm#o^dbs6Erc z!9IS&RoKxlE^obrb;jM< zr+>Kft-00Y`fRnAp2|fXzcZ+p+z@fAh0^P8N<8>ZNKgftrfGM6xGP}vG4NF)iQ!bs>#R7*_K+DVk-d{f#LbP(8sQB7^_@#FDLhvT_bfPfA0VuJlDaDDI zYHQF4^fwQfnJJl@%L^2HDI`g3^HeAUtN4PqDGTOxjNSXL{&M>>aX%BkDBWXT55^@HDPjfWOC>-cHvP zKuWxC1isGTAjzA6jCxia2hvmxJnFuC8*Wh#1+#b!uQvk(N&H-)oMV>Q-3Tm>=SAq1 zSmbZd?>B52=9}2FW;sVr<)!b1)oW5E79o^o{eo!HB$t91AA|TR(QClloW&|v(al6& zrA`SvAKj#MOnuG|Dk?l^n>{VxezY4+0JAd>siZ!g&~Cq*TNLL^PLZGSd*6eRP)GYf zX8dK#SeLrLH+i`>4#PodLmkC0>z}8G<~(r}jO8pi;Z)&AEM9xEPTi-!7t2oGJd(d- z?#htQAk#WEt?kNnGA`jdTafK5=cLi-Rp(Vy=p*s@c?qWP;hdvW?()D7$4@=I#AcU& z1?=$ZMya)-Q31j+*FXoJL1G2{jweckz?FOVR#FR`C2as+f$BtIw*@8kxv(vFPqQquB9i~a*PD^`mD^Na9U=I26H(Bo z#)j&1I23=La_{~UcKQrWsL)o|oU)|()GF|86H>-1;%|L1Wbq+=vY25mV1Xsay=#E9 zb!`8VW9-k~!>b|74==`_CvbU)|EjN$mS~0jX|s;X!eauOBc`s_ikVNff=M*8QZ2$m zcLJEin^R4<$kp|1vqJZXMn#P)hidS!n+A z&d_H&jTf)4TOQ|saHfutPi^7HS95XgcxKvgT>uGOq|7q3;55S~WmuWDC?bl_W!dCc zH~pm_nF{OGiN-|A!b&L7<}x2Z9vk3CaVOQ2 zczJUJbH`_{WUpCcU%*dI3|zRtxW&N8kQH{iO{SFX0|8nP!2piZ7>!Zy0-M=Ei6 z{<)d&=&Yz2UomYBg-wDocDq z;C|*IjQ0t_DzqnJl21V*-fTroK?T`Qq~iAhvbr&nnf^kAlgU|>P>k9X1rbkbH*$%Eo5S2j6bo&DDg4*9q~?K>utXCXinssp83PL(wDj3 zfNhgG$X$TP{=IxsVO2w&G;OH!o-8A%T0vsFmZ1&T2nXhOMP1c0+2lik;UxmT6D8J$ zx#&6dQLl>G0mAUR)!HASeeRS|5aX>QXRm8LbLVGm@c(1#E!d)L!!=x*p@#;kp+S*U zO1euzl#m7ikp}680R|bmLt3Sgl+K|M1VOrc0BLDvzkF-$z4i~_IGE>s;*RS&Z*MD& zzh!|##1Io;*)W$(($>pH?|JV&i*UHVyF7j8ThnvB_*iSo`l5M65blHfzYE8YCx<0k zi6qBvwo~A@i7mNSfLA6d0Sz>Z&=6D~o%2WI$eGS`A`T+&r^ywfoB|Q7!olA<2d6rN za)TUoa?P11L(&kZ40XSMo~W2KR1FjM82?{JnsE3gU~s5qb;iTdkO=J5c6=oaEdGAKh=_dyr+kE74_6Cq1Tm;*neJL&s zxMCTifwB(Z0kY-yW3Gqig~>&qx@k6?UE12|w<%ZW68C?dOZoYlwelI4-Mt#gVCK~d zwLkx6mK)89v?H71HpJ4GHBbJ>?x)es+j z&^!@5$~#)R>htR_D}>r5TgA9@U-SrAt*&+!LTwdD6YF*vkFw*Fx|@^thBG}gRzDGW zo0*Ij&!Ik7&d9zvtOB&gx!&W^6;AUhe#ja_lXk&6_Ww64F)vC;Kdvji=?hUv#MKXo zV)xU0xB)IxX%f|?&}ABfe*wZ)nso`92RGop)YwW&VICChG8XBNGm0l<4)^1`z?}9! 
zkek7jr-Ga3gFd;S2Yir+1q&2g5yH)?Vux8R=wn^aMR1hbrLwI=$*E|DPVih#sc~4w zf~B%KK|9}|5;8AhhpDRpS=6>R!4(6P$N=*+nKPg5yTgbsL?nD25KVQ(<92(GsHaC< zJ*tv2H@zOu3EW7HkFVbF)lTzfhRYG-Sv|+eXZoWOaFCH!Pi-m-avSxo_+0N1t76{j z#(&B4uGfj~PfsI!J=Mr*P>e2{6@KXjNW(U~k4)(*sXdwB_4tCex$Gw~o0)}!wap2J z^n%I&19)59q_y$Xk1!3s#J+|nf8NQNe5Kt;f7nh`lglDZ|BQFgh=C3-b zl5F)CMX(&Anu1K)pSkJo=FF^ci>#-2Jq`+{3F`a)?o;9;!Mt9SP$_Ku(6$mTKahETXcf_jI2mhB4xpt@hlKE=v#_QgmpQMW zKuu2MuZZ9&jVtc6hTR4*62g-QD|7|mc^2!ys#|Srz+>U}I{i4}httw+6jsfbi(Q2o z(S9L;?afQE1(fKr4?~!plyOy`Mu8+aV2oQqW0Dse_UbjgiKq}6i_bnB6c%ABGQX-l z;r@GHW1DUq9ARi%V=ua5X}8uS61p>YfIBL`(|;N;d-6E#EVm`|h4Nc1^?j9G{~qfm z4z}8eg-8wjUhyr6^RscBtTtsIe3!PHIulItXEPLuy8oFYHR6d!SvCa(EjMDOIFGN5 z>wRt8qkyF~k1x_8R3-`)P!b6ORKxH3waT_|`dy+c_WxB5Z!0-HMyVqw(%9B>^Z(-g z0An47izGV|`Cy6u-ECQ~JVmCv;Hn4?s8DgN_&U0r@*T)ZI?4o?lD9q{@`B zZHPAiYqi)&r{C#*dFI*dCHd|#Vvy_K9Zo_pw2C=%l{905&#w@vdNZ;Gi&vV}#6W>| z`$cShX0!)=ELWbCYk~en6JFb8HyMVj;(13<2h*-B)5>Cjc576XcoN44o0U)%Kr^G!Q6kvfE*Ij$4PPSb%n9EgKv!-6zqR&&CLBCKam3t7M>;~kPUSH}0D zJbStgHO9BLC2&tHlrj2`#g)yjTc?Q}!e!P9Jr@7!xW{2EH;Oxbf`G{FEQ?kowGwwz zZb0MTk3Smx4LL4YAaLU}aF>g0Mqr$8gvZZ}du-X7#Ejdn3+PV|Z0Ei7p@X_7W99vs zd`t7gb!a$`WL;bT#g^>%YN{ejGeE3TV zjtE}aFawKnxar*!_`e5hyIBX&blg1VZ>F6fbnTAx*^xKvlh5OK?#3?s+-T{9h0z`2 zi=X=TLy3d2P-7HvqM%x9BhdMSngrex?=V&i`5;0{l8{v1WLx7((CRHo83DK z;4R?@c-3>rwLC9~IwM*R9#R0Y$!4w~q(EB( z#f~Com`>fJsVB5`O!w9%ZecR#(Wo4$Srn{Ly8_wI(P1!zAs`W$M4>U|81`o=E1)!bV6yTWrip(-I?sQ8( zkssidFe51MQj7_=zBvhO&|X5B_Vnk3anIFh!rhZ?E@W>inb}gB?6t@S$O6~q5>ef) zlOp$tXSH<}Z9{Jx`~2=6S;eKlYX81ua_(cr@aEhH0xvr1>4VgkS#pceG`ze;at^>>R(q~SDJd4X#OK$ zxX5^5$v-=x8alelcAPzuPS){#L(iayms3ALj9TeLxo6R1wvN|q_loD?<1jyb9L5Zw zqi&qcrGvvI5#TfWu=$@$U>ko$zO1S5FKw^blajEk-Nh(Tx5U(00bb1cD{q-cU+9vb4lxY4%Q`- z)4u1q8Jx(RZoj8;k<)Ggvt+drn_ikZVU~F>cGS{`(z*fd(I7 zs5p-P$cuB!jtoON{2bOrWjVKtRfTv0S8K3O>H*8oe!uPLZRpsFtGQ%bX<^(+ZsR#A zzNLnBi_Y|s2ES)Mt@XTG|Ig)mwc|W4BU5pkm&R472{S|L{*l53&H$isY2qVgU2gs2 zm*5XZ{x3lmv)llQX0+nkGlNETHRRh2v{~c#sUeVA!Ueg>6mVpJq)z~^cap$i#W21{ z(+}i5$)56Fji-r}D6mp2CF*9004&ch_D%AO;1FH~xe3K%lpBlamQatWrD78XebR!? 
zUyLR+D37sgFQu3n`ntX!6yFCQ1ePOLW5n@6+we9o9$thThFBp#=q{o8kowys56jAc z63k>Zp<%@0TyaKD=*Ut;IFev2{!j)*CMjM?yg}!3pJ`t?c~`T8sqVHO2H-k}DQA4+>)Di*5i`CoTZr z00av!mFZm-a(?LF&7jK_E2}DTpD6^0Ju3e!*7(QOo}(Y}pslF#-~&Pa7%@Rfc?&Aj zNV@NmZRJdH#F&R3W&HS8Ugl3)c|0p|ZvT0a_2Or?`cf<}Iq=*?`wIY(QB8B}!$W62 zzw&19B=>)IJy-mL{ML!IV=;6v_F~F?_t=MIr1Z1-Q+s)jKb(du^X-D z{-KSiH9@-te@9V;hQqaFF|;%#5AbCVICea&(|DR z1Hnx2xV+b78gFp}8l|z=13Jau<|b|=ir|jROX5j$?dJGF*3nlqP%OE zRyj|Cc9Xbd9Jm7P$(Pcgu$XmcdR^4>X<1O(=hOnzp<@*Xh!{Ig=S}!p(IrX9IGy!j|Uyx_X0wOu~^!9fB+Y?%L9p&olH*O3hxGb1D{gn)o zEbZTMl+t^)JemrDH(S=AU-gZd+fv+_e=3klOR@nXEUZPAK;RHcFqIBBdt50`-7yW2 zID%7)Jl4GPwOK)(?Oaq3Ep!Qrq!Q&_S47-ehK; zo?N2N1pP|h+Ryf^_;<=S!p-0RnDYn`7>$-SP$k8~-$Kmyh=(vcmuVav)7w#4T+18t zAH1Nmh%L6Kn8gKD`EzJ@-9;p7K z&mM-&NOfMZEfrw*zvFV>vyC6J>eQyX?__5JloUq7XS9QEd?2lrKO}uZVd?0Pfs0Y% z&ZhjuXo^`N`gS^BD^r!RMf}Y`2ojy&Z$rBC#pg17PmGe%6KT`5O;govmjg()7 z!$s5kA8@(k+W5@oGpCLGIO|@mG`Dbj3uY+-J8+K7<@c9g>bZ>G;p-f!c_$2zxf#>6 zxE60Wx?r=w=?MwFeHiG&z)@t0=B?M&Hb^p65NF?er&b)AGvHahL}*j6#8n8@mzdfA z6zUeR;t#S)^~nt3+A*mYRp$?sohlLD3!_{`E&&pEvdp!{ z8}miz>u2~nCT#8BBSytg4XPME+&lRQ zY>almk6#Vck4FUlC%ZCck!eEPU<7AEq|Br+VS5**Zn9#yY;tU(-r}Q|$<(rNX?d@Q zu0oZYVDdkjB|>+te|mTZJ(MV-(H;bG6CZGf z^amRg00w5^uyp2qQIQ}kbAF-JaQ*>(^I^t=3{3RUd`Hd76uR!qy6v{=-6UBjVqA#(G3v%Q@DuHd9U1{5t?pC6twNF+fk@ogz)~CE0pcd}h#@03 zQ0v3`iyMNdH{C4c7v9^?BwFbd8UQY*R}zJ8=C?bsFI*WeHv>BsiuK8 z&1nuoU2|aOA0qc#2qzNmA_k$UhrjuIMR>;Ipi(q2a8B`A;IELZd{FO98YIzz&kL=q z`%vw=Jj5-SUpQ=a(Saoa;50{#Zt zi$6*r^f-L`BCv?Jh5_s8wuQy$maK8Gl!J2#(=@pQ&7K4$2u*?j#`#A-S3+Z?1!}*e zpY`Y0+h(m+R;Vl38bqN$dr)F~+sfoE3}*0Vd7njN=?I?=m?9=2o~Akur^%OGqK8QB zOy3Dhw;0k0@xd6uW(JGl-@00XqABuJ<{?&y?DYg{p<}kRKr*QU=uc&TJze`@#Ev;UU|nj&N;_!-`PERs@AS5kW?cdwN0a-BW^ZiVo~hM{j$c#Pjcr;dNdw2#sHds-3|{PEde%;gZbse1b#un#*;4e$`bVhJt@6QOAm7Twav7@J2k)`~$sxz!8{*+? zdv=PS5EdE_q5a~=vK@AXm-T+zV+B_h#b3=&uYBcI9uqdC0xWCph`w`+y7H^Q);aEos4@MwGCt= z7~ROz`PFhG z3@pb+^J9*mkr7Mog>irI))HS)Xe4$@#sqI9^q2q-{HjYvgsQLUGZj=Qy>|UqLo3;z z&N2oXf1R)p=WI8roqjP3=4;Q2bIZ$?h+tN>kX0>FFg|k;f<7SsgM162r~TU;{N(f3 zwDI?!;?HwZt74?tvCRV-Y% zp(@HeNMm2M%EJ|NyJZ*R9(FD($yn_a(#_ChsT3%LG#2rey$)EW1zuNHrA!P2PRB6_ z4Zu8fBCO0VyQwGW-A5AS1@25wjC!0iY6#K~13Rj2bB>LA4k?x|zdEFZ!X%|?rcQ?`RE49pae*>&8P_NQTeazOFHf`Jx9_KQJu6E zaMNCDV$xvCztKHGoo=`k3HdbN=PZ%|W#ZhGw7(BEWE!0{75&LKfOGSy9uejC_;M{j z$1l@KBT-0jBLHvcWoP^ayF&2{pw~W4D`>?ce0Qe?He8s88_idar}AMpka0YDt<)(4AaQ@a?MmA|aOQh=;_BX=%3t7%qyc7R2yCxFRYy=AWlTEAO- z&!X6VcAd8`#7x!yF+k0Enz9U#ew9!`7Dfwl>18O0G?6`MSKb1sxyNKz^nk`eR<>1j}a>{ z_ovj&{_Kip%re;nar^W_T0+5fL{NA>XWp4w>zdlwr3+}3-EnPKWdg#X(;oaBN~}mg za4P;2V8&dDEQ+)r$K2BG@0p=|?-`1g9w1d-FuD^rKpl5DB-Ygq{Ispe$BZ9oMY^o! z?7sc%kbXe*Z6lTw&2J#G8uUO1ab z-OP?{Md~7NJ_`eK(#_?eK>G(e@&nFS6~I&J{>_E0i%bFN`z%StXk2V&OMD1$Yoa0# z^#VeJ6=In7+@_H@$btSwiv3a2&WRFQS3=@S595{X=f%U!ipa zzXqqG_jL9;vr|)y0x~E zh5v2z{1h4PDr)-@^z?6r09hDJlETC6{`$PvF2I<;Eb!*ABPb`UiH3DtF>8(wg#)7A zaxj*lgA-H=1bGCYUjP9`=EwA}yjl7I^Psq*BS`r=pNC;1lHdYp%It_gY#RT5p^Q)`ikais^%Ar+- zhbqiN$w~UXh5gUV%ziTBFpWOW3jtkhg zy7HpB?K``Pg$QDUffUwD#<=m@^)i!Z7I9$(pMJni*#*ni#mM6ZEqhhucp6*SQqXO? 
z%A?cg$rly(za3KN^XCb-q#gQvjt8{G!*OMPoQj_@dCdxvSN!qeC-geN^{evJ@V9uG z>0gT+?)ZJVC_tvjGAuo9{kN2_3n&`N>Z|Am&vY8C2<2XB1YVmR)deOfQVrq=Rd@A( zweF(uCBPE-7kPa>>?Fo8Frn=d7zz!IRC42?VD8a0Pfhu?lHEW%Z(fet4!XDfyS;pW zuv}w)RH7B|IGP$7j&vNCqY00Ejv(olFPs5HGI;9W6`L<4$Y9irx5-#Z2uzmfNa4Ao z&fa=Pu#Y+WyD-=aGbtZQ4k04qqWz*xtSHnL_gHQKD&~DD7re6DIUl+m9?3s9iEDhb zN)jY`w{I)0o(l|RIGwwabY%S7MIm$RNwCzVI`txGMA9TJj<|-1wwI)0m<}k)8t1%C zqm2}A;tqePeSh$nUHiUL(vDKO>nAc3Y{!y${x{cd=C?1t%M+UWoK6zTCh%?K&)yDQ z_;?!rSWMR%LHb)MFx*v))|>X3-0F|YR{3&f=MbCq)878|&NTKPFU(4@$mF2$aVPA5 zh!0Mm(GUk>B||k^g@)Qr5c!m?TtgjE54DUWsoFn)!&lG+0PGT^vH=Y0H6SG0B zb05j>Fg#OI2?#VTmPuObCjh&@@=911MuaEweG&zjeKcSpZwCLr?z!sHx2q(-wE|C) z=!N&ZsGYHwih;k9NM7bpKwBj~VYzhdevz!Qc2V+udd+p^V$TbJjnca@FPwIuZaPj6 zGarDs%srwYQYhti6fP2fmVM|b_PG}T;%7GQIjlilpzh7uQ}`5D9~vs*H1;UBnNREr z@3x<(AMIR!0MlxYIvTYSu9nOfLzXFP?BWl20|r0U3TOhLvA9{++;byBHz#}JOQ@y; z;8YUZ4tBQ$EC)_a3+?L*J213%nSgPC>z0X|GGE`$w&}{x>3;sj!D&vIXa6>B&Bb~m!SQtR$E=-*!~L;7&&CDiV^*rOqL&$`qLhzTB8$X&Z#gl}<|%&7eO1txFWaQAh(IXCkpDI9FloJ* zP4&=xvuN~NZ2L}n%(wIdiT#}4v?&VZyX@sd>y`zhuBYa$(Im{ze9|BN^8PxG#40|} z(aLp|y|`neDqzy58)s^XAa-X}7e%9O3}E16W|^t+rVVc{4N9x11y@x9O}no@i{*7G zVx=YH=Bu_Ihe@3# zjy4X9s~4C9)kXy&v&B~n41@~}1}h7TYQ90wuT?fLmeLL@S?xCkWhD#JifBF8FeYA3 zaZo|hV8Vi9XT&9&{hSS$7@tIStFgf+1uK!;#zBZtaRhw{ z^-4l-sQ7?-R@7rCwg)T4&Tp{JrAR~*i$%ye%pdD_GIh1`0v?~&=4@XxBJiq7DOLmq z;qfLV4;rDGIrJrWgMqFSUBG2fk7{FC0Ni;4q944BvRbX8UqHnF4RY_@O}_s&Fr_iJGk?_dHcEhjXc6^`#af@ z8^&fWj!O=$pG@-<-k?5pzrh>XEK59=f>6PID@~O={=F*3*i8|xBW7lV&p2QH>l-4y zAXoRza4 zwj5qcmTB%obx5#!y}k8QFuQ&#wT+l1W?{;K1bWpQh%9CNGQJ>Q$Mcnc!9ETieY~6c zK_Uc7t@cBYVY;h7wk;(}!E!u5r&DxDA^JE%VL!J)wGEFni9D|beZ@>@%+V&%(3LTQo5wbCDhnvH2OY>qSbYqj%~vg+ej zLXL6!d%Hb5J~K(V#X8^gh3A_1_X%%{>VqL2pZ8j3|1~U|Y&aSvKPBN4;wxyVGPxY7 z<}jZE-$Pe_Q~P+{Tyekfr);7^pfQ``#IECa^R`u$v*0V9~kSm4)l;raJu#i48gCl4HTid`C6&uDLhM0t3xhB+2-7?}#`sKrB!e zPD_Dd_%^2J(sO8_dS0;aerhL7j5sOq^~4)?Nt*>C6Q^S3vqS}#1cRVDOMGvd0s8x-XMe_ zHn0GQ=ocEWC6#Ez^>DB3BFkTN(qKO{Mg0*FG}kEjHhbH8hsKq(bwB0XXG=lYiEbod)4VnvB+ z4Yn}1?2sbg4lQ00y=iI_)tj%AAHlt98Ldo`V!}KLua`I~PBb;0uq_>IPiiG`=G}!S zGK&9q)G`z$(W|=@IsvcdRJkQu-^)*1Z&f!gmQpp)X#ALC`{)n>dZ4A+RhwP%RbnrV z^`{*v%v%&JPJ|Z58^67odB_?I-=-b|^PTLmi2h~{ZSvCk(DKoET{3nDx0b)lAN|KV z+((SGGsZx2e}9&yzO8y9;3+id3Q&xNi)Lne1+YNttX7-_A+C|f5fYYP2)5~N>Yi%9 zOqSTUASok}x?IWKvkLg)blrW$^ZH!JfY=3>azFE?Vnz?SXRr;EwC1sczw_TP*nA}N zNc{>+k7RVJYC&Z_=_$|C4d?7_aq&Ly*M1s|^%nHDVn1gp1Ny#UeJxJ1cgnVQpt^{} zW7H!=eyxHlPj%fXu<1;L<|>H#V6nTlK7w)&N4eYluhG;;1swarXo~iqGe%7sI&nMf zyCgrOF-sIj+9bfvd!R6Be#CNacEEir2vbF@`+(jOnnoRZKr%m!=}M`N)}^F(m%&ng z|DGYzfIxh=m%PZ>@P0qRU4b#--$}h%SQ-w-Q$8Q@eu8nPdPvfMFW93a3Zzo&cB^AA z55hQ68vV?=Qb>(mbD)?XgGCv1-Of5~+K0jBE7l(kez1jeG#0d;MWsFBiku2-cO~u$ z^@i|;P}b)TY5h71b>n4;oAHafsx*!L?Kv%<1zZ5EGaL$e>W*g1u!Tw|FWoQCXy}er@@D z_JSh%g+P>MF%@xqwhCJ+1c?VDg5_G1Kb;4uJgc!QSp6kW;Sx{4flRD4~wxT{&!1lGn_NAS6GA7Dm)Zk(&4nC*BJYeA3=fv)eNI3m-Kr8FIRzDJH;xf8HnXZ9PW*Og-4el3R66Ie7DnKpGzX zy6CX;hiKQt<4?l1?cW`~ec>~Fy+b|hf*)F;Rd0xrS|g2nzhL*2XeiNYs`Sk{P!riv z5`CZ%XSsg4EtTZC=>lLH&8j&IkzjSA63w+%f{I^zV~Og;tcb5je5%s&nYHQY44bs4 z0-Nep<3np&&u(pmsoK2~-Fuj{0!MY3w(EyeviXAe6qeb8mWE;^Hmex!ci7L9H|WX~ z6|(A|Qn&=dPL3z4$Bs<{O?gQh$tI_VA8HSD zs5ghS7_WyLj)qTJm^%^m(Vj|@^{Nv%m>@L??O`Idv$nhOw2;QFf5XbJehlc(#*R~k zK?)Rk62vTlz}IIzioLC&2xao}ev$qCF)6#G%YKCh8=0=MBzAQdk5qNntM#RE4o&cT zXj-Rp>Lam#Il>l}q20oVhmJoM8(f+BY30C2bomNfFLMx0pnpJiF$*`E`brGn9d+FT=R;+@tR$w~X7+}+)Oj=Y#CGwCDl6N+RC z^p;QET&J-jl0yLVUFJ2CC6GFC9~8oX8H??tocpY5kUgxZ;A5)s!l%MCNu-W-A%1(L z%JkzP2!~oZ7l+pRBbKm~!QjmvJtM~{kYMVI>2G~14b>dlycal0ww098ukWl06|{e) z!rJne1MjAT+O9NJw1dcX7OXc^B|bss&9R9+1c~dGJ=h+TzR>s}##gQb)4Cq_U?wK@ 
zJ!_HD{|l7texKdQGoHlTIP8gT7w7sKxjKY)iMa1K5V|TppADJ2P`fTnY{*eqCrk2C zm5U#fL5KwZBPd~zXI>ebVOsGIarS*$PqD{y5qirSAv52d7h}NcBc-qqY0>Fh;5bBm z4ofF#L+)YE9C*!|R9To}99y2U(AV8t*rDS{Fh`6`d$oZ*9H!qe^*apG0AZFwr`u29 zVcDB{w`{vyAubS+W8?w70RjUo9W8TuYr=5z8ho_ciE<3kl?rz#+&{BdSnQ}1SL!?S zy3OU!_gTjsz!b-65c$HBk-$Sh0YN0Ih-{mV5ux8UMx=UR?!988$j4r%*vz<6eoJbf zgln^p$6zxk@9@3{5s8OW5W>sKPeYRN()Nyu=1I&BT^UU+WJrPZFFI1C z6BLc1M~|SgdKd-xwYp{jAp6ig!3&sfK$GdNj{?Lvh92KTwIlE#{j57D%D0<#auL?@ zjWs)b1(BOGFJZuD^L!~?VNhLAQe`iy zBy0RINRnf^>$clCfvJn>FO99%mF9d+3kH5B5cUWWCJ)Y5SegSLS5NfR2sDk(Y*4s0 zuXVpg5#7dh=%T=C>Q_Jx^?*N!sr?GF+`xr(ymp{e)t*IyB&;^(_6~8gpd~*EPuU5_ zRP(h}_0mtGWc>ShS4(Zq{`MtsK9dw4LI>}`@@7NlLtWld7{|$&&&#h=7 z^Hhcsc1FmMO$pO!djGScqT58oQkaH>Z`Jn&Jo0|YLqP+tRfAUT#ApF z4=v~`tZuyxf}lV8wjKA`EIl2)K}ZtwW^ zaJn2tt{1of({Dru34^<2Z>*`03iQ4f`Sa-@rPG^!nR*-xPX924Uq^5S7YjKjUH-|y zTUjrz;-1sEy%a|B-Q^J@e`-uPL{by}7HbcIGXH&at$0s5Ef-W0Y5yg*-99OB8*?yQ z$EPyE9ka3}&@0Ey*z4|FZj|a={xUa8L!dv{--4a0`%X=~wCEz`qtK}p$DsYB#EJTEXothwyotE581R^U~CNze`Up!Wk3**t>l2`#@{xZhYh=yuS(D1AN z7t@~PLNrgnnwTv| z2|!8L`K`DF1m;4h>y?B0k(T2Po__PNsXrR;OKc4W`+UZ2w)^U*)A}-b=(;zbb5zZp z#|gi*;J%=5c}gykSETi3oxvgF&GlkpL#B2X*=Zi*xH?&gf-xQji2FIUWeP4dt9ke% zTrK97wF*|To}i<^!0bhc!Jy^~B5YutQQ<8ZtNW=&>Awb|y)XJ4cBxy;y-#8Bi8rx~ z@kB0<)JZmAqfBZ@aR!eOD9e-+{~P91X{CO4DyooKpg}LQ(^~{_(%y-pMWA0PNrE6F z<18$$XsiDw@`G>I)0DB+@T%nku*@w-i-}MEk>x+zlXhf&Ta2l=^c<8%(D1<$3*4jAC5mIHe?&vlMxg!uBs9Es5Wax}jhB*|*>T;yy^h_ZvQ~h3>3dY#yp6%;(Z*J;-d(Lxpj}T3Tl~xi{ zY&6iJzt0W)bcw6t@2~&WR}D#ewbK!NGWjG_9x3lg_{PX2`-$RWw>aw%!E)JjPK{^(n~F31(onfl|<*QQJlt4@YR_mjj#EeZhu47KZsNkpx9g zM1)Yrcjk58+N!oGXVdI=MVaMCF7~Q?Pwj60XUzybJ`qJRo0p4qePIhCb{xjFTBJpk z!xzb{ptGM{iHc11CWZ!|vkQQb+-+tK1Ixrk%%dLm!H%-t^kBk^{lH|H85u9&|QC zfE8hi&W49Kq^E_UP-T@&y{1CPEK{FPWaYa%Iz&*m9E3#8A^e+i+VYWDQvqjN`a&mg8(#eO?Ef6;zxwyfj4~ z`u{;6O3S@7CPzfOF!sJB#M^e)BK1x}TWKy%+UvYSpVT*T{GEHUebW70iRrQa9qS6G zJQi*{v5@ePg0{R1!VLe+zhcsZJSwb%{rSOQ> zh+uic9xPax_bAIJ206H^lxR91EnU$R%o|9q6#ehEzHt~ZqsyC2{T8sf!lRsnXcaLe zv!fMB^rk>UMNQ%8)%5WmArL&ulQfYuk^>F{m!R)vet6uZm!t`4k*!hKYia{ppXtkZ z<|h@6Q1^$ZcIN$1r%#UPuokJwAWL?aC!_?jU?mSWPsH5N+^*Lp-cVS4*d@8$nO2=D zow58H)E^JMGN~E6F1#vN>u&4%(^Fa4xD7A=Kl@yX^Fs41Qa^d<`>QB)W=*W=XaRG* zduJtX;eFk6juviV4XPK(XMXGf)xcpoT2H)hMK*2~-Dt1~uomk&sIV!88`8G0_XS_P zPNL;u5Glc7$B}<1de458#U|A#qAszHGN+=ij+tTr`#|4*SHfa2wD}2%MZ@^?i*?hc z)sP1=bi&LlYG#QJ9#&#$9*{h}h3q15Fwj{Xgc-Rnj7)8IegcwnnQ}+lry*yo?TP;A z77g8)t{4eQ%~R@uP@*m-Jjl@ZID@q$5OZF^32ERJh%tkZ6w=-ov#ms~YrnV)YGYu{%pbIvJ9Jjx0*YLa)y$igZ2ugBxa% z5$EJrMDRlaZsTikBkR)0eSnfn=jG^7r_{0`<|U*0(~!gJ1s#OJ`=B(fn->3U@_B!y zPwW|%Ez6{4olS|$4UvAqx4tRg;G)t<`hF#T#0=Fius>d{Xl?@ zixTf^Gxs}pcB1Nd1i(nrb~esdDrZbK{e@tidvspn zQemdFh)+L>0i>GykjI7P0~cW8l|+)S(AgTFi>}P$CNhx90E4UsRwXG09ZMDlnOn8Y zQN~>~Iapl-pJ&H_7k%X1D)qBbhHNU59h8oipZf25;SR9&M)XGCq!XEpxx+Uu@$2_oBsqpbBkCTj&&12)9CT zSeB%7>wdjTa;+D*gU2qCv3BE@4H6I~5NFBUkYVOAu705KzXfcE3o*yIw+)|qqBwJA zZmS^+mb~SLwO^S>|DEo}(O@)G82zY>!dtlkYBqm1tmq)yqsO@HJ9+NC`ruPEQ(z#p z2!hI$+V~hS+c?7n5_x34Nc6IU6$I8OVC2ky6tH9zntLhZt7b4l*@xue7{}D{zUS2L zG7?y9#@gxvT*;h)A#S%+Jg*q0w92s@sr8vdwdb^qWsoUf?k#0N*<5|Sz>v_%vuJTJ z9>VxnpobWsy`!7PDhRn31;(ak)jVjwgb#jstue!bELI8+dvy%zKfKZ9^Z-wiK8(Nn z8ichu;Kx?;F(@4^It3P=4`|GA)VsH}XG1 z)PG5p?Q$8AhJ$@M5dYk#dS56)vR~iM`%?0r1e_SzO$^^z+B+NjL z*1n{f&VgTTP-iHq;gP^vtywD^%yTmWi>5Rr3gRSKzhL1Z9=XOqLRFe2e)bW#!Gc-) zp&0MXG6tk!tmaH|nw{~~ulBWK*fA{aIZTh_YcB@sr&!{T01*q>^NlB$EM<5IGg=jK z$Rq-vZHX+S99$=Bge9-=75l>^W6_f58YU@=-n==jc|C5&7f?3s6T|mjFy0}>$@9;` zkgF41DwffyA9IfaWWB$e1fIC&1d?vj%?&5#E*Ft~NDamGQ2wf%+R02#B49 zOVlYdk-aLXk$d+l)|XuU40kvQ?DMD6WImgfcxF%}}Q^Wr!l&j0! 
ze{osM-d2Y+SY#uM`l;~QiQ?g|@vLTM`h(ZWQ;?yKiGtV+Fua0JGTRL#WT?C&;+9c} zmtY$xXI~30ejwfv4T^4OyJu#+sV%k5c2x=hR94Sx3sQL{c8ijb+tR43b70_@852PN zdzd$-%M@@nz7Cddp-@rHryvO00eWWt*Lv&jTHyQKGk|90u<2eQ*I%A@W}!WRcY8UQk$ae{TZi5Yph zlQ>!??iduL8e}vP#JAhVewSAI`=2sV!ZO7By((_n+58*M26XinBem<6M5(tE^%qn9 z4!5%GlJ60lV`Pk`6LX38)kV2)fQo~Z=|t&=`y2iQl{4ATx9mV{_zHNPBM>#Dbd5SN zv0ogW)vvlNF@E5fqYLnWe9Gy~bunZ=nenTvDEQQhqPdb=Qo=02Pl6rrLE)BSBh>st zNMR;}uN@m)gmkY7XD!6c;LINqQ1 z-Xx#dvfp_R{97>cz{=Zpu(_%w?j8e<#(5W;BJQ1^uBvVFS^PsZZXj2j)bS93ZS>-F zjTagG`w2aPkjt#~1Iy*cx>sy+riSFj8}sZ_QzXuI-0oW$=4;5oWTFsg7m=bEC#$0?hB}b| zTCoCe1!!3@tchWu=t7xFjj(Lt!SIu20&__4VRbWD4av zY!5F#oThT!>Zm%GT=P9|?^}z=)#fZ8s>xJh{~Mm1Scair09468STE&yGB|`#ly?ix z@A`OV{rS~l1~ePjPMu>_8Cn9rQBFdZ3RA-4Yoz4XJnufhLB(O~0gvtTH&NiA^BC)` z>nKK>@Sm2Yn2Cj&ZAez8e!FkJw^L?!Z^#WajYuSpI{UKQh&&YdH=Ole=Ma#7TnlM_ zf@}B7Y48H@h!x`v;HvZjs^D2LgEiuOJdU9L`LzPWr0Qbw4={bSH}|}Uhm@rUFuvG{ zjejW4zO)0I#ZUz_PMzpputZ>@b#oT79&l0e_bCruH9A6S#fddFWDn-utZD3V_C%i! zD~m+BI>fBQKJ?wZCL34?Vso+Bfxzj)+hpF!n?bL9Qp0N+dqqjnLv=_1#`C&LRaSh;pw}>seb?e zZAZvC$jCZ2k(s?YR#qw;*+)orviCmr%*Y-ggkxpTgY1aN$j;t-|6bmo@AdoV{ONMN z?)!eu$LM5s%)bU<^^@KCtC244WxXIdAVXpf-)lvFp2Dw_NI^0s2Vz8M$5^SG6rTYS zq*L2#o56$1Kh-VB-hs$BdDdAl~li*;vy7TO}oJFR{efpslANMxrA>+2{gLMboxBW16&!tB0taEH=QyZ8(M1O z%?0>T_t}gImWfbc;(wyv8@&Xe>fQ^=;$!PO;l+8u+85yMs8kD>^xzVsB#4daz2&g! zycjYH!wMSXFDF*@u9C|k=tGcikZF(^_6qRc7pNH_DH!o!*@qka?Udw03o}v00S)fB zkh0;jY|y^t@gRK(FC>J@^t6(ayNeQYD~VCdMP9g*WOl)18~5U?$XFe!>K~iaj^nL)zGYe~x4-wZ;EwhN2cj4HJkMTMsWtfhd9{L|cZvuB33NMG% zs|Aw6Uo*Z1{R5D_(@Bx_jAoa^H~CL39Uz((WYU9U@6&yq_5yqrapssU>sD8dAKi6I zX>bCl&@8XNT{w4rJ)t|m=ZtAv}| zo=vyA#H&B_XvQ_tC+2+?WRyK%r08j9My=k|i8Jm2g}IvEmUWL&-3yyUX{UJ)o}0-Z zSNBlRSLNom*i35ZFcAN5!IGPpv$h;=J0}^hR!J^y(xh)$+>a%(FV*Hz1uxuu@-%PB zB&{wxk1V{7Q|_RXX)3v~wrA?~|27xi^iR6D4b^nrA-!?GCE;s3yqeX#G;OuoJA8i} zygft+FmPFfk=RaapqZPNgseK*8SDyLGvLw%VD=kFOf_x7Cv0OZh6@Ysip2+NKUiFR z`Y%zi{9mKFfh5-QULq6WakCO0<1WVJ&c)D|h4~(d_*GNGWG?!&>@6j;0R*N9Hay6@ z+8(b8`2@=h9Q#gYnR?_M-ZZc0XyoUBM)NoXjyyHq3P^rXp7a4xu!#y3}E2^``KW>u| ztm+kqYXdg9KDT5Dp$NI@`WFVCbdz&3LBEHNmfJr3Tn>P4_{G-?zMG?*mIlN+_8H`J z_Dv+&jy+Ewe(Rz1-%~sSPMS#=8g^$+v~w#zp+Dv%ZL1e80)YM~IlRQ$Bb4{JHzfW8 z@O?UFXO%L)-;-F<{9pS>hYIlbH%-?UhCVzp#d(Zcep3nPS_;3NR!_URm%* z>kb!A^%0_CC&D15DWJe~rSFWWq#;T_P_XQ1ckiWhv$a%=_mO@7$(mFA?H?lgTv?+u zZDvg*-;x90NQrEVc~6Bz7M8QKV9c-M(<3XDsB!GJQhnDSBG##0W!$=Ms3^d5m2-kZ z>R{#Gx;eMmt$vrb5XE>acq>fm>kM%KMzLShq73)#m1pf}wQx*M)@{)Y$flM*@9NJ? zRe^(EP$Fmbsmap~c5M0)p~V!h|IWDCw*82^>ztSMMnkCn5&DtTG+4B4qydZUui=;? 
zYfDan`E&820xq>iEB~tbHrYP|1cuLJcs6`=*8XM!xcc@@23FF;$iaYw>LrR-xXdg{DbgwuMDZrL`j2rxb4FInfviB_{l1yky3;> zfjC^?E1+N7k3Mmt_jCA72E$=OisLgf3a^{z1s^>WUGOg?Nzyuh;by@<6XTtxO%}LP z4QQD=kjH{OXO2bqL%JWJLg2DLFginRzIwYbBREECe<}k)@6Oe+IWP_=3n$GVMi$Y+ z=rtfRYy3ge28P%$%D8+er?#D(td#;Z)o38Bg9k=xOB&~+&UYZDB9s1>8e25p2nQD< zR?ya3!&3j!bc5a;cGjb-6QM22k!r8$>VIkGpH{Mt)7{DVb&@tshvQp0ky$V+HsBZ(|U-n2T{d{XihRH!U@; z=JVH)qO7-s$LZppe-&89AC4qh$$53XRBhFkUbotrAmQ#?(fY}fwJ9>GeiP17o+}B* z-_W`fyv=6T4R!|A&5GCaT(s%xjT9YbYNgsl0{YWu)nS7V(<;5UV3`bnEj_`3~>0tS8< z{A9+dmu`|);aYAAB!-3*u$D<-#DAEZQ22AHe(|HSu3G-xNENB}3faM9xNPtbs0mi& zl`0KH`3!$-wXv8I@ugga@k6o3uHf4Ze@{$R_qF1 z9cQ}(F3L!`g{G?{#{khS6Q50E$2;iJurmWvc|reUHsd##QeL)CnZKc)%S)bwtu%## zZUU!ycl(Ty6MWUvGOuA;5}5E3OC)W_ zK;(VSBts?=y`?0~Wv~8JBVcW-PlwjCtJ?De$?vSPC2y5ukAxL%&qI1^w5Gp|T=3FD zV+SZi=LNEF(8Q3kjk4s6b zE-nhPwiImEkFk)dQT$)d^g!j#$WZ0-9}U03kW*}gL>ME}_Cy&N9h)URFPJ4LJP62q z1d%G%>8aE9IkcrP=;>i%oCoYRb<6L{*a%gavCT6( zk@INGRSDy{i7?Ue%nEZ>mjjvx0h)4?ri!mOkfY99xLIIGr?=&jS#GZMaJ<{hc59O~ zu!z|D+UDctT}JehH-CcTm~QboBC|aLN6z&rfp|GJs^7QZmB|H*JtVdBdGE=Gj2jeD z{?;NrOR?CzvniX2Oj9{3m<3CV`$NV~dG#{n10umkdZO1SmV+zSg~*HY4d5(O?09yo z?7fM-YSSb@TeR!jWPf0A7aI5Y>crs8Zvpw!_w=;nk}Tf_-`&u7j-BM+*4&B*1v=E= zGUM(`$zA_L-p#~b_BFAJY}LCa0MXJPQu(iSnWs#=gMi`p+BwVte)!RD0!2Y>MH_9r zv8>Z84?w;hESUJs{%@o8ah8KLTrtXmS=jbrXO)$hzpQ-oy79|TQ=<+Ilh(4C@=*BO zcQRf#2q@H8f@Hsbdt5xsrla@y<#NI>w{>iZTMpVflDJI#5^@{DEsiz&ytPr5-g3yR z_KpjC+~kfqU-z5)^9hMlMMRDJN7V|Bjp~WFium@tBA-4goka3TgroCdQrOGIFhn@? zen_tZ^xhqVB9ly(=a!XeznvXq%hR?5i%2G!s&~}ahHN9}O)WELNK$YM?;zX!WmZhJ z->eaSS6*HwUVaj|aJ8E6rGz&At{#iYia7l<4^#XI8riA8v^^6v8rke!I`PdHfZz^zwJB8n2`b`XnP)^;;S(cw;I|WMVLx95Bn$VxjOs zO#M|O7g+Hn>F!_{`CDmS>qRCgThI-RGB8jWX_Z4?x52~M8QvYfvE7yeSo zyF_0Ijl4I`?yFO$9k6mBVTgG6sA-cZWR{g>jZ0yWXw4}MyeDT;oG5IK-4P@d)X3Q@ z1nN+{00dq%)tR3B;T2ZK$xO0Be|0Q76-!o&yhQFnO?J74CwusFZnkgkCOQk&q|Ck1 zg-?YuB-<}~KYcvM*hus_^e~G(909(puWEDjcil{k3}+uNE|i$5KEfq_rWBYz%maf_ zoi*t#%3kL9RL=2%)VAQ#!)Ioloxu>1G5uhILPwoU3iSsY`HKGcn>9vM85*yY6OsJNr@{!`O)h@EeT>I~+G@9Xw*{!8vGm z^I!7ktmc^{t`%pI$}t%*+9+9AI*;ui(Y zZz9Lr;xUF(j`@WA@3ECca;&%^v!07QYEc-#CNDXmRUf##rhg7kOUSa9_3k+e2tn-; z4!?sK5leo&qubR^>e4I79jP^E8h;nzq^_J~_lTP7f|Bf*yH6 zF)&KQOdUjZr0S$e6JMdqc2&!XXve)4E*Eem;Od9hMq(^BIl<3J7f0*rxGJ2|_-)-T zz{_ktxiT8%sjuI!4RromiAR}6>2!BSsAl=2N+hEMy`IhukNt*!kOn_ou}=6w^^Kv% znOtp13*&x!uYdsNDz~Q0#b#yKkbt|vMrBjYzU(%w=M^ilRKSsj68g)y2C&|2cm_Z# zhK1BfhF@jLMh(}F)PW4$$5h#c)-Dr;4E9Lqtnp9G`b^szNd?Dg4nBoZq9{w8nN16p zcVbP$Vwg+7e6~l3QA-c97tOEf*zZ6IDmgDmRy`Z0ciq>X%pdf-mif$5;y@=D>b4kq z!T4Z5pVd_ks*)Yp`N_x?x9pZX0%y23Uh>2|AfdqVs+h zgc1;vVU{u8vj!{!yQpyDE$7zhqE+zq|G9enU<`ZqJK>%OoURHY6IJe6qD*D-rhdcRMgNzz+cRgNkyjkyHmI;bu|QAJtMVPZqvLI-70lDl}}ww z<@~v9)TI6WZQ z0TF&U{>MF%uOK@LKf3n#TjLJU6Kl+^0NfJd)%@GTF&>Z7H742rAC94|ExL#eT>D2a zd12Q9Ljf}8evLuD!1nceRtyh zB_mGWu6_u$qNEq{rHp*FYgFu1O?N04o~YmJ4LZVc^XwAyPkfsKu|_$A=ykJ+gnYHe z)=dWYkK-FQI=!Bj-|wedR9N>1v$ts_bM=S#0$}#+&7_y0n1FPUv8|jtn3MNP`#> zNYY_J+!?f;7!VV%NlUJYjadIUzP;L;k6UF2#$-c-Qt*TfR?frQnj)!h5S1h+z#UE` zF!&&vU&CLAqpD@Ayh(V)B#SMsCfuSBeVN!DT$MERAkkkg^`{n_F1-C-zmTR^D(00N znZPueT12+@VPCXjm{uME3X-HT5eBFMvg@eY)h7cvW)`|UpUxlhk9NS-_}gbx%Oh7e11%9 zjCbf1MVUOGT%swMZ~Vbm8x^9z$J_ybrb z&gXHZHP4RFCal~}q9EZ-%GE9GE3Y(ao|iyTaaTsc#;^7#ddcm->(MB^;|`lh`?BjM z6{@Pf`+e(`Jwg&Jqo5u~qL=^Oa3kgR+a`QR((Ha0bD5K+eTy*cFr0UfB>dup9T@Y1 z`L_zcKtvKL)SDDsPg{wU-48IpAdPM&!-#sdqRhef5OZ0Kzuw=`R1#+Dm~{=d{0*_( z(5D4sfIoOZ|0Lf@FZg4B_KtstYO9BfK7=bc(qNz;_leZW70fL3+t`|4TkB@?<+P0W z>^jqMg%nAje2s1otgqf)7`ym9*<1*Lj4Na&RS}sB?w7B4X` z4ln)uO`70M-oH!1#vY5cCvi_vrO{M8Bm$TM4jhcYxU zs%Ca-RsQhSc4E|t+?Oqc{H@>0;9*ig{mh=bw72mK<6v11$E>R6 
zO3+i|yiIk~XT&JyROhRKUjEfmo*Wf$9_Ka7IYmD2Cn>Ta)dTYed1*5*F!mkO*Qfzw8 zks1cJ>w2xn-ymOX@ZJ&H>WoD`Z~U@Zrc&8}K58R|+atW)9VZCzE+=)YsK-?wkYqpE zUAF*Kt~|We2I}QGU~g!acU|?A~Ty2?ba^`*t~}s z@q4c_+f~U`XiW_N0i#1A2iyB3h)0ia-`lW0(%!xlB;3r!BL0>b{mPPcUDqw~XtI(PzhJ?`=~1ytk#)g|K~$yQ$}i ziM|=l-Ym1V=sDr%{6gGXs$fXgTf>eVpKwVITLw}0#0AG9w@jPh5CCEZG0SU);mw3^ znBOOKT8I%~ZygM3)OQ;p!+qSRJoRz(V=Kvo%^W0|%Q$X(m3DPd zTOA#!X3h2nEWpIU^73Xh*0-ilFpw`Wg)BaxSavE0B%y44$ch7|99N|-Bj=broTY=D*x=mFE;sz4-@g6IUibZIF^kC>L4`7%-UTChX*tK zhft<MpqAawvqQo3c5*G5O~0FhkrOFrnUkMN7n^D)6ZF#YY8XHrW>ON; z*RP=!$) z7Fr(M>};fZ2mc@HGZ`gSu?Zh#AtjTO!oL1O)8alT+Ccolft#}WJQ2f23oXVMb2_xP zD**!W^n6@wm3^Jc()Nsk)g4{6VtP#z*d2vvLUDRT%DR6d)7+V;@^)#T5-y7)`c)$aU{NEy1g?b4h z1WQ9ygDm6`%+n)(@k>Jp`lWQjjvof@T0Cnra8a3G;q4syyti2TV&FtWs}*N{8EFTu?kl zz{7N@2Zju%MLOBSH?0*8}$WUoe2wD62jMI*D2Q1VadzPpEQEGvi9KqaL zH(ciB3h}H~w-CtfCE1fj*Gl z&#X%SFWV1w`EHh|8#YuFSN?F_1-hy8i^w@%>FY!%yHt>1JjI$~w(`u(4rN;|-FTA` zeW{-LEU5KwtM86<^d(v-0=gv_u@OOhtv|T0Z81B@Qz9o7fHA7X{Z-^Ok6_^GjIv#I zv^9!m*llIJfhQ)?hT_-IKQ?~nsaAqsT)`6cy0nb2SnbsI`i>#V_hI;IM&Y>A?6`vw zpW}rT!jTOD#|p3~79~4uB3uL4p8Sl1Sup(xindxKuj7!d(u!2@hJ;}A0iPlRzdj;s zBnb&+CRroRSXzu-uk>xwQ{sO1Wr|P!BR<9hcjGm}!KuHyz8fyL#rzQ%Dd~ZA?WyMt zR(pXZGGZQoFk%Nzu~(e!C&d7m;N{RmnTC;z`90@b5A5+py;9A}zR6D?EmggAgCN%? zCE~%(cL}ac{cn~KCWY|kvZHpYTbM(dKjmNXmot)S5d(G8bGw56YFy~k?{M5 z8eOFzmA}lvlE%e8phOX4`R4$NNgGbWEw3N2^k6YGym<+V0N(dL2f&h~a!LRctx8dy zZ#k`P?GU&gJu;489a}J1^eg29!}uGk;8|7mAL$`DMS|hvX#Ey%k#a#@N;Sq=#*8ke ziTbRGqU%q8>I&NzaY9%gB3p8Z4}ZDZCEzr%$ipu>S)zTWS807@Q`?^=(RsgGP`aqZ zS!J?Gh_om^GXJBYi#pE`UvR@ASfN+8Q!t6_Ha+A@=4LObjGO4?)@wj;OqS3uK4Cn? zvHxL_0?Lztcf1io*P7-=7?1`Z^Q#%0yv1DdgjgNxRrLL#?)=ytnja%x5+1XuGmef&I(8;4jL))r_g%;YreR4=ee0 z>(%biP*=rtw88)w4Yle=un5jy`wtaP< zYT4V|BY-J-?i*Nc>IK_NdWtfH^P+su#dP@^PP@513mjno2Ohc zpk-YzEW(W*MvfP@v+ed2nQH;0X|Jr9*8vfBwWGPmm1w-p1e#98*?W%fun6;C zyhwbJbCy|-*>>-roH}{6Oda;FJBJGprE7wBgxVe0vL7qw#-9E3O+5~$NsUl`7B2e) zyvrU*K>sW->Wc;7zsNiT$hK&RPI2)^c_cBvsOvNC47*-YEaNkKCr=)r6y@}I7Qchd6S~xMeaM&9 z>w<*9V&LHePCdu6HF4i3_`!EzJx5uOv;w8E18x_Aw6D7`aSengn6)U~eL&f)&o{L7bwbM3*8MB>7N zA|BbD@_9iek7F!47BMq|8_q=+0}j|17RR)v3;jT_ybUtyYl5n!oIKB;hJI{Xmz`1z zm`6qZOy~>sFS8O0Bf}?!lAgsro8KW#KMSVfRMhe3MzT~cg1~W2YMvSs1aHoducYrj zv^$e*9U!CSZR<<5=7uGH+BG6{21!nQG>uSVFG9PCg4o6U|!lWOI5zBy@!n_ zI--7Lt<$TKPYJj4MrqM^;hd`v@&NOV(%}-e-3_4hbxS@DEg^d$6i@Zwga`mK+YFtO zY`zR9W2`EJsyBxWbpA^kd0>{nlPNbC*3Dvd?j&^m^3M12S9TV>Arkz{Bs%W~qd}F! 
z3LUQ;+iJ2u!B76^B_v&Scv2!_MjtZubEG6k&(m6up8cVdeZ3NLTjs}D(yZh^gS6$} zm;>Zpi*3mttyL?^#YL6tQ!?=Sqr;1$HA;!BiPx#((#nMt(gHqTf*e2|!kl59FHwc%F$ zLvrczQ+R`nEboTRzczQ0A7b+mUUo&Vdycsa399!`ch6RAoebFsC%)EA?WooTFUZG1 z{_&t_X?5&TxUwIQ(gG$6cfTN(*y>`f+ZI9~Up@^W^}T!+L@fpAC7g@^Yc~USF z!H5;wulBy;h~_E`Js3$G-nX!2vzAq5HZdDkXcTMlF!@1H+?p9#Rm+H^MU1?y`#jtD z2Cemf==ewdJtj_F?x>wtoFRTjtDWOK;@ce5K{C z*U9{#4Nq1o(eKy!>?}o3U;G7fwj6@zW0COzvn$624YU1KSm|xR>G6mt4n7ZWb&ahR z;<^&{R4qAHCv){FJE!Eevnns=20yTjLTKk^F`_Wj6k*Xi-FmXNxZLCwF?((NwN#i( z)KquVRzy!v{Znyz#t|4u9q4Z6)Q4e3_il}9BWa**3)j4gPF&?;`|Ko=%WPUU#QLVC zUOCbYN|A|b7H7J^fh^lE1eN^2GB4(+$CR6E5hQ!uF%;8qe=|@ujawt$I-3(VOww3 zz1mK4&25?gtt&76j8j3Gr7hQ~mEf0~NStW_M?Wzp!UI33v7Y4A&eV<`?p!>(N)md2 z3Q)cH9#%Gam%&~!KO5ZqPc4}&gSYsti1CYPUB}A)?pEyNO!#ManCjN^%ifZ^!^(X1 z{w2i6zdkN1+|J{IEW$NTQ)Xd8zaR9Bu-P6IrB(&5W$LiCp{F{ymqGWe?& z66$FDBK<-?&cvz@guOFe`ZYDXEj4Dp`e5L9^`NdWA~tDYpg~2PH2O?5JI7Q~3y0tG zYzA(eIrIn#1pr`SdU&9B2S@D-A9Gso|MlY&+^wY?Ya2lhs?#4uNaNsfiIgV&dunuF4(Vn+18 zD)^;8V=V+M5jzuIg2dC*qgT}q$UGK9E_*#Hh`NdZEt5nstp}V*l&RC^XJ0pL_SR)j`Tuw5F+v8{ z)*xbyH{TEQfYToJpXq|ik*%o>`xs9UwAO=_08_OIzEH;3G!#F!;#rNF!grE!T)d5h z{9g24R00HrChFDagX{CJ$0~^RT@ls_gK>JvKokBUF9_Wz??3jdG-g5(L7kT+hw3<= zr<$Ictd}YCjvuXro32KEe5|}o5Yr_Tl^C589ilN#Z8BrkgsPOx#5Rof5g~YAW{bxV zH@iUV9&sJrM?lDi+m-j(0z1{HH`(4(o9 zYZ*7k z&l}BmX0x@LjKiZVSqiD$1oMN28~s`f*5=)Iq3)X5*LaU;7INNKn#MC-v^o1GSzUoan9;wW4L>V*tCW4Ww1uBv+_;#?xv z#;T>yJza&Gy91U9NY;`*I!zFqs}P+7cn02$;snkx7`U=(Dy+!@JOQ zA$eZSXXRBx%`_v+RI?JExPh~;sY-1K2SQI*nE#4!A6Rt{GWi>XNWauF_tJgXAMdwK zla#`Pe1yrlFoNIHZOJz*pq>ZN z#K?fr+b!m4GlF#T>S@Dj!)A4ebaO;_0%&)U4D~P)D>z|;;Fs^k^K^t!?l6iwTD<;i z-YjUUgIg&*zdXS+HFMHfT4K-xk=?jXB4TrX{9{CHy7#!}tTOH=9tncQG`Wko!{hKc zZc}R)l(3AWyAk94qzyVv|I$LaMVQsy3Mq3qTK=wj=kU1b%ePx{xFz4$2wA?%SDBeb z#-|c(Ed^hsK?4wrAL*63S}bb~(q>YOO2Jo}GCeIhM5`sSKDXY}$z)&lUfRM#;eLOE z+$f2v4BjC>o_&7mn&nDI251VV1>~G2C+yzyPb!}^HWu8J443eQvxa)c0}@viJ$>qz zxNlZmp{)U^@`EZ=*@3_o{<7IMnw`G$6P5_Sv{Gxdn|T7UNP9Nehf}wJ@2_iyc|ETq zBZmEGt8>u8k}2&TRraDQWC=%_+2Q1s@D`@|2Sb=oI7QEV&tC++BOTItJND7tO7uXV z#L}+|damnQ_6H}C;t{d!Mh5Nb7MJB3v*c*Ah0oB|tm|5`v!8y3!j62BeXa!aFRW(v zAKbh(FeKx7;&@LXJDHS;apEb@rz?M@{#n`2y6E!9FQDb4<-(P**J9(vQlG5q=w7gr zk0(p?mwAv)#>VCSMv~UPV4pc66_5Tw=3Zy-13WqP)kVoxA>_B_uC*USL5^y9`J@t{ zN03VN&=bwT6*<~Qk-HNU8W|drm_yuC@H#e&7HyK2Dy}=9 zd>{^0gd4iAOPa#KE=tF>+D#J@XvpNSP{O%0=d0{LySGrLi(gSPdBlCYCQG|xZ$$M@wOGDmvHXXft%2zCU^@wjvrm`t0W zT7Et!g0?& z`f1vLr$N;Zs8p}{{zSEb%D99tEGfpIS= zpN0~pun*Q09J8k=AU#mBo#Pv)6(Oq~8rlg>oE^^{Y~VK(oiP&i&<@)!O6VhzQ+zhZ zLH~lp62|id*>J!z9~P3%i52b6Hyb=1N@Pv#@r94i6k!7HI&$C z1f*YKc}`BEjRjk!F#c|ml_b(e5;6H!R=^zlV7=rSx z_ahV!cUxe9finP|5huo-4rL^E>oWFq$N=NrC42HWv;=qbSIx^n4lvoE?iY|Tt}a#7 z#F6@YQXTc;O`DTf$*s&;3yi+imyS`+wK^@IDu_pAB0)eRMf?*}pY*{q6CsSrXmkq4 zwyf+jY9F$+lr*FOYl%PZ`>Y@%if=IN&)5@CCnT^^ed zpB1=^ZZ-$Qu|C@AMQ?G~-XT3jzm9TVoH*A&kqw7X8wGOh(4Dgj-7iy>QI)k3ZKF+x%-4 z|KG7LtwD>I`&fMnHgUW%0F-~~7g5$&1}`o$X5|)8H(ug`uE6LkY3S>K)W& zAc3;EXSSKDMzL(G#y$!ohCmwUFALkPneEV%H^zt0?D5Uttx8O~dGw&O`y?&~w(c2s zY-dqz8iV4Rw`k(d&`VO;sPCU1kww?z%g2d-ZKu4D59z36*YGC)0WC?ltV^X@!JmHm(tnqZ=NR5HX`|6|Z zF2Vh{O5sV09R#i%&M1R+>*+?XV(3PN(x?*722_+N0@+nLJ{qG@315tT{$^$8zT94j z(J^TC2In%4xf$Qa*%cYB!I%JIZm6aRQt z^_L6W94>_R($#$_yUtGSAe+toVHgD|7sN!62VAm8{UHSw;`b=GWX#qE%YiPDva}{Q zD%4`k1eaEtYYUK9nMNsfbM^Jc*wrreg~UeX^!@b4JXr0d01Z=N>Jz3V-ZI~A13m+L z%h{-_9Ilr{;$~Z^#w;l({FIm%sUB8O!McBf-pKZrNpl4nbMd|-3kY=Ve3C<_{IrZ= zI$##x8h|Zr1)gvn?$;zt399GX-}d=g z?5`VeOJ?*M`Cs*@vS*GbQI;Yc zxfsT|1%yUDwhhLmu(K`U`MozNn`EXw4L@FWIW&@L2wsWw>rku1vlX$ zcc=IRIl`oGyp|Atp)I8BE`>-=@N<7^Km%2*ipb3cJ`5^yjd$G`7oUM(X;n-VTL2oZFK9O1N4 
za*U#+ztuq93);i_zkk3H&|rah$^9Kx>|AAaELccFy~%<`ARCYFy=mD%g$v^y&Q zq>bq#0w*%tU-lGu!`dYBp;~L8D2Hs*XuC*)n}XKqiffP~=hvd6)xFu2*5fZ4-b9^| zNA=&ptpL4PKwSk!e85R647)^Na-rlFq<5o8*U~`@P9Br zrtVPIh|9~(Tbkd)KHjf_Lu01+&Z%6CGW`J7+=mGT5v$m8MRNOP+TlL1g)Wpj+ z%gW+Yu{ITdm>`MD9>l$4(Z!H|OXDPSHtS5YG_n-XZu&%$XsD~X$ zYRTSOb;XEJ6)V6FOZ9`F=n|6oaY6-ga#j*AQ%w6+bd(MDfxMACcvRR$n}=W4!@+0E zfCd$46Ds^9ySU>9dbr;pmEE8ZP`Hg$2=2t}|wo~)#u?=2efbfwYC0sZSiDi?o}rRJYaqRY<6cTlwF^ zV(ahB_aBJ4Tbk8Ke=PH!a|Z|wY$lv?ZQO%B(Mscyacv;1K3q(*7&wuxAee0N_?MQ$ z5>Bec+T%sh-(Z{v_5_QkrZi_5yn0e8=Mrp3w;+ksrYwmRmrBA9WCt*b)1QjL5&yuG%uK{#6kH^^b?Bcy^CvYbsd=0XnK18ny3iD zw?WSv@0q1Q$KhRFAd+-^>?9(9syQcoNsIPPk{ye`K)(fJjW7Yv62NQ&Q9K-otIqf& zO^4`a>z!E{jV7u8{*na!i!pvyBa^YS`%TpMl#d>&O)*XJ1CLw|Q(D0l{-q&+qyUEh zK;RUPAsEg?^uV5KmKaXrFG@bkRj*X}WQrU~J?bh3t%GIYACOE}Y7r5vK1}qhSXClx z^9Ra9_1Cj7v)#@D)}$&v?Ds)m!)fAG!+jE*Q@i+-H*gC%)jz_{-ahY&vVdGRu~_1B z3Uf2gwpd1HwzXQWx+ya~L(TxxT=o;2(oeta_3mRV8{XYTvh=OW1i`5&P--8s8X1no zm^-Wr$Y&zD0^57zoC`94{zqQ)-bx$swp@O0_6U~;J5cMxD!2l$#O{DLfIuk^ca{VkKY`a1N&~glB2*XQrJ@T7h-b6jM?=v+1C%2e8I56HCwK=; z`yKbv8pPR2dXAd>oH$WYsRW0e{0j(()aITDY$v%K5|Kbz zC{J1Xujn;AiX8M1j0(g+>nng|5$q^SaO80V5NU6@c^dKksTjplI@!*{P3ig<^zyeA z>p!X>gzw)#fN9LulUY#!|DUWXZuoI>B0$ z6Ga?f?&V6Hne};-Kd*j|^l8V$e^$s_CpO~toMO@6bkS1!U#j`%^n?8_5{G1ClzpkB z;&Dw`co<_c*aKL4{l&Qj677Aq!MO9*bOE%4^sa}BL-00YTnr4wcTQP>LL9N_{`P*D z`&LMQbP*8nFjD2^vED*`|3HwZi83>VF0nSC$Ih(Tk~j$P=E znhxctFI+;kPMCGU)aoB4<_}ydv053Z+;(J=1{zXYsY&Gd$XjKi8rv0+0?C~syy{@)+EFk&)uSeo? zJwKf$VKsR9;^xP%F}6d!UBIJ!EOi=yj^F1L|C7?Szv2&bb5%=qy=|v=)PO63RyS)( zFxDUYb{{IUlRp9_NlMK`>P-FH7e!N*Z`-B(UM)+rCWF-|v5X7Gr0| z63RBVWC^8^bu5LH(T1{%kX`n5EM<*BsO%w1_8PK|C0k_A*kvbsV;kG=o?f5V`}KYN z?!P>4_uR|%Jm*~Joa#>)&9jZG4T&ETf%C;cH(>E$1z&lNl_fOJxv8p}h27)b*GApX?J# z{ei)t2Q1y)@h?u_+e*wTwx`-IUeTH|ten@$pO_>&*W#k9_xh8B0t(5Bot{uEVb_ol zXbrqK1^K;cKlQ>4SWDyQzjDw+R31$PEBJ{+kh;)7O)CU@BQ0Efm^+$^nQauQTMN*! 
zJ>8yDj+5p;Vq-vTY?z(Qo4r+R`Th8tt~j3Wwz(KOLqEyf!gcE#yDUG&7RuyX1a%|C z<<8jLbt}Y}D)*48=?u#7j=-fW@9jqrsw(4Y{4r6~qW$@){oRRLT}U7)mOHInX3^b= zMmLpPG=KaGzg<&Do>~lFq4wdT%O*m{n&iRf6spV4wRTK5$R|j#R#njbuswS#ETcxj z6$1jARrqJUQCz6kbtt7j9}n6SfRrLpb2QNL`G=~=JjIU%6Wmy!<%L@Y z4p~Sh4r49R#0~xzmup;~2{B7Y0HSkk<5#n3~Ec5hek#m^-C;XT0 zhWFu|P8b#~K5WPPOD&)UPI2&oj;SSocChi%sO#ZcB|1NpT^2~RR`GpOQ$2pi`nAkZ zdOz3dP62b|S+Y%}-*Fu(@jnhJ+O2xL7UjXIUsCI1<;p2xc+g^3d6BFTI+RAQ+;3|0 ziY&L0Z9-3qyX)W1Uee?qbhlq{(0_vYUZfK_uimO;==r+XTAja)Iv$vXS^?#fc5N51EgF{EGr$CNFCi~^qX_UrfCW?bB;pcaW^evqFC>QuhWk=r0ly+tBUkt} zI=$u#)=X=IHv6OQq4OFSY~Q^Weat7$^injw&y4f6*IxoSz^ewj50s1Kwm}V?wN$ozrR&7xe#nCRJNPqhUZ> z2&kAIg42-CFnvbyWJdC>?Yp0*LMRGDC2Bz*pkvgPHWI6Of*x`kSdU8{tX+Pe`j$GU zg?8=J7)Tmr^UFe(gcGvr{Mg>jZPAzPl`u5&+^Fu5AZmv1eWKB2NoW z-;;&{4<-Un#!CYBLa3D7nLW^d-xI9(B^Ri<{A)!r5{fS_mnN*s?&`-<|KD!m!p>=#ZJQCsJJb|3G4{{5EhoV7d&c|NHsR9Be0p-&Fu?(_qsKtw<}Sb&)q zs}(~VtqI)p2?YR~>fo@!o>{|_^>FA#yRQquhq^WOxYwD7gZ+x z_0H>(pg`A}1mG6`3nwF-jrXUg&e>l3e)ZYjuOG?0In?HXsUb{)_D`w3yl_AQ_x=56gg@+X`a0Q&tI zAVe8?E`RX79mzP^Ejwj=+Q@sP8ZhQqd6B~gK1qBpdyzvHEnZ((QePd|rZCy}NmTAa z;L$9vY$5lfq~3SnZ6;}{OP>EdcHOXUyES;`>ZeDp-~N3r>!v-qf?Bw+BZ^T)YN9@spC-3({KUKu&^?sWkWS;{{Bs|QY_2&6`5jO0 z)mH{~+nWx2y6X+lhF|4}1p`bu{#A1cI&TG&r!-YaeWZJtr27@`vK8mI05-&uz_GDO z{h)*IW={I&Yj;QOQ-`REuI!)eH8fn?gGqX;9V@Mt)ekOm?J(|cHrRM%b8PvWDT#(Lu*Z}ad7Lzwz zkzP9cKTb?J{v?|5Cv9=iez>^EY&#)kkln=nPQbS_23Pc16;?~tD^9=m1US|C?yTHc z(u!gIpTNCPko4blf_~L|MO>rP2flOXpYME8`b_)OS=;O*c|Q2-o>X~ZUT!jPTIdpdZBwDX%Y_*bV`$tSeZ&fpD=s{NVh z-vHN2Wj(pc*b$i%_9wuyYjF&)Q&s`IlqwUe!dDPTUFCy0l2_oO*9tj(R8Q>J;j{?R z+e)si%~J-d+f(H=CjVwBRA*j4jsK|MI@ztvAAhXF_}?cve;TzMLN)RY7wy>fTd4o`M(=yuddKeD!}+MD$Iy%BVJc)N?D7*6ikv_j^}+=&Y$ z<}6gM#g^68O$odBA-NFrX;9_aBRQGvh^eiSW2M+XXUB3Fqf3K5qOXn#-4w1+xKH4G-rI%clSe*{bf zm3*Cbn7N=ed$U3^g$C>~6ZNYLE;F(X2i-n#2Mwzebgvx^9te~2_;Wx0(5Cu!qfdWS zGn7Cu*l-lQ0ZuhWb_&eV@S(NT+CO|u4AYe0qt&vdpinh(kTks$@0162tc`M%b^aQm z7K)Ly&B$L7M3`=G2BRj9jmA!0%j`?!Ykqt%sgbdX*+`w5{PNR3=GwxenuSx9HZGRZ z2A~C~EF-u4=dBKOpd0_Ok0_V2e-#7swGY%tk%qwMxAf2?t+u$JquHYR98>D;>dK@g zu~c9Hd6m?F5`)8+bo|;S@PH+CO=`HS&|IR5T3o%Wg^EwoRhi$ve`_+NjR=qSZ77Lf zmwENd;og($GOGpA<{c^>qn$ISZ^Im_d^}FAD}H{-B_CEl=43+MTam{r$YYO+nwI8Y zs=kxRiDwG5&Z|sE5RFm&={cAYBegkg3#1EkGzN%Ba*TJ6-{e4ud)8GeeCF{;rX!b} zOV&gPQtsVv%M1HQg$<-aoP<>iK(W0pGZG8JKuIgNPQ1YcUnF5>KE-S@#D1;r7lX5H zC`yzC=yY~|lx=(qpmJRttNt7=hv@1{rVWA~Zw2e3=qb*u}Dr=-n;9J>pod z8iQe{@7MAAI>Dl*?0US`;L-2R5U!8TSpGUC^!IosA9U$5 zM@9Xdy>98cqiMbzHDRgcMP>a?kz*HRdPg{DTUa&_U>OdYwL|-jwi-=tCkq&H1@0IG zdCUq2&IpfU;zs|7{0wG6`sWXP88#hD79!?EgJM<=K4g+U$lxGq;3~@2FY{NZ!JP9s z!u+7JV8bjwEfBBl!@FRomFss) zWm8{7WQm~}!@779{u#}I^PKBaT0LC3Jy*?6;xjiHHq{v>I zd~_HT_)j2Bu4$EU=Ss@d=qMg_AeB6^^+KHvWaA+RNygD0kzJu0lV3?C{?`O z2!-+An|n5K)1Mek1ycHa@WZRVLg?an;;@&w(VHcxA@ryuaHr4+e;bbEWdhrZ*^C9} zPI%SMau=mQCgMfgIgD-&7TwO**9e*m!#Sv<#F4av&SN3%FonAzN$n~3-R_Spr#uOgcAuYp-!P=ocv|1Qma$pxYF$xPU%n4#`DQOywF#~G zS^0Nrc+cPa%Q4jg|HKHS%ac0>?;nQutMKMJJG^jBuJ^#MD{4($(y}}r2mX}N^5V@6 zlGd73iWmyyniRuU0piOM$reGA+IIN&@cTqn|&S)?|(+Md=N@} z)-82oP?E8qj%qoDYT1Bt`CBM4L6!9K35Uwcd?;}&l-T)X$>ZRoDk)Tz#M|8~{fi-# z7#>Poc%sEo>w!I4L>@0A>+G>7{@9bJ*vT>a*$e)qXL*voDr;3iqa7JtXwi^Mbl&`gPK0D+U*J-XdjoTF}hZU(Sdp&i|}hM3z_2d)3pPAI^ptnY!dm@0wjvcR5_Utx|QY@|Lek>B|Vc z)5b7;<67L+0*nXXPOt^2Q|1J4=ClmBLmS=cO=+I-6Mbol z%2Vfxv*Y#kXG6wLOO~lBgGPXx9KP+I-p%RWtZE6eeM0XdwpL`td+ekWqU4rc={04X zZeQzDBS%@EntvFpH?7<=lwpIp$R7Ki;)-PLW)qvF%Sbp)6-kVarFKq8(ryd0MzIM+ z{XVz3ac-KHzL~l-*z?umkDfX+Q;!>1YIjKMH@DIukXGfl=~hl>hfooDZis4#1Lz|! 
zsW%*#8|vSV*jW^$4X9*Ly=Ekd<-pf$;H;2FA;JCijsg(7sKOX3}j z$|j)eGq#anLZ0|E;8}k^3ULx;cV#T zY!i*p6HtF=FZ(8Y3o}07&ufRrNmT}qa=6f*r(D5c{)3SekBMIyegaYM{BCbNmQYXo zzC#~^f*i*l<%YA!VtE?%siCwMJ&|XoIM#op)+Z`CmlK=3-87gjk%R$QgG z0H_$U;tm38$_}WvpEdw$iQ|juWX=8yRcD-GRmUIR^NycP0f5&R063eG(G7AWHLh_2 zdVZV($j43%#dv)_pvScZ7^B7UAlwMlsWVCR!N<=A11HCaaiG2l*6A|{O7rfG?lG`u zbR5{O+Qr#-qCYeEU(* z^m5CrYWhlkCBS#WGgiyz(2asD!DFCd->H5t@y=+R^%&sBvs)#|QoFT%Kv2`IHL2BJ zA)Nbw*Hsp(()M|*&K`{;1($B7hq?4=K2{Ucm*V#TcCbCD-!^7V8aDH++n{z90MX?(~;C`PwUFFB(uhwB@NczXO?_tl>8hT&8#hk)7 z_Vmex>oWd%TC$){Td7y+?PhKoxIg`JDsMU0bdP4Cn!V(7u37{{57i6mP^WBX3wsB8Tm1 zi?H$~nJIFE6i;wj4V781hUTqpHoam_RH@hsnZ{#mqw{-P^R<}`r&4XY)a{oUws>jP zTaZ19Hycro1QYsf<%4A~hl5o0lC-o)i5w`mqa230SYW;scC&^RR13dXTZ0zKC-K$}@Mu{}O zq6w2?6?9|ef)a5`1UCGDPvK)J$?BzdKLjL}9;XHcSm17?u*)jSn5`O233e zD_5QrM>O`HDcrz`6V90%H5Y)s*<=Rn8m7bI2I~n>{6TI83yy8`=Q^d`DlNv@C_Itd zVaU8Dho)50Df9%qoSzx!yYZ|sbR75GAy_JdFB~Y|N@+@F4!h3#fwjz0Q261v+xLrW zY*7)HR%6)}BB5Imnu_mIs3Vc04tyil8$165CPsBv;C3(iN^DNh<^4aGQv!*eEb$EJ zhCN4{VtYUk$uQb^-Kzz^7r{>fDZ+tP_R>YuTM(l+;M-2y1|}X4Y8Fi$K%l9UKoAcH zZ0VaEV#(woiW8AEJQ)aZGWVcvfr4p=z+KQXkZXtpKcYoVFVkJJjYN* z(_&b9TlE%A8EbV&0l4YQH!)ZNWs8Bner(z*Dmf*^h#AF2rLF9`Hpdi^_=6AXEmQiu zo?ld=et*Vu?Ca%LO-w77#fKjhDXeLI$zFSNs{KfTmY2PZHPPAGyb=74=fbJ`o9fcA zjrE-Z$wV3p%e3Y2n104X0lgR{zbIM0{rA2UQCD=Sk*Ft}}kN&E(=!4M-tLr|aO^+Kt%1Xh_A%_$9D7^(iNqA{~@MaPf zHU++gl*_V*ut4%7s8K8z9wcwx1O=vpvdwg#g3MuV6je+-+mJt~dIF-B(ZOcgcjF4B zDTncHJTG3Hn?jMo8|>Z4@rCXP(yH5)rBWKxh3M_~jCSDk=?&!>#DT4#WB3{pEN+gW z@t$hwjjcJX1gHV^MmHHH9-+_{9UQoYLR9EB5Fp4s5d=Vh^9zi0;`S>HcXB`?aOQ_m}{tcvh@s2Bb(2H zw;NJ3>RP%%*b6L7u$~3X+_Ka-g ztb5hjRu7V%bj%;Y$;2x3jLMtukyJaoPdIp^3_i-~W!_r^IJ=M9CibZZ65#&R(2G@- z{M`?DIVf&0QP&&o;GTA;;+L6?w}SOSKd<7aRvLcmmSBgk0$X%xb|ACuw5HHe$Rs#O zY^U=nJ3*hPbhnZePXUrHTZ3Gnbcknj2PJgia~@~hUlt5JhG zP}iJeh)^FC_8c{=aH~_S(4-pDx4xwT`WVkcO_dud@13)ha)Jc=+sI~yUG%0}f^)Xd z-Wyv{ySWL{Gp!?983pLePXEwd{UCn#xiTLo0*ZUM$z#F3+gtAs*%y0EPi(%rJNqRV zWd6)kW3sA8f4eiqZS`Bu^VekR`9^?Hg^~^+!Y{2_7w7``ijL^yuL|lkbV^pwF)(*7W+eK{Y zlFN6_yHZwkY#{;-#b+sX*seU>v{HQqyT^9_p^LR@GR%n0{o$sy>T9z>`q0aCKg6{v z%)3Km=qAM@D$IX`$kVNhxBt2K)}q&@F%uTdM*m39wlNzP$tM1&*S0YS7SDEzl<4 zouqVc8qPY~y=dc_Dm%;Lr>OI_xiw}}2W2m7dst;2n6r1|`^)6z!@%?8|K8VGe+gaZ9_LM zKk3OjE>Fo`@u$2Rw6xRi_|b6`&j9)mAhHp- zS2GqybB=u|kuLfWquA?Y#`-fVnyxZU*(H6}dR43`6@=abNaWV!_Wl9;XEU3%B0-}uxmjo}@Ix#oEycc^;R6FW&6`Yl8@ zmBIVsAM1s@9nCU#Gi_Bgdt2V;kT{?cDny2|1mCZGF9)U!2h{6URa1)E1r7)|Of$DV z|NK@t^!%NIy`|KnW%_pPYO{7fRq6=(rkJBZ-LIyOnXATbdt+V3!#9&WFM%t~Yylt0 z83?~>CIJVJiX48Kke#6(vR@ttxISh>fEOhR`0TqvWXJ9+a4K^QIE5ykY6Y$`odAA# zhU`U^0{`oA0-X0azz_W1?8rKPlKP~|&94%;D)2Gjb(yJXcZ{GZ*#6@9Vm)+c7cWN(18IV+ zBwyCu-fc<)*t#5#Bfx8Rnk{HQVB{fPNjX%=5{bjerKfQ*tTTrV z9-wMUIPdqhO4K?tS^je#SLzcprVxDn`E`^s)5Y!Y>Lr8f!iVVuOaR4`KB`>W%NQ}H z)N*!y3{N||rTCYNDn^7ULCLx`>1PvvFt(PG^J~qWlb3h)jg;0$Dy{tqqC3?+3t9pJ z>C<^-%$7~0$tSz!+5LC=J1P%9zEo#vwXYiEuJ>IaWq|Bh#{;VY7c@@xc=~4z<+!{l zAn*0$h+4$~7<4gmX2b{ZQ%`^dSfwQSlr(Uv?gQY3mykW(YBE@(09XR#WJn``az-Bz zQS*`|;E)V_4LMAI26(|}p4`0mf3tU=ZqYTodXb$6ZY%0?y z3a}*|1c~-i)ud2hdYF_Cey5QR^5lQRe}x#lzM9>j1;UT_iSxgCYH(cqfy&UJ^=kY* z*aHi8Pw!Ugj6P=CcNv}P@PIRX24))g>+%u%H>n&WGH%-NP_1fB#ROA-fHurgra zgVh|p`Ppcg!mCfR&v99_p+2hXqpnqFV<^>_@}b0AakV+1Yn*#3{7x=hDT6OSvU~VS zbHGbb?g1|fT9~c()PrcV>NSJC5ToE+%vhxi5Ovs-ZAvIvt8S3taRS06 za!7o51e{X-3V_H7pyEkJvk-`~{7iw1;EupW17r-n08lo_3Ti^eDV2PyML^?fdSCrL zR$bpv28xJWQ?)*6rxYWi6q&(3pfIEP8*HFAc#%?3@cRge5!)+O4$zT)!~6p_!!t~0 zrZo%=M{wJWn}sQdbr)g!)nJ1?aA4tbJA)wD1Od};jR&19Ts7a}LQobkoqd>1Wi^Bj z%3#1a@h5-m@$z`+m;y&=>g{v3pVtCugraw>zD*`mm{Jz=!HFzB7{O8{%p%CBSNix> z%87fnf-?lkRW-~b3HB`uUt?TZESo+Dr^Mw)jE+z|O 
zgb=A91|4t%X&Z{9`mE#Z@$tKy06K;$J;~Z;535G%EOu92=>_{ViNR=8%^oMVmphaM zX0Tn$SmZE0Czhmdjan!D<)-#?Kkho!u1j_xD6Pn zOtZL;*H|twI%J|`+R-K=zjMzxWBazxa~wBZhW(Xk>bn z6B%2lSmizY$>XBP-(w_T&bg5&@C;%6?3f$F36*r_6Y9DY zhNN3JPtv^myhzU#LpUKHK+1SpY$5amA^~r+M_`v1VJWBhz=V!UGi2g+qtMB0r~6bW zkQ-$W!pVRGF*T`XkdI7fJ9ja^agr-|EQgjZ`UQFFRL5Cb{PZaJotd<&s_Ibr* zgNY8n*y}z=K(qYrhWR=&pl-F(^-^&`KacM^T9?iTN1uKG*uECdphLdbUxZq+7O=bl zX$^dnfz4yV#KG1f1G5Q;wd0suHV;xsY=0t$FP=7nU6twEdjz~Ozg++Ln|7%q=3+}c zwO)R{mBRBi=2byGOD$$xmrn_fjc_jew$GB}$O6JhBM!l##W<$#;M^-|bj2aK*FffR z57)B~+yS%QSl7Fi7bUis<24UkuQx0eXSS;3yN5;G&6jCeD18vptt7BoVCvEGy-E0x zus-9ry5!e$C#UUC{}B*dl&%}Lzlh_~tPz5K^_kE0L#ti+R3aX@_0wk3`t;Mz|0LlJ z8`;``tpQ2!+X%Mx+h1$klkJsw(G(1&8Vxv_5sY{Uau|(<-+xxq6UnUm8qvN56adPX z2cV?Bwa8igJl+CU~Ebi^VFkoOs=vn(c!l=usE#vHx~r=6Il~-`W>W zm6KVO$z35ZTRd_`xJvQ`3|SoDNSk~?`MTv@B*kDP%7W7osYwXY1<0`zkuRV|V#1Ps z1v;zL!eOO}Et8QI!PFok;HtTD5bnp&1aoH|blKn>ZR6_Z5?gGfGFl9KO|YF5b*xo) z4;?yiE7roZ5!8LfWlV=5w}(EMDRQeV-TIa1%Co;AJ0`&CG!gvIdYym2hS3FmwyDei zuGigsA~4WIHV|241JUA%*3-hPiv7|JaN(6>(1H_Q?ggLm3JX4r^XYmV>rD?z$Dc<< z;Ww8xsCZ~}jrD{}8EzdGOJsobiSxI!u2X~{Llp(x%0r51^V2;a)s)A&fL45DKx-t- zAgDHYCMhXNAdHF`lR>h1KDnZ##JgA3_C+4rv67XdNa6uzjK&+7FB|R5GLZFn1_yNm zYodW4i8z?A{N-Y^scsbL#OKx~&Ro-jPwsQ%eUujR0hWUr62|Dq$4=z6K)AI>!bxF* zh@(4tnqhI5Td(sH_%))pvLuz}Rb{G57=4(a)S)ePh}8iRR`-sV59RGs7du!xUPK@xe!TXoKkjRPfnGJQ5zeJ1$vt#wKkj++rTCx92ce31yI&~@ z2`qXGaSqcwJwubCDx1`pXCD@I;fkcsqTStQ$`VI7kRdk}rpSq9FP>iaQ3R(7ngV~H zXGeenq1ikjW|FC^GUGLFX5OhY3{j77&6E! z^!|1l>Zxs9fn*tcuFnjW{$q6ET6ReUwGm`6@3td>C#zw5TM+9_S(+n1w5IF~_ZWC+ z>MQpBJT-H zhmY6Sx1Hw))`At?e^w{|;}dk33~rnuyo;#F<&l(~#!|7p1qT)Mzz8!=INGkm5oGId1VSG}s;`(qit5q7aSq z)Va8MO$i5&705evm3Sc%yU}i>FH5FsAS$D1DN>GuVeo8|CXeG>ORTcCcw}E0_nF7q zUYUHlH`XSSVpLN1yEq(^R4(!QTD_I*_Ja49!28+Xc3U;uyeF(1)N=WD_3!C^er}0X z_FQX{?Rhkn@Xs;QeuKOWt|0VBQ)Vvg`KJ?|+1S{acDnt}#}5=HshB_`0Xy+r*OwpZ zN`vknYNUf#w~CiSoC+6eh35FS`4k83@+PJxayWei4K0Qxvj_o&+Yt#x`0C3s%!8Z&kkiV%vbiSv^^{rRw&nY_<6>Z zW;Od5C_fJWQF~_QL1RvK;bK8p`w|VK_WQ$r`gw)F%gKMbRpJ>k$DVsOSk5oZ2N1f9IHEB{|@($7BNqyM9_bV(nj9ER8h3kUSevoL)`CEGLl^4 ze*#r&3DEYZazyM6(cOY}Tb|!H8i$TCF8RzS8Q_G~BQ7F&@x>*bl!Er^T@(TRn)Ba` z^;)lz4vkj9$K?~hEZ}~I_OhP+yq-7Bn7GP>+)%YNrj#>krSs1+561-p?l-tONR(w) z6I732xG!rOCdEMi1aCGzVZxovK_P%QYA%e)dd`~86QR(=#)0eC4KWujbgDiFcRhzy zZ>N?2UE!Kn$idLL6uLdiCLF@>twS#ypEUYYvr!@bMh&9km+iJ=D3LCd=ym$~X6zkC zNy%EPcZnC0Qv!Yd6LH=5&&eW=dV?+pbHG@^j@o)E=J&GH@DCNijTGsgNT*o=R|#(vXaB3^^cgUwxOjQ?<(Ychj2I-lMznmL|Muddj=UV7@U0)W2GU~Qx zm6Y9OyJS(jl*=%y&g-d(;!xQVp0ifL$?Vpg^vAF}9iREUDQ6X6y``RrWvKiluI%>z z8@E^qT)NSw;7Gse!1FCe+>ZbH+&T7f-lqXIn)C+iH zIh04D7?qd16x7CFWE8PhAUSWwsw?|7_j3!s9CrELcf$=n;y#Ar?}P#_WnGSg;vDw{u&$J@H<&goa?4R;55Q4 zIEA+(CCxo1Z^}^?OM-+{5lE?052J5k*f`E0-4}>_^FjhEF?QV)g$XD$q>P}ezUhu~ zD9!O~qcIuE%zA@S2+2dcJ*6s#yvcSNuAZS15(JfmA|#3IMvcOj*_0= zkhYXWL5?D6PMUsMUq#+QrWq73bPu~@pUk8Du=YhB`p2W;f9-20U|*$K#tP9ut*_@g z96AovMLDr^|57rlzsS6;s|;|Vk3QGV=&Bv}N&d6_f_s`aqpv+`Kv$v2r2XQXH(k6V zD3BeVwfB}@V;U{f9w+>QHbx81gQvOm=w*ruv-vVhFkeTGgRUu(j|t>FHIFuiiIHe} z3yKCMc|!ypt4d5ALBCdRoX`+b|u%|BZ&JqA=oP>W06<9SAcvS_&0X5~fMiQODD=|izokUUWClGc$u^zBp% z2o-Ftw<`a$*ACWzy?zt%RbFzrW^$*vs7-qH*&apk^nDV@~eD@Gng0?uz|$e_|O zT4$47eg2X=E6u*M`uq(D60T90!ia;k=S%n;rDw&@FdBPJc&zQxJ2D%|mb$I5ZpNRC zvi=SZRE|G`s*(piLn;}`g1jWf{5};I<8VwYyMd|KzDbV6}jd!(QpUT4sxR7FD^B(H+9xOthp^Nkx)3GpR|T$$Lp) z=KbP@UZCvk)=&N9$x8qTe^yS~y8jwM-%Iwm7a~8rhC=HtzQ>2YJg%=8;9{lo9n`CC` zbjuj}mZq0ZTtEX00e#)TUoxl198#>-VcV%l;7uA*fduGLYUc`kVe=B;@5fI^DUTq` zhb28(#Z-NooZRVbHIdZsZRrt$M1U``xa!?b)3gXCw|5vJ@35>H@|#7WjD{JU-8jJC z5+TRd8gtvY*>YP0n_{X(sewVStrW_~gqG!|oRIj`WFn@JUrSV0^U}7qD{(P&$IA=h 
From 34c245a3290f0f8ce8e539cfe17282038a33e8d8 Mon Sep 17 00:00:00 2001
From: Matthias Fey
Date: Thu, 3 Aug 2023 11:21:48 +0200
Subject: [PATCH 1388/2432] Add `faiss`-based `KNNIndex` classes (#7842)

---
 .github/actions/setup/action.yml    |   1 +
 CHANGELOG.md                        |   1 +
 test/nn/pool/test_knn.py            |  49 ++++++++++
 torch_geometric/nn/pool/__init__.py |   4 +
 torch_geometric/nn/pool/knn.py      | 134 ++++++++++++++++++++++++++++
 5 files changed, 189 insertions(+)
 create mode 100644 test/nn/pool/test_knn.py
 create mode 100644 torch_geometric/nn/pool/knn.py

diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml
index c7935465aa47..fc34eb3bed67 100644
--- a/.github/actions/setup/action.yml
+++ b/.github/actions/setup/action.yml
@@ -55,6 +55,7 @@ runs:
     - name: Install extension packages
       if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' }}
       run: |
+        pip install faiss-cpu
         pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }}
         pip install scipy
         pip install --no-index torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a886fea57a68..df1b0a1f95e2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
+- Added `faiss`-based `KNNINdex` classes for L2 or maximum inner product search ([#7842](https://github.com/pyg-team/pytorch_geometric/pull/7842))
 - Added the `OSE_GVCS` dataset ([#7811](https://github.com/pyg-team/pytorch_geometric/pull/7811))
 - Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780))
 - Added `lexsort` implementation ([#7775](https://github.com/pyg-team/pytorch_geometric/pull/7775))

diff --git a/test/nn/pool/test_knn.py b/test/nn/pool/test_knn.py
new file mode 100644
index 000000000000..42253877bfe3
--- /dev/null
+++ b/test/nn/pool/test_knn.py
@@ -0,0 +1,49 @@
+import pytest
+import torch
+
+from torch_geometric.nn import L2KNNIndex, MIPSKNNIndex
+from torch_geometric.testing import withCUDA, withPackage
+
+
+@withCUDA
+@withPackage('faiss')
+@pytest.mark.parametrize('k', [2])
+def test_L2_knn(device, k):
+    lhs = torch.randn(10, 16, device=device)
+    rhs = torch.randn(100, 16, device=device)
+
+    index = L2KNNIndex(rhs)
+    assert index.get_emb().device == device
+    assert torch.equal(index.get_emb(), rhs)
+
+    out = index.search(lhs, k=k)
+    assert out.score.device == device
+    assert out.index.device == device
+
+    mat = torch.linalg.norm(lhs.unsqueeze(1) - rhs.unsqueeze(0), dim=-1).pow(2)
+    score, index = mat.sort(dim=-1)
+
+    assert torch.allclose(out.score, score[:, :k])
+    assert torch.equal(out.index, index[:, :k])
+
+
+@withCUDA
+@withPackage('faiss')
+@pytest.mark.parametrize('k', [2])
+def test_MIPS_knn(device, k):
+    lhs = torch.randn(10, 16, device=device)
+    rhs = torch.randn(100, 16, device=device)
+
+    index = MIPSKNNIndex(rhs)
+    assert index.get_emb().device == device
+    assert torch.equal(index.get_emb(), rhs)
+
+    out = index.search(lhs, k=k)
+    assert out.score.device == device
+    assert out.index.device == device
+
+    mat = lhs @ rhs.t()
+    score, index = mat.sort(dim=-1, descending=True)
+
+    assert torch.allclose(out.score, score[:, :k])
+    assert torch.equal(out.index, index[:, :k])

diff --git a/torch_geometric/nn/pool/__init__.py b/torch_geometric/nn/pool/__init__.py
index cc7a0145be2d..99975c26de9a 100644
--- a/torch_geometric/nn/pool/__init__.py
+++ b/torch_geometric/nn/pool/__init__.py
@@ -9,6 +9,7 @@
 from .avg_pool import avg_pool, avg_pool_neighbor_x, avg_pool_x
 from .edge_pool import EdgePooling
 from .glob import global_add_pool, global_max_pool, global_mean_pool
+from .knn import KNNIndex, L2KNNIndex, MIPSKNNIndex
 from .graclus import graclus
 from .max_pool import max_pool, max_pool_neighbor_x, max_pool_x
 from .mem_pool import MemPooling
@@ -318,6 +319,9 @@ def nearest(
     'global_add_pool',
     'global_mean_pool',
     'global_max_pool',
+    'KNNIndex',
+    'L2KNNIndex',
+    'MIPSKNNIndex',
     'TopKPooling',
     'SAGPooling',
     'EdgePooling',

diff --git a/torch_geometric/nn/pool/knn.py b/torch_geometric/nn/pool/knn.py
new file mode 100644
index 000000000000..325b8835a82f
--- /dev/null
+++ b/torch_geometric/nn/pool/knn.py
@@ -0,0 +1,134 @@
+import warnings
+from typing import NamedTuple, Optional
+
+import torch
+from torch import Tensor
+
+
+class KNNOutput(NamedTuple):
+    score: Tensor
+    index: Tensor
+
+
+class KNNIndex:
+    r"""A base class to perform fast :math:`k`-nearest neighbor search
+    (:math:`k`-NN) via the :obj:`faiss` library.
+
+    Please ensure that :obj:`faiss` is installed by running
+
+    .. code-block:: bash
+
+        pip install faiss-cpu
+        # or
+        pip install faiss-gpu
+
+    depending on whether to plan to use GPU-processing for :math:`k`-NN search.
+
+    Args:
+        index_factory (str): The name of the index factory to use, *e.g.*,
+            :obj:`"IndexFlatL2"` or :obj:`"IndexFlatIP"`. See `here
+            `_ for more information.
+        emb (torch.Tensor, optional): The data points to add.
+            (default: :obj:`None`)
+    """
+    def __init__(self, index_factory: str, emb: Optional[Tensor] = None):
+        warnings.filterwarnings('ignore', '.*TypedStorage is deprecated.*')
+
+        import faiss
+
+        self.numel = 0
+        self.index_factory = index_factory
+        self.index: Optional[faiss.Index] = None
+
+        if emb is not None:
+            self.add(emb)
+
+    def _create_index(self, channels: int):
+        import faiss
+        return faiss.index_factory(channels, self.index_factory)
+
+    def add(self, emb: Tensor):
+        r"""Adds new data points to the :class:`KNNIndex` to search in.
+
+        Args:
+            emb (torch.Tensor): The data points to add.
+        """
+        import faiss
+        import faiss.contrib.torch_utils
+
+        if emb.dim() != 2:
+            raise ValueError(f"'emb' needs to be two-dimensional "
+                             f"(got {emb.dim()} dimensions)")
+
+        if self.index is None:
+            self.index = self._create_index(emb.size(1))
+
+            if emb.device != torch.device('cpu'):
+                self.index = faiss.index_cpu_to_gpu(
+                    faiss.StandardGpuResources(),
+                    emb.device.index,
+                    self.index,
+                )
+
+        self.numel += emb.size(0)
+        self.index.add(emb.detach())
+
+    def search(self, emb: Tensor, k: int) -> KNNOutput:
+        r"""Search for the :math:`k` nearest neighbors of the given data
+        points. Returns the distance/similarity score of the nearest neighbors
+        and their indices.
+
+        Args:
+            emb (torch.Tensor): The data points to add.
+            k (int): The number of nearest neighbors to return.
+        """
+        if self.index is None:
+            raise RuntimeError(f"'{self.__class__.__name__}' is not yet "
+                               "initialized. Please call `add(...)` first.")
+
+        if emb.dim() != 2:
+            raise ValueError(f"'emb' needs to be two-dimensional "
+                             f"(got {emb.dim()} dimensions)")
+
+        return KNNOutput(*self.index.search(emb.detach(), k))
+
+    def get_emb(self) -> Tensor:
+        r"""Returns the data points stored in the :class:`KNNIndex`."""
+        if self.index is None:
+            raise RuntimeError(f"'{self.__class__.__name__}' is not yet "
+                               "initialized. Please call `add(...)` first.")
+
+        return self.index.reconstruct_n(0, self.numel)
+
+
+class L2KNNIndex(KNNIndex):
+    r"""Performs fast :math:`k`-nearest neighbor search (:math:`k`-NN) based on
+    the :math:`L_2` metric via the :obj:`faiss` library.
+
+    Args:
+        emb (torch.Tensor, optional): The data points to add.
+            (default: :obj:`None`)
+    """
+    def __init__(self, emb: Optional[Tensor] = None):
+        super().__init__(index_factory=None, emb=emb)
+
+    def _create_index(self, channels: int):
+        import faiss
+        return faiss.IndexFlatL2(channels)
+
+
+class MIPSKNNIndex(KNNIndex):
+    r"""Performs fast :math:`k`-nearest neighbor search (:math:`k`-NN) based on
+    the maximum inner product via the :obj:`faiss` library.
+
+    Args:
+        emb (torch.Tensor, optional): The data points to add.
+            (default: :obj:`None`)
+    """
+    def __init__(self, emb: Optional[Tensor] = None):
+        super().__init__(index_factory=None, emb=emb)
+
+    def _create_index(self, channels: int):
+        import faiss
+        return faiss.IndexFlatIP(channels)
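
For context, a minimal usage sketch of the `KNNIndex` classes added above (not part of the patch; it assumes `faiss-cpu` is installed and mirrors the API exercised in `test/nn/pool/test_knn.py`):

```python
import torch

from torch_geometric.nn import L2KNNIndex, MIPSKNNIndex

# Index 100 candidate embeddings of dimension 16:
emb = torch.randn(100, 16)
index = L2KNNIndex(emb)      # exact L2 search (faiss.IndexFlatL2 under the hood)
# index = MIPSKNNIndex(emb)  # maximum inner product search (faiss.IndexFlatIP)

# Retrieve the two nearest neighbors for 10 query vectors:
query = torch.randn(10, 16)
out = index.search(query, k=2)
print(out.score.shape, out.index.shape)  # torch.Size([10, 2]) torch.Size([10, 2])
```
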
From 028807fc6e91a72c2bd66c21ed998ab2c75f2d3b Mon Sep 17 00:00:00 2001
From: Jintang Li
Date: Thu, 3 Aug 2023 19:09:05 +0800
Subject: [PATCH 1389/2432] Fix unused argument `to_undirected` in `read_npz` (#7843)

---
 torch_geometric/io/npz.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torch_geometric/io/npz.py b/torch_geometric/io/npz.py
index 3e7ffd07f82d..2074af94adc2 100644
--- a/torch_geometric/io/npz.py
+++ b/torch_geometric/io/npz.py
@@ -11,7 +11,7 @@
 
 def read_npz(path: str, to_undirected: bool = True) -> Data:
     with np.load(path) as f:
-        return parse_npz(f)
+        return parse_npz(f, to_undirected=to_undirected)
 
 
 def parse_npz(f: Dict[str, Any], to_undirected: bool = True) -> Data:
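
The one-line fix above addresses a common wrapper pitfall: an argument that is accepted but never forwarded to the worker function. A self-contained sketch of that pattern (hypothetical helper names, not PyG API):

```python
def parse(raw, to_undirected=True):  # worker that actually honors the flag
    return ('undirected' if to_undirected else 'directed', raw)

def read_buggy(raw, to_undirected=True):
    return parse(raw)  # flag silently dropped, as `read_npz` did before this patch

def read_fixed(raw, to_undirected=True):
    return parse(raw, to_undirected=to_undirected)  # flag forwarded

assert read_buggy('x', to_undirected=False)[0] == 'undirected'  # surprising result
assert read_fixed('x', to_undirected=False)[0] == 'directed'    # expected result
```
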
- Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246), [#7534](https://github.com/pyg-team/pytorch_geometric/pull/7534)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) -- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493)) +- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493), [#7765](https://github.com/pyg-team/pytorch_geometric/pull/7765)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671)) diff --git a/test/utils/test_subgraph.py b/test/utils/test_subgraph.py index 6b462b1fd12b..8b37f877fd54 100644 --- a/test/utils/test_subgraph.py +++ b/test/utils/test_subgraph.py @@ -1,6 +1,7 @@ import torch from torch_geometric.nn import GCNConv, Linear +from torch_geometric.testing import withCUDA, withPackage from torch_geometric.utils import ( bipartite_subgraph, get_num_hops, @@ -72,6 +73,20 @@ def test_bipartite_subgraph(): assert out[1].tolist() == [3.0, 4.0, 9.0, 10.0] +@withCUDA +@withPackage('pandas') +def test_bipartite_subgraph_large_index(device): + subset = torch.tensor([50_000_000], device=device) + edge_index = torch.tensor([[50_000_000], [50_000_000]], device=device) + + edge_index, _ = bipartite_subgraph( + (subset, subset), + edge_index, + relabel_nodes=True, + ) + assert edge_index.tolist() == [[0], [0]] + + def test_k_hop_subgraph(): edge_index = torch.tensor([ [0, 1, 2, 3, 4, 5], diff --git a/torch_geometric/utils/subgraph.py b/torch_geometric/utils/subgraph.py index 490e53a6b78f..a64d59e622ab 100644 --- a/torch_geometric/utils/subgraph.py +++ b/torch_geometric/utils/subgraph.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.typing import OptTensor, PairTensor +from torch_geometric.utils.map import map_index from torch_geometric.utils.mask import index_to_mask from torch_geometric.utils.num_nodes import maybe_num_nodes @@ -172,6 +173,7 @@ def bipartite_subgraph( else: src_size = src_subset.size(0) src_node_mask = src_subset + src_subset = src_subset.nonzero().view(-1) if dst_subset.dtype != torch.bool: dst_size = int(edge_index[1].max()) + 1 if size is None else size[1] @@ -179,22 +181,18 @@ def bipartite_subgraph( else: dst_size = dst_subset.size(0) dst_node_mask = dst_subset + dst_subset = dst_subset.nonzero().view(-1) edge_mask = src_node_mask[edge_index[0]] & dst_node_mask[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: - node_idx_i = edge_index.new_zeros(src_node_mask.size(0)) - node_idx_j = 
edge_index.new_zeros(dst_node_mask.size(0)) - node_idx_i[src_subset] = torch.arange(int(src_node_mask.sum()), - device=node_idx_i.device) - node_idx_j[dst_subset] = torch.arange(int(dst_node_mask.sum()), - device=node_idx_j.device) - edge_index = torch.stack([ - node_idx_i[edge_index[0]], - node_idx_j[edge_index[1]], - ], dim=0) + src_index, _ = map_index(edge_index[0], src_subset, max_index=src_size, + inclusive=True) + dst_index, _ = map_index(edge_index[1], dst_subset, max_index=dst_size, + inclusive=True) + edge_index = torch.stack([src_index, dst_index], dim=0) if return_edge_mask: return edge_index, edge_attr, edge_mask From 1d1583a6559a6781787d8f9945441d1fbbc7ce26 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Thu, 3 Aug 2023 04:51:56 -0700 Subject: [PATCH 1391/2432] Utilize `cudf` for `relabel_nodes` pathway in `utils.subgraph` (#7764) ``` cd /opt/pyg; pip uninstall -y torch-geometric; rm -rf pytorch_geometric; git clone -b cudf-relabel https://github.com/pyg-team/pytorch_geometric.git; cd /opt/pyg/pytorch_geometric; pip install .; py.test -s /opt/pyg/pytorch_geometric/test/utils/test_subgraph.py -v test/utils/test_subgraph.py::test_get_num_hops PASSED test/utils/test_subgraph.py::test_subgraph PASSED test/utils/test_subgraph.py::test_subgraph_large_cudf PASSED test/utils/test_subgraph.py::test_bipartite_subgraph PASSED test/utils/test_subgraph.py::test_k_hop_subgraph PASSED ==================================================================================== 5 passed ``` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/utils/test_subgraph.py | 9 +++++++++ torch_geometric/utils/subgraph.py | 12 ++++++++---- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e3918a07ad74..c6b1167446cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,7 +32,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
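Both this patch and the previous one replace the dense, `num_nodes`-sized relabeling buffer with `map_index`, which is what makes very large (and GPU-resident) node indices workable; the new tests are guarded by `@withPackage('pandas')` accordingly. A minimal sketch of the call as the new code uses it, mirroring the added `50_000_000` test case (the `max_index` value below is simply an upper bound chosen for this example):

```python
import torch

from torch_geometric.utils.map import map_index

subset = torch.tensor([50_000_000])                      # Nodes to keep.
edge_index = torch.tensor([[50_000_000], [50_000_000]])  # Edges among kept nodes.

# `inclusive=True` promises that every queried value appears in `subset`,
# so no filtering mask needs to be returned.
out, _ = map_index(edge_index.view(-1), subset,
                   max_index=50_000_001, inclusive=True)
print(out.view(2, -1))  # tensor([[0], [0]])
```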
- Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246), [#7534](https://github.com/pyg-team/pytorch_geometric/pull/7534)) - Added the option to override `use_segmm` selection in `HeteroLinear` ([#7474](https://github.com/pyg-team/pytorch_geometric/pull/7474)) - Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479)) -- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493), [#7765](https://github.com/pyg-team/pytorch_geometric/pull/7765)) +- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493), [#7764](https://github.com/pyg-team/pytorch_geometric/pull/7764) [#7765](https://github.com/pyg-team/pytorch_geometric/pull/7765)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) - Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671)) diff --git a/test/utils/test_subgraph.py b/test/utils/test_subgraph.py index 8b37f877fd54..893aca29353b 100644 --- a/test/utils/test_subgraph.py +++ b/test/utils/test_subgraph.py @@ -50,6 +50,15 @@ def test_subgraph(): assert out[1].tolist() == [7, 8, 9, 10] +@withCUDA +@withPackage('pandas') +def test_subgraph_large_index(device): + subset = torch.tensor([50_000_000], device=device) + edge_index = torch.tensor([[50_000_000], [50_000_000]], device=device) + edge_index, _ = subgraph(subset, edge_index, relabel_nodes=True) + assert edge_index.tolist() == [[0], [0]] + + def test_bipartite_subgraph(): edge_index = torch.tensor([[0, 5, 2, 3, 3, 4, 4, 3, 5, 5, 6], [0, 0, 3, 2, 0, 0, 2, 1, 2, 3, 1]]) diff --git a/torch_geometric/utils/subgraph.py b/torch_geometric/utils/subgraph.py index a64d59e622ab..89c37b9c41fd 100644 --- a/torch_geometric/utils/subgraph.py +++ b/torch_geometric/utils/subgraph.py @@ -94,16 +94,20 @@ def subgraph( else: num_nodes = subset.size(0) node_mask = subset + subset = node_mask.nonzero().view(-1) edge_mask = node_mask[edge_index[0]] & node_mask[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: - node_idx = torch.zeros(node_mask.size(0), dtype=torch.long, - device=device) - node_idx[subset] = torch.arange(node_mask.sum().item(), device=device) - edge_index = node_idx[edge_index] + edge_index, _ = map_index( + edge_index.view(-1), + subset, + max_index=num_nodes, + inclusive=True, + ) + edge_index = edge_index.view(2, -1) if return_edge_mask: return edge_index, edge_attr, edge_mask From e14dd1360d743d3409975192ff70f58cf3064122 Mon Sep 17 00:00:00 2001 From: Nripesh Niketan <86844847+NripeshN@users.noreply.github.com> Date: Thu, 3 Aug 2023 17:27:23 +0530 Subject: [PATCH 1392/2432] Add Apple Silicon GPU Acceleration Benchmark (#7711) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Co-authored-by: Akihiro Nitta Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- benchmark/citation/train_eval.py | 21 ++++++++++++++++++++- benchmark/kernel/main_performance.py | 8 +++++++- benchmark/kernel/train_eval.py | 17 ++++++++++++++++- benchmark/points/train_eval.py | 15 ++++++++++++++- benchmark/runtime/dgl/main.py | 8 +++++++- benchmark/runtime/dgl/train.py | 5 +++++ benchmark/runtime/main.py | 8 +++++++- benchmark/runtime/train.py | 5 +++++ benchmark/training/training_benchmark.py | 8 +++++++- 10 files changed, 89 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6b1167446cd..1d4537870d68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,7 +74,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062)) - Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037)) - Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026)) -- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770), [#7784](https://github.com/pyg-team/pytorch_geometric/pull/7784), [#7785](https://github.com/pyg-team/pytorch_geometric/pull/7785)) +- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770), [#7711](https://github.com/pyg-team/pytorch_geometric/pull/7711), [#7784](https://github.com/pyg-team/pytorch_geometric/pull/7784), [#7785](https://github.com/pyg-team/pytorch_geometric/pull/7785)) ### Changed diff --git a/benchmark/citation/train_eval.py b/benchmark/citation/train_eval.py index 40bb8e03101d..0e7a7893440b 100644 --- a/benchmark/citation/train_eval.py +++ b/benchmark/citation/train_eval.py @@ -9,7 +9,12 @@ from torch_geometric.profile import timeit, torch_profile from torch_geometric.utils import index_to_mask -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') def random_planetoid_splits(data, num_classes): @@ -53,6 +58,13 @@ def run_train(dataset, model, runs, epochs, lr, weight_decay, early_stopping, if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + try: + import torch.mps + torch.mps.synchronize() + except ImportError: + pass t_start = time.perf_counter() @@ -84,6 +96,13 @@ def run_train(dataset, model, runs, epochs, lr, weight_decay, early_stopping, if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + try: + import torch.mps + torch.mps.synchronize() + except ImportError: + pass t_end = time.perf_counter() diff --git a/benchmark/kernel/main_performance.py b/benchmark/kernel/main_performance.py index 777c0435ab1f..c35737d20d36 100644 --- a/benchmark/kernel/main_performance.py +++ b/benchmark/kernel/main_performance.py @@ -36,7 +36,13 @@ parser.add_argument('--compile', action='http://github.com/store_true') args = parser.parse_args() -device = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + if torch.cuda.is_available(): amp = torch.cuda.amp.autocast(enabled=False) else: diff --git a/benchmark/kernel/train_eval.py b/benchmark/kernel/train_eval.py index 6ec11954ba24..7c268b0beb11 100644 --- a/benchmark/kernel/train_eval.py +++ b/benchmark/kernel/train_eval.py @@ -9,7 +9,12 @@ from torch_geometric.loader import DataLoader from torch_geometric.loader import DenseDataLoader as DenseLoader -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') def cross_validation_with_val_set(dataset, model, folds, epochs, batch_size, @@ -38,6 +43,13 @@ def cross_validation_with_val_set(dataset, model, folds, epochs, batch_size, if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + try: + import torch.mps + torch.mps.synchronize() + except ImportError: + pass t_start = time.perf_counter() @@ -62,6 +74,9 @@ def cross_validation_with_val_set(dataset, model, folds, epochs, batch_size, if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + torch.mps.synchronize() t_end = time.perf_counter() durations.append(t_end - t_start) diff --git a/benchmark/points/train_eval.py b/benchmark/points/train_eval.py index a5c364a6db2e..7912ff43501c 100644 --- a/benchmark/points/train_eval.py +++ b/benchmark/points/train_eval.py @@ -8,7 +8,12 @@ from torch_geometric.loader import DataLoader from torch_geometric.profile import timeit, torch_profile -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') def run_train(train_dataset, test_dataset, model, epochs, batch_size, @@ -25,6 +30,10 @@ def run_train(train_dataset, test_dataset, model, epochs, batch_size, for epoch in range(1, epochs + 1): if torch.cuda.is_available(): torch.cuda.synchronize() + elif (hasattr(torch.backends, 'mps') + and torch.backends.mps.is_available()): + import torch.mps + torch.mps.synchronize() t_start = time.perf_counter() @@ -33,6 +42,10 @@ def run_train(train_dataset, test_dataset, model, epochs, batch_size, if torch.cuda.is_available(): torch.cuda.synchronize() + elif (hasattr(torch.backends, 'mps') + and torch.backends.mps.is_available()): + import torch.mps + torch.mps.synchronize() t_end = time.perf_counter() diff --git a/benchmark/runtime/dgl/main.py b/benchmark/runtime/dgl/main.py index bf8dbbde3d07..9afc3950cfe8 100644 --- a/benchmark/runtime/dgl/main.py +++ b/benchmark/runtime/dgl/main.py @@ -11,7 +11,13 @@ from runtime.dgl.rgcn import RGCN, RGCNSPMV from runtime.dgl.train import train_runtime -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + 
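The device-selection (and, in the training loops, synchronization) logic is intentionally inlined in every benchmark script touched here. Purely to illustrate the pattern, a consolidated helper could look as follows (a sketch; the `get_device` and `synchronize` names are not part of the patch):

```python
import torch


def get_device() -> torch.device:
    # Prefer CUDA, then Apple Silicon (MPS), then fall back to the CPU:
    if torch.cuda.is_available():
        return torch.device('cuda')
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        return torch.device('mps')
    return torch.device('cpu')


def synchronize(device: torch.device) -> None:
    # Wait for all queued kernels so surrounding wall-clock timings are valid:
    if device.type == 'cuda':
        torch.cuda.synchronize()
    elif device.type == 'mps':
        try:
            import torch.mps
            torch.mps.synchronize()
        except ImportError:  # Older PyTorch versions do not ship `torch.mps`.
            pass
```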
with HiddenPrint(): Cora = citation_graph.load_cora() CiteSeer = citation_graph.load_citeseer() diff --git a/benchmark/runtime/dgl/train.py b/benchmark/runtime/dgl/train.py index f74ecdf9f13e..1823e1a32cc1 100644 --- a/benchmark/runtime/dgl/train.py +++ b/benchmark/runtime/dgl/train.py @@ -18,6 +18,9 @@ def train_runtime(model, data, epochs, device): if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + import torch.mps + torch.mps.synchronize() t_start = time.perf_counter() for epoch in range(epochs): @@ -29,6 +32,8 @@ def train_runtime(model, data, epochs, device): if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + torch.mps.synchronize() t_end = time.perf_counter() return t_end - t_start diff --git a/benchmark/runtime/main.py b/benchmark/runtime/main.py index 798a22f8e512..80a5656bb4f7 100644 --- a/benchmark/runtime/main.py +++ b/benchmark/runtime/main.py @@ -9,7 +9,13 @@ from torch_geometric.datasets import Entities, Planetoid -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data') Cora = Planetoid(osp.join(root, 'Cora'), 'Cora') CiteSeer = Planetoid(osp.join(root, 'CiteSeer'), 'CiteSeer') diff --git a/benchmark/runtime/train.py b/benchmark/runtime/train.py index bd99519a6575..7dacfa56aa9e 100644 --- a/benchmark/runtime/train.py +++ b/benchmark/runtime/train.py @@ -14,6 +14,9 @@ def train_runtime(model, data, epochs, device): if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + import torch.mps + torch.mps.synchronize() t_start = time.perf_counter() for epoch in range(epochs): @@ -25,6 +28,8 @@ def train_runtime(model, data, epochs, device): if torch.cuda.is_available(): torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + torch.mps.synchronize() t_end = time.perf_counter() return t_end - t_start diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index 0abc5cdab82e..bdbb6f2620d2 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -87,7 +87,13 @@ def run(args: argparse.ArgumentParser): warnings.warn("Cannot write profile data to CSV because profiling is " "disabled") - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + if torch.cuda.is_available(): + device = torch.device('cuda') + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') + else: + device = torch.device('cpu') + # If we use a custom number of steps, then we need to use RandomSampler, # which already does shuffle. 
shuffle = False if args.num_steps != -1 else True From 9f9a38bf02e392d3bda116b4b258204fde63098e Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Thu, 3 Aug 2023 07:13:03 -0700 Subject: [PATCH 1393/2432] Measure time per epoch in some examples (#7725) Signed-off-by: Serge Panev Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Akihiro Nitta Co-authored-by: rusty1s --- examples/autoencoder.py | 5 +++++ examples/cluster_gcn_ppi.py | 5 +++++ examples/cluster_gcn_reddit.py | 6 ++++++ examples/gat.py | 5 +++++ examples/gcn.py | 5 +++++ examples/gcn2_cora.py | 5 +++++ examples/gcn2_ppi.py | 5 +++++ examples/geniepath.py | 5 +++++ examples/glnn.py | 11 +++++++++++ examples/graph_sage_unsup.py | 5 +++++ examples/graph_sage_unsup_ppi.py | 5 +++++ examples/mem_pool.py | 5 +++++ examples/ppi.py | 5 +++++ examples/proteins_diff_pool.py | 5 +++++ examples/proteins_dmon_pool.py | 5 +++++ examples/proteins_gmt.py | 5 +++++ examples/proteins_mincut_pool.py | 7 ++++++- examples/reddit.py | 5 +++++ examples/renet.py | 5 +++++ examples/rev_gnn.py | 5 +++++ examples/rgat.py | 5 +++++ examples/rgcn.py | 5 +++++ examples/rgcn_link_pred.py | 5 +++++ examples/seal_link_pred.py | 5 +++++ examples/super_gat.py | 5 +++++ 25 files changed, 133 insertions(+), 1 deletion(-) diff --git a/examples/autoencoder.py b/examples/autoencoder.py index f564d2f7b9e4..0095b632bea3 100644 --- a/examples/autoencoder.py +++ b/examples/autoencoder.py @@ -1,5 +1,6 @@ import argparse import os.path as osp +import time import torch @@ -109,7 +110,11 @@ def test(data): return model.test(z, data.pos_edge_label_index, data.neg_edge_label_index) +times = [] for epoch in range(1, args.epochs + 1): + start = time.time() loss = train() auc, ap = test(test_data) print(f'Epoch: {epoch:03d}, AUC: {auc:.4f}, AP: {ap:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/cluster_gcn_ppi.py b/examples/cluster_gcn_ppi.py index 5b5271c480ef..36e07a5d0494 100644 --- a/examples/cluster_gcn_ppi.py +++ b/examples/cluster_gcn_ppi.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -80,9 +81,13 @@ def test(loader): return f1_score(y, pred, average='micro') if pred.sum() > 0 else 0 +times = [] for epoch in range(1, 201): + start = time.time() loss = train() val_f1 = test(val_loader) test_f1 = test(test_loader) print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_f1:.4f}, ' f'Test: {test_f1:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/cluster_gcn_reddit.py b/examples/cluster_gcn_reddit.py index eddcfa7a2056..09ab32ce0135 100644 --- a/examples/cluster_gcn_reddit.py +++ b/examples/cluster_gcn_reddit.py @@ -1,3 +1,5 @@ +import time + import torch import torch.nn.functional as F from torch.nn import ModuleList @@ -99,7 +101,9 @@ def test(): # Inference should be performed on the full graph. return accs +times = [] for epoch in range(1, 31): + start = time.time() loss = train() if epoch % 5 == 0: train_acc, val_acc, test_acc = test() @@ -107,3 +111,5 @@ def test(): # Inference should be performed on the full graph. 
f'Val: {val_acc:.4f}, test: {test_acc:.4f}') else: print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/gat.py b/examples/gat.py index 49769f09a9d9..1147acdcaca7 100644 --- a/examples/gat.py +++ b/examples/gat.py @@ -1,5 +1,6 @@ import argparse import os.path as osp +import time import torch import torch.nn.functional as F @@ -69,11 +70,15 @@ def test(): return accs +times = [] best_val_acc = final_test_acc = 0 for epoch in range(1, args.epochs + 1): + start = time.time() loss = train() train_acc, val_acc, tmp_test_acc = test() if val_acc > best_val_acc: best_val_acc = val_acc test_acc = tmp_test_acc log(Epoch=epoch, Loss=loss, Train=train_acc, Val=val_acc, Test=test_acc) + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/gcn.py b/examples/gcn.py index a8be4952c752..4aaf0e2317d8 100644 --- a/examples/gcn.py +++ b/examples/gcn.py @@ -1,5 +1,6 @@ import argparse import os.path as osp +import time import torch import torch.nn.functional as F @@ -90,10 +91,14 @@ def test(): best_val_acc = final_test_acc = 0 +times = [] for epoch in range(1, args.epochs + 1): + start = time.time() loss = train() train_acc, val_acc, tmp_test_acc = test() if val_acc > best_val_acc: best_val_acc = val_acc test_acc = tmp_test_acc log(Epoch=epoch, Loss=loss, Train=train_acc, Val=val_acc, Test=test_acc) + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/gcn2_cora.py b/examples/gcn2_cora.py index eadc358fd03e..92dc43dd94f6 100644 --- a/examples/gcn2_cora.py +++ b/examples/gcn2_cora.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -77,7 +78,9 @@ def test(): best_val_acc = test_acc = 0 +times = [] for epoch in range(1, 1001): + start = time.time() loss = train() train_acc, val_acc, tmp_test_acc = test() if val_acc > best_val_acc: @@ -86,3 +89,5 @@ def test(): print(f'Epoch: {epoch:04d}, Loss: {loss:.4f} Train: {train_acc:.4f}, ' f'Val: {val_acc:.4f}, Test: {tmp_test_acc:.4f}, ' f'Final Test: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/gcn2_ppi.py b/examples/gcn2_ppi.py index a22cc5ffa64c..1176a230c618 100644 --- a/examples/gcn2_ppi.py +++ b/examples/gcn2_ppi.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -89,9 +90,13 @@ def test(loader): return f1_score(y, pred, average='micro') if pred.sum() > 0 else 0 +times = [] for epoch in range(1, 2001): + start = time.time() loss = train() val_f1 = test(val_loader) test_f1 = test(test_loader) print(f'Epoch: {epoch:04d}, Loss: {loss:.4f}, Val: {val_f1:.4f}, ' f'Test: {test_f1:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/geniepath.py b/examples/geniepath.py index 0b6535129e5b..2349fbe198ee 100644 --- a/examples/geniepath.py +++ b/examples/geniepath.py @@ -1,5 +1,6 @@ import argparse import os.path as osp +import time import torch from sklearn.metrics import f1_score @@ -141,9 +142,13 @@ def test(loader): return f1_score(y, pred, average='micro') if pred.sum() > 0 else 0 +times = [] for epoch in range(1, 101): + start = time.time() loss = train() val_f1 = test(val_loader) test_f1 = test(test_loader) print(f'Epoch: 
{epoch:03d}, Loss: {loss:.4f}, Val: {val_f1:.4f}, ' f'Test: {test_f1:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/glnn.py b/examples/glnn.py index f7b3f05f026c..840f87200612 100644 --- a/examples/glnn.py +++ b/examples/glnn.py @@ -3,6 +3,7 @@ import argparse import os.path as osp +import time import torch import torch.nn.functional as F @@ -56,13 +57,18 @@ def test_teacher(): return accs +times = [] print('Training Teacher GNN:') for epoch in range(1, 201): + start = time.time() loss = train_teacher() if epoch % 20 == 0: train_acc, val_acc, test_acc = test_teacher() print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' f'Val: {val_acc:.4f}, Test: {test_acc:.4f}') + times.append(time.time() - start) + start = time.time() +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") with torch.no_grad(): # Obtain soft labels from the GNN: y_soft = gnn(data.x, data.edge_index).log_softmax(dim=-1) @@ -91,10 +97,15 @@ def test_student(): return accs +times = [] print('Training Student MLP:') for epoch in range(1, 501): + start = time.time() loss = train_student() if epoch % 20 == 0: train_acc, val_acc, test_acc = test_student() print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' f'Val: {val_acc:.4f}, Test: {test_acc:.4f}') + times.append(time.time() - start) + start = time.time() +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/graph_sage_unsup.py b/examples/graph_sage_unsup.py index 385ca559a8b7..d523cdd7485e 100644 --- a/examples/graph_sage_unsup.py +++ b/examples/graph_sage_unsup.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -67,8 +68,12 @@ def test(): return val_acc, test_acc +times = [] for epoch in range(1, 51): + start = time.time() loss = train() val_acc, test_acc = test() print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, ' f'Val: {val_acc:.4f}, Test: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/graph_sage_unsup_ppi.py b/examples/graph_sage_unsup_ppi.py index 481effaec0fb..87a53a15287c 100644 --- a/examples/graph_sage_unsup_ppi.py +++ b/examples/graph_sage_unsup_ppi.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -94,9 +95,13 @@ def test(): return train_f1, val_f1, test_f1 +times = [] for epoch in range(1, 6): + start = time.time() loss = train() print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}') train_f1, val_f1, test_f1 = test() print(f'Train F1: {train_f1:.4f}, Val F1: {val_f1:.4f}, ' f'Test F1: {test_f1:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/mem_pool.py b/examples/mem_pool.py index 76063067e2fc..a50c720f79c3 100644 --- a/examples/mem_pool.py +++ b/examples/mem_pool.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -99,9 +100,11 @@ def test(loader): return total_correct / len(loader.dataset) +times = [] patience = start_patience = 250 test_acc = best_val_acc = 0. 
for epoch in range(1, 2001): + start = time.time() train() val_acc = test(val_loader) if epoch % 500 == 0: @@ -115,3 +118,5 @@ def test(loader): print(f'Epoch {epoch:02d}, Val: {val_acc:.3f}, Test: {test_acc:.3f}') if patience <= 0: break + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/ppi.py b/examples/ppi.py index 0512b0d4abcb..170294f96a85 100644 --- a/examples/ppi.py +++ b/examples/ppi.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -69,9 +70,13 @@ def test(loader): return f1_score(y, pred, average='micro') if pred.sum() > 0 else 0 +times = [] for epoch in range(1, 101): + start = time.time() loss = train() val_f1 = test(val_loader) test_f1 = test(test_loader) print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Val: {val_f1:.4f}, ' f'Test: {test_f1:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/proteins_diff_pool.py b/examples/proteins_diff_pool.py index ef376060685c..a6da315bb531 100644 --- a/examples/proteins_diff_pool.py +++ b/examples/proteins_diff_pool.py @@ -1,4 +1,5 @@ import os.path as osp +import time from math import ceil import torch @@ -146,7 +147,9 @@ def test(loader): best_val_acc = test_acc = 0 +times = [] for epoch in range(1, 151): + start = time.time() train_loss = train(epoch) val_acc = test(val_loader) if val_acc > best_val_acc: @@ -154,3 +157,5 @@ def test(loader): best_val_acc = val_acc print(f'Epoch: {epoch:03d}, Train Loss: {train_loss:.4f}, ' f'Val Acc: {val_acc:.4f}, Test Acc: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/proteins_dmon_pool.py b/examples/proteins_dmon_pool.py index 808f70ced9af..16d67c7b6823 100644 --- a/examples/proteins_dmon_pool.py +++ b/examples/proteins_dmon_pool.py @@ -1,4 +1,5 @@ import os.path as osp +import time from math import ceil import torch @@ -95,7 +96,9 @@ def test(loader): return loss_all / len(loader.dataset), correct / len(loader.dataset) +times = [] for epoch in range(1, 101): + start = time.time() train_loss = train(train_loader) _, train_acc = test(train_loader) val_loss, val_acc = test(val_loader) @@ -104,3 +107,5 @@ def test(loader): f'Train Acc: {train_acc:.3f}, Val Loss: {val_loss:.3f}, ' f'Val Acc: {val_acc:.3f}, Test Loss: {test_loss:.3f}, ' f'Test Acc: {test_acc:.3f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/proteins_gmt.py b/examples/proteins_gmt.py index 0667e93d2785..9e2f65ba7d27 100644 --- a/examples/proteins_gmt.py +++ b/examples/proteins_gmt.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -81,9 +82,13 @@ def test(loader): return total_correct / len(loader.dataset) +times = [] for epoch in range(1, 201): + start = time.time() train_loss = train() val_acc = test(val_loader) test_acc = test(test_loader) print(f'Epoch: {epoch:03d}, Loss: {train_loss:.4f}, ' f'Val Acc: {val_acc:.4f}, Test Acc: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/proteins_mincut_pool.py b/examples/proteins_mincut_pool.py index 45a1fbf2b7c4..18d3a710fb21 100644 --- a/examples/proteins_mincut_pool.py +++ b/examples/proteins_mincut_pool.py @@ -1,4 +1,5 @@ import os.path as osp +import time from 
math import ceil import torch @@ -97,10 +98,12 @@ def test(loader): return loss_all / len(loader.dataset), correct / len(loader.dataset) +times = [] best_val_acc = test_acc = 0 best_val_loss = float('inf') patience = start_patience = 50 -for epoch in range(1, 15000): +for epoch in range(1, 15001): + start = time.time() train_loss = train(epoch) _, train_acc = test(train_loader) val_loss, val_acc = test(val_loader) @@ -116,3 +119,5 @@ def test(loader): f'Train Acc: {train_acc:.3f}, Val Loss: {val_loss:.3f}, ' f'Val Acc: {val_acc:.3f}, Test Loss: {test_loss:.3f}, ' f'Test Acc: {test_acc:.3f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/reddit.py b/examples/reddit.py index 01e1bb584862..6da41444b668 100644 --- a/examples/reddit.py +++ b/examples/reddit.py @@ -1,5 +1,6 @@ import copy import os.path as osp +import time import torch import torch.nn.functional as F @@ -108,9 +109,13 @@ def test(): return accs +times = [] for epoch in range(1, 11): + start = time.time() loss, acc = train(epoch) print(f'Epoch {epoch:02d}, Loss: {loss:.4f}, Approx. Train: {acc:.4f}') train_acc, val_acc, test_acc = test() print(f'Epoch: {epoch:02d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' f'Test: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/renet.py b/examples/renet.py index 67cb6a619fb1..e3a52984b0cc 100644 --- a/examples/renet.py +++ b/examples/renet.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -68,8 +69,12 @@ def test(loader): return result.tolist() +times = [] for epoch in range(1, 21): + start = time.time() train() mrr, hits1, hits3, hits10 = test(test_loader) print(f'Epoch: {epoch:02d}, MRR: {mrr:.4f}, Hits@1: {hits1:.4f}, ' f'Hits@3: {hits3:.4f}, Hits@10: {hits10:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/rev_gnn.py b/examples/rev_gnn.py index 56fd5fa3aa86..5f73a0c1a958 100644 --- a/examples/rev_gnn.py +++ b/examples/rev_gnn.py @@ -5,6 +5,7 @@ # | 7 layers 160 channels | 0.8276 ± 0.0027 | 0.9272 ± 0.0006 | import os.path as osp +import time import torch import torch.nn.functional as F @@ -177,10 +178,12 @@ def test(epoch): return train_acc, valid_acc, test_acc +times = [] best_val = 0.0 final_train = 0.0 final_test = 0.0 for epoch in range(1, 1001): + start = time.time() loss = train(epoch) train_acc, val_acc, test_acc = test(epoch) if val_acc > best_val: @@ -189,6 +192,8 @@ def test(epoch): final_test = test_acc print(f'Loss: {loss:.4f}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' f'Test: {test_acc:.4f}') + times.append(time.time() - start) print(f'Final Train: {final_train:.4f}, Best Val: {best_val:.4f}, ' f'Final Test: {final_test:.4f}') +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/rgat.py b/examples/rgat.py index 69635fce0ccb..aa44c49db9da 100644 --- a/examples/rgat.py +++ b/examples/rgat.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -52,8 +53,12 @@ def test(): return train_acc, test_acc +times = [] for epoch in range(1, 51): + start = time.time() loss = train() train_acc, test_acc = test() print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {train_acc:.4f} ' f'Test: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: 
{torch.tensor(times).median():.4f}s") diff --git a/examples/rgcn.py b/examples/rgcn.py index 49c1926447bb..4adc30b1f120 100644 --- a/examples/rgcn.py +++ b/examples/rgcn.py @@ -1,5 +1,6 @@ import argparse import os.path as osp +import time import torch import torch.nn.functional as F @@ -81,8 +82,12 @@ def test(): return train_acc, test_acc +times = [] for epoch in range(1, 51): + start = time.time() loss = train() train_acc, test_acc = test() print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {train_acc:.4f} ' f'Test: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/rgcn_link_pred.py b/examples/rgcn_link_pred.py index fd47d254cb15..cd0495149bea 100644 --- a/examples/rgcn_link_pred.py +++ b/examples/rgcn_link_pred.py @@ -7,6 +7,7 @@ to run on CPU (following the experimental setup in the official paper). """ import os.path as osp +import time import torch import torch.nn.functional as F @@ -174,9 +175,13 @@ def compute_mrr(z, edge_index, edge_type): return (1. / torch.tensor(ranks, dtype=torch.float)).mean() +times = [] for epoch in range(1, 10001): + start = time.time() loss = train() print(f'Epoch: {epoch:05d}, Loss: {loss:.4f}') if (epoch % 500) == 0: valid_mrr, test_mrr = test() print(f'Val MRR: {valid_mrr:.4f}, Test MRR: {test_mrr:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/seal_link_pred.py b/examples/seal_link_pred.py index 256083fff3a4..d358a799bfbe 100644 --- a/examples/seal_link_pred.py +++ b/examples/seal_link_pred.py @@ -1,5 +1,6 @@ import math import os.path as osp +import time from itertools import chain import numpy as np @@ -216,8 +217,10 @@ def test(loader): return roc_auc_score(torch.cat(y_true), torch.cat(y_pred)) +times = [] best_val_auc = test_auc = 0 for epoch in range(1, 51): + start = time.time() loss = train() val_auc = test(val_loader) if val_auc > best_val_auc: @@ -225,3 +228,5 @@ def test(loader): test_auc = test(test_loader) print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_auc:.4f}, ' f'Test: {test_auc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") diff --git a/examples/super_gat.py b/examples/super_gat.py index 4520740fbe96..400d60985306 100644 --- a/examples/super_gat.py +++ b/examples/super_gat.py @@ -1,4 +1,5 @@ import os.path as osp +import time import torch import torch.nn.functional as F @@ -61,8 +62,12 @@ def test(data): return accs +times = [] for epoch in range(1, 501): + start = time.time() train(data) train_acc, val_acc, test_acc = test(data) print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' f'Test: {test_acc:.4f}') + times.append(time.time() - start) +print(f"Median time per epoch: {torch.tensor(times).median():.4f}s") From d68ecc043aa2b4bdabeb42018b6c65bdf99b2a1d Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Fri, 4 Aug 2023 07:42:08 -0700 Subject: [PATCH 1394/2432] Clean-up `TGN` example (#7644) checked that accuracys match before and after my PR --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinu Sunil Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/tgn.py | 58 +++++++++---------- test/nn/models/test_tgn.py | 16 +++-- torch_geometric/loader/temporal_dataloader.py | 35 ++++++++++- 4 files changed, 69 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
1d4537870d68..0be4897656bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Integrate `neg_sampling_ratio` into `TemporalDataLoader` ([#7644](https://github.com/pyg-team/pytorch_geometric/pull/7644)) - Added `faiss`-based `KNNINdex` classes for L2 or maximum inner product search ([#7842](https://github.com/pyg-team/pytorch_geometric/pull/7842)) - Added the `OSE_GVCS` dataset ([#7811](https://github.com/pyg-team/pytorch_geometric/pull/7811)) - Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780)) diff --git a/examples/tgn.py b/examples/tgn.py index a7c52596d5db..bd60d40e8612 100644 --- a/examples/tgn.py +++ b/examples/tgn.py @@ -36,15 +36,24 @@ # expensive memory transfer costs for mini-batches: data = data.to(device) -# Ensure to only sample actual destination nodes as negatives. -min_dst_idx, max_dst_idx = int(data.dst.min()), int(data.dst.max()) train_data, val_data, test_data = data.train_val_test_split( val_ratio=0.15, test_ratio=0.15) -train_loader = TemporalDataLoader(train_data, batch_size=200) -val_loader = TemporalDataLoader(val_data, batch_size=200) -test_loader = TemporalDataLoader(test_data, batch_size=200) - +train_loader = TemporalDataLoader( + train_data, + batch_size=200, + neg_sampling_ratio=1.0, +) +val_loader = TemporalDataLoader( + val_data, + batch_size=200, + neg_sampling_ratio=1.0, +) +test_loader = TemporalDataLoader( + test_data, + batch_size=200, + neg_sampling_ratio=1.0, +) neighbor_loader = LastNeighborLoader(data.num_nodes, size=10, device=device) @@ -115,33 +124,25 @@ def train(): total_loss = 0 for batch in train_loader: - batch = batch.to(device) optimizer.zero_grad() + batch = batch.to(device) - src, pos_dst, t, msg = batch.src, batch.dst, batch.t, batch.msg - - # Sample negative destination nodes. - neg_dst = torch.randint(min_dst_idx, max_dst_idx + 1, (src.size(0), ), - dtype=torch.long, device=device) - - n_id = torch.cat([src, pos_dst, neg_dst]).unique() - n_id, edge_index, e_id = neighbor_loader(n_id) + n_id, edge_index, e_id = neighbor_loader(batch.n_id) assoc[n_id] = torch.arange(n_id.size(0), device=device) # Get updated memory of all nodes involved in the computation. z, last_update = memory(n_id) z = gnn(z, last_update, edge_index, data.t[e_id].to(device), data.msg[e_id].to(device)) - - pos_out = link_pred(z[assoc[src]], z[assoc[pos_dst]]) - neg_out = link_pred(z[assoc[src]], z[assoc[neg_dst]]) + pos_out = link_pred(z[assoc[batch.src]], z[assoc[batch.dst]]) + neg_out = link_pred(z[assoc[batch.src]], z[assoc[batch.neg_dst]]) loss = criterion(pos_out, torch.ones_like(pos_out)) loss += criterion(neg_out, torch.zeros_like(neg_out)) # Update memory and neighbor loader with ground-truth state. 
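The `batch.n_id` and `batch.neg_dst` fields used above exist because negative sampling now happens inside `TemporalDataLoader` (see its diff further below). A small self-contained sketch with toy data (tensor shapes and values are placeholders):

```python
import torch

from torch_geometric.data import TemporalData
from torch_geometric.loader import TemporalDataLoader

data = TemporalData(
    src=torch.arange(100),
    dst=torch.arange(100, 200),
    t=torch.arange(100),
    msg=torch.randn(100, 16),
)
loader = TemporalDataLoader(data, batch_size=20, neg_sampling_ratio=1.0)

for batch in loader:
    # One negative destination is drawn per positive edge (ratio 1.0), and
    # `n_id` is the union of sources, positive and negative destinations:
    assert batch.neg_dst.numel() == batch.dst.numel()
    assert batch.n_id.dim() == 1
```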
- memory.update_state(src, pos_dst, t, msg) - neighbor_loader.insert(src, pos_dst) + memory.update_state(batch.src, batch.dst, batch.t, batch.msg) + neighbor_loader.insert(batch.src, batch.dst) loss.backward() optimizer.step() @@ -162,21 +163,15 @@ def test(loader): aps, aucs = [], [] for batch in loader: batch = batch.to(device) - src, pos_dst, t, msg = batch.src, batch.dst, batch.t, batch.msg - neg_dst = torch.randint(min_dst_idx, max_dst_idx + 1, (src.size(0), ), - dtype=torch.long, device=device) - - n_id = torch.cat([src, pos_dst, neg_dst]).unique() - n_id, edge_index, e_id = neighbor_loader(n_id) + n_id, edge_index, e_id = neighbor_loader(batch.n_id) assoc[n_id] = torch.arange(n_id.size(0), device=device) z, last_update = memory(n_id) z = gnn(z, last_update, edge_index, data.t[e_id].to(device), data.msg[e_id].to(device)) - - pos_out = link_pred(z[assoc[src]], z[assoc[pos_dst]]) - neg_out = link_pred(z[assoc[src]], z[assoc[neg_dst]]) + pos_out = link_pred(z[assoc[batch.src]], z[assoc[batch.dst]]) + neg_out = link_pred(z[assoc[batch.src]], z[assoc[batch.neg_dst]]) y_pred = torch.cat([pos_out, neg_out], dim=0).sigmoid().cpu() y_true = torch.cat( @@ -186,9 +181,8 @@ def test(loader): aps.append(average_precision_score(y_true, y_pred)) aucs.append(roc_auc_score(y_true, y_pred)) - memory.update_state(src, pos_dst, t, msg) - neighbor_loader.insert(src, pos_dst) - + memory.update_state(batch.src, batch.dst, batch.t, batch.msg) + neighbor_loader.insert(batch.src, batch.dst) return float(torch.tensor(aps).mean()), float(torch.tensor(aucs).mean()) diff --git a/test/nn/models/test_tgn.py b/test/nn/models/test_tgn.py index cb1f5c6ddd14..958224599a73 100644 --- a/test/nn/models/test_tgn.py +++ b/test/nn/models/test_tgn.py @@ -1,3 +1,4 @@ +import pytest import torch from torch_geometric.data import TemporalData @@ -10,7 +11,8 @@ ) -def test_tgn(): +@pytest.mark.parametrize('neg_sampling_ratio', [0.0, 1.0]) +def test_tgn(neg_sampling_ratio): memory_dim = 16 time_dim = 16 @@ -20,8 +22,11 @@ def test_tgn(): msg = torch.randn(10, 16) data = TemporalData(src=src, dst=dst, t=t, msg=msg) - loader = TemporalDataLoader(data, batch_size=5) - + loader = TemporalDataLoader( + data, + batch_size=5, + neg_sampling_ratio=neg_sampling_ratio, + ) neighbor_loader = LastNeighborLoader(data.num_nodes, size=3) assert neighbor_loader.cur_e_id == 0 assert neighbor_loader.e_id.size() == (data.num_nodes, 3) @@ -39,13 +44,12 @@ def test_tgn(): # Test TGNMemory training: for i, batch in enumerate(loader): - n_id = torch.cat([batch.src, batch.dst]).unique() - n_id, edge_index, e_id = neighbor_loader(n_id) + n_id, edge_index, e_id = neighbor_loader(batch.n_id) z, last_update = memory(n_id) memory.update_state(batch.src, batch.dst, batch.t, batch.msg) neighbor_loader.insert(batch.src, batch.dst) if i == 0: - assert n_id.size(0) == 4 + assert n_id.size(0) >= 4 assert edge_index.numel() == 0 assert e_id.numel() == 0 assert z.size() == (n_id.size(0), memory_dim) diff --git a/torch_geometric/loader/temporal_dataloader.py b/torch_geometric/loader/temporal_dataloader.py index d67957935d72..a67cbffdc513 100644 --- a/torch_geometric/loader/temporal_dataloader.py +++ b/torch_geometric/loader/temporal_dataloader.py @@ -14,18 +14,31 @@ class TemporalDataLoader(torch.utils.data.DataLoader): from which to load the data. batch_size (int, optional): How many samples per batch to load. (default: :obj:`1`) + neg_sampling_ratio (float, optional): The ratio of sampled negative + destination nodes to the number of postive destination nodes. 
+ (default: :obj:`0.0`) **kwargs (optional): Additional arguments of :class:`torch.utils.data.DataLoader`. """ - def __init__(self, data: TemporalData, batch_size: int = 1, **kwargs): + def __init__( + self, + data: TemporalData, + batch_size: int = 1, + neg_sampling_ratio: float = 0.0, + **kwargs, + ): # Remove for PyTorch Lightning: kwargs.pop('dataset', None) kwargs.pop('collate_fn', None) - kwargs.pop('shuffle', None) self.data = data self.events_per_batch = batch_size + self.neg_sampling_ratio = neg_sampling_ratio + + if neg_sampling_ratio > 0: + self.min_dst = int(data.dst.min()) + self.max_dst = int(data.dst.max()) if kwargs.get('drop_last', False) and len(data) % batch_size != 0: arange = range(0, len(data) - batch_size, batch_size) @@ -35,4 +48,20 @@ def __init__(self, data: TemporalData, batch_size: int = 1, **kwargs): super().__init__(arange, 1, shuffle=False, collate_fn=self, **kwargs) def __call__(self, arange: List[int]) -> TemporalData: - return self.data[arange[0]:arange[0] + self.events_per_batch] + batch = self.data[arange[0]:arange[0] + self.events_per_batch] + + n_ids = [batch.src, batch.dst] + + if self.neg_sampling_ratio > 0: + batch.neg_dst = torch.randint( + low=self.min_dst, + high=self.max_dst + 1, + size=(round(self.neg_sampling_ratio * batch.dst.size(0)), ), + dtype=batch.dst.dtype, + device=batch.dst.device, + ) + n_ids += [batch.neg_dst] + + batch.n_id = torch.cat(n_ids, dim=0).unique() + + return batch From 003d472b553641a6c6de6ed6ab9b9230d2f2d829 Mon Sep 17 00:00:00 2001 From: husimplicity <36654893+husimplicity@users.noreply.github.com> Date: Fri, 4 Aug 2023 23:19:39 +0800 Subject: [PATCH 1395/2432] Add GraphLearn-for-PyTorch(GLT) distributed examples (#7402) This PR adds a GraphLearn-for-PyTorch(GLT) distributed example integrated with PyG GraphSAGE model. --------- Co-authored-by: huxleyhu Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + .../graphlearn_for_pytorch/README.md | 98 ++++++ .../dist_train_sage_sup_config.yml | 38 +++ .../dist_train_sage_supervised.py | 312 ++++++++++++++++++ .../graphlearn_for_pytorch/launch.py | 95 ++++++ .../partition_ogbn_dataset.py | 145 ++++++++ 6 files changed, 689 insertions(+) create mode 100644 examples/distributed/graphlearn_for_pytorch/README.md create mode 100644 examples/distributed/graphlearn_for_pytorch/dist_train_sage_sup_config.yml create mode 100644 examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py create mode 100644 examples/distributed/graphlearn_for_pytorch/launch.py create mode 100644 examples/distributed/graphlearn_for_pytorch/partition_ogbn_dataset.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0be4897656bd..235ba8ef926e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a distributed example using `graphlearn-for-pytorch` ([#7402](https://github.com/pyg-team/pytorch_geometric/pull/7402)) - Integrate `neg_sampling_ratio` into `TemporalDataLoader` ([#7644](https://github.com/pyg-team/pytorch_geometric/pull/7644)) - Added `faiss`-based `KNNINdex` classes for L2 or maximum inner product search ([#7842](https://github.com/pyg-team/pytorch_geometric/pull/7842)) - Added the `OSE_GVCS` dataset ([#7811](https://github.com/pyg-team/pytorch_geometric/pull/7811)) diff --git a/examples/distributed/graphlearn_for_pytorch/README.md b/examples/distributed/graphlearn_for_pytorch/README.md new file mode 100644 index 000000000000..1f2a95ddcb90 --- /dev/null +++ b/examples/distributed/graphlearn_for_pytorch/README.md @@ -0,0 +1,98 @@ +# Using GraphLearn-for-PyTorch (GLT) for Distributed Training with PyG + +**[GraphLearn-for-PyTorch (GLT)](https://github.com/alibaba/graphlearn-for-pytorch)** is a graph learning library for PyTorch that makes distributed GNN training easy and efficient. +GLT leverages GPUs to accelerate graph sampling and utilizes UVA and GPU caches to reduce the data conversion and transferring costs during graph sampling and model training. +Most of the APIs of GLT are compatible with PyG, so PyG users only need to modify a few lines of their PyG code to train their model with GLT. + +## Requirements + +- `python >= 3.6` +- `torch >= 1.12` +- `graphlearn-torch` + +## Distributed (Multi-Node) Example + +This example shows how to leverage [GraphLearn-for-PyTorch (GLT)](https://github.com/alibaba/graphlearn-for-pytorch) to train PyG models in a distributed scenario with GPUs. The dataset in this example is `ogbn-products` from the [Open Graph Benchmark](https://ogb.stanford.edu/), but you can also train on `ogbn-papers100M` with only minor modifications. + +To run this example, you can run the example as described below or directly make use of our [`launch.py`](launch.py) script. +The training results will be generated and saved in `dist_sage_sup.txt`. + +### Running the Example + +#### Step 1: Prepare and partition the data + +Here, we use `ogbn-products` and partition it into two partitions: + +```bash +python partition_ogbn_dataset.py --dataset=ogbn-products --root_dir=../../../data/ogbn-products --num_partitions=2 +``` + +#### Step 2: Run the example in each training node + +For example, running the example in two nodes each with two GPUs: + +```bash +# Node 0: +CUDA_VISIBLE_DEVICES=0,1 python dist_train_sage_supervised.py \ + --num_nodes=2 --node_rank=0 --master_addr=localhost \ + --dataset=ogbn-products --dataset_root_dir=../../../data/ogbn-products \ + --in_channel=100 --out_channel=47 + +# Node 1: +CUDA_VISIBLE_DEVICES=2,3 python dist_train_sage_supervised.py \ + --num_nodes=2 --node_rank=1 --master_addr=localhost \ + --dataset=ogbn-products --dataset_root_dir=../../../data/ogbn-products \ + --in_channel=100 --out_channel=47 +``` + +**Notes:** + +1. You should change the `master_addr` to the IP of `node#0`. +2. Since there is randomness during data partitioning, please ensure all nodes are using the same partitioned data when running `dist_train_sage_supervised.py`. + +### Using the `launch.py` Script + +#### Step 1: Setup a distributed file system + +**Note**: You may skip this step if you already set up folder(s) synchronized across machines. + +To perform distributed sampling, files and codes need to be accessed across multiple machines. 
+A distributed file system (*e.g.*, [NFS](https://wiki.archlinux.org/index.php/NFS), [SSHFS](https://www.digitalocean.com/community/tutorials/how-to-use-sshfs-to-mount-remote-file-systems-over-ssh), [Ceph](https://docs.ceph.com/en/latest/install), ...) exempts you from synchronizing files such as partition information.
+
+#### Step 2: Prepare and partition the data
+
+In distributed training (under the worker mode), each node in the cluster holds a partition of the graph.
+Thus, before the training starts, we partition the `ogbn-products` dataset into multiple partitions, each of which corresponds to a specific training worker.
+
+The partitioning occurs in three steps:
+ 1. Run the partition algorithm to assign nodes to partitions.
+ 2. Construct the partitioned graph structure based on the node assignment.
+ 3. Split the node features and edge features into partitions.
+
+GLT supports caching graph topology and frequently accessed features on the GPU to accelerate GPU sampling and feature collection.
+For feature caching, we adopt a pre-sampling-based approach to determine the hotness of nodes, and cache features for nodes with higher hotness while loading the graph.
+The uncached features are stored in pinned memory for efficient access via UVA.
+
+For further information about partitioning, please refer to the [official tutorial](https://github.com/alibaba/graphlearn-for-pytorch/blob/main/docs/tutorial/dist.md).
+
+Here, we use `ogbn-products` and partition it into two partitions:
+
+```bash
+python partition_ogbn_dataset.py --dataset=ogbn-products --root_dir=../../../data/ogbn-products --num_partitions=2
+```
+
+#### Step 3: Set up the configuration file
+
+An example configuration file is given via [`dist_train_sage_sup_config.yml`](dist_train_sage_sup_config.yml).
+
+#### Step 4: Launch the distributed training
+
+```bash
+pip install paramiko
+pip install click
+apt install tmux
+python launch.py --config=dist_train_sage_sup_config.yml --master_addr=0.0.0.0 --master_port=11234
+```
+
+Here, `master_addr` is for the master RPC address, and `master_port` is for PyTorch's process group initialization across training processes.
+Note that you should change the `master_addr` to the IP of `node#0`.
diff --git a/examples/distributed/graphlearn_for_pytorch/dist_train_sage_sup_config.yml b/examples/distributed/graphlearn_for_pytorch/dist_train_sage_sup_config.yml
new file mode 100644
index 000000000000..633be1a7a181
--- /dev/null
+++ b/examples/distributed/graphlearn_for_pytorch/dist_train_sage_sup_config.yml
@@ -0,0 +1,38 @@
+# IP addresses for all nodes.
+# Note: The first 3 params are expected to form usernames@nodes:ports.
+nodes:
+  - 0.0.0.0
+  - 1.1.1.1
+
+# SSH ports for each node:
+ports: [22, 22]
+
+# Username for remote IPs:
+usernames:
+  - your_username_for_node_0
+  - your_username_for_node_1
+
+# Path to Python with GLT environment for each node:
+python_bins:
+  - /path/to/python
+  - /path/to/python
+
+# The dataset name, e.g., ogbn-products, ogbn-papers100M.
+# Note: make sure the name of dataset_root_dir is the same as the dataset name.
+dataset: ogbn-products + +# `in_channel` and `out_channel` of the dataset, e.g.,: +# - ogbn-products: in_channel=100, out_channel=47 +# - ogbn-papers100M: in_channel=128, out_channel=172 +in_channel: 100 +out_channel: 47 + +# Path to the pytorch_geometric directory: +dst_paths: + - /path/to/pytorch_geometric + - /path/to/pytorch_geometric + +# Setup visible CUDA devices for each node: +visible_devices: + - 0,1,2,3 + - 0,1,2,3 diff --git a/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py b/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py new file mode 100644 index 000000000000..6a44bcbf9604 --- /dev/null +++ b/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py @@ -0,0 +1,312 @@ +import argparse +import os.path as osp +import time + +import graphlearn_torch as glt +import torch +import torch.distributed +import torch.nn.functional as F +from ogb.nodeproppred import Evaluator +from torch import Tensor +from torch.nn.parallel import DistributedDataParallel + +from torch_geometric.nn import GraphSAGE + + +@torch.no_grad() +def test(model, test_loader, dataset_name): + evaluator = Evaluator(name=dataset_name) + model.eval() + xs = [] + y_true = [] + for i, batch in enumerate(test_loader): + if i == 0: + device = batch.x.device + x = model(batch.x, batch.edge_index)[:batch.batch_size] + xs.append(x.cpu()) + y_true.append(batch.y[:batch.batch_size].cpu()) + + xs = [t.to(device) for t in xs] + y_true = [t.to(device) for t in y_true] + y_pred = torch.cat(xs, dim=0).argmax(dim=-1, keepdim=True) + y_true = torch.cat(y_true, dim=0).unsqueeze(-1) + test_acc = evaluator.eval({ + 'y_true': y_true, + 'y_pred': y_pred, + })['acc'] + return test_acc + + +def run_training_proc( + local_proc_rank: int, + num_nodes: int, + node_rank: int, + num_training_procs_per_node: int, + dataset_name: str, + in_channels: int, + out_channels: int, + dataset: glt.distributed.DistDataset, + train_idx: Tensor, + test_idx: Tensor, + epochs: int, + batch_size: int, + master_addr: str, + training_pg_master_port: int, + train_loader_master_port: int, + test_loader_master_port: int, +): + # Initialize graphlearn_torch distributed worker group context: + glt.distributed.init_worker_group( + world_size=num_nodes * num_training_procs_per_node, + rank=node_rank * num_training_procs_per_node + local_proc_rank, + group_name='distributed-sage-supervised-trainer') + + current_ctx = glt.distributed.get_context() + current_device = torch.device(local_proc_rank % torch.cuda.device_count()) + + # Initialize training process group of PyTorch: + torch.distributed.init_process_group( + backend='nccl', # or choose 'gloo' if 'nccl' is not supported. + rank=current_ctx.rank, + world_size=current_ctx.world_size, + init_method='tcp://{}:{}'.format(master_addr, training_pg_master_port)) + + # Create distributed neighbor loader for training. + # We replace PyG's NeighborLoader with GLT's DistNeighborLoader. + # GLT parameters for sampling are quite similar to PyG. 
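To make the "similar to PyG" comparison concrete: a rough single-machine analogue of the loader constructed below would be PyG's own `NeighborLoader` with the same fan-out and batch size (a sketch only, using toy stand-ins for the partitioned data and assuming a sampling backend such as `pyg-lib` is installed; the remaining arguments of the distributed loader only configure workers, devices and RPC endpoints):

```python
import torch

from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader

# Toy stand-ins for the partitioned `ogbn-products` graph and training split:
data = Data(
    x=torch.randn(100, 100),
    edge_index=torch.randint(0, 100, (2, 500)),
    y=torch.randint(0, 47, (100, )),
)
train_idx = torch.arange(50)

loader = NeighborLoader(data, num_neighbors=[15, 10, 5], input_nodes=train_idx,
                        batch_size=20, shuffle=True)
batch = next(iter(loader))  # Seed nodes come first, i.e. `batch.batch_size` of them.
```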
+ # We only need to configure additional network and device parameters: + train_idx = train_idx.split( + train_idx.size(0) // num_training_procs_per_node)[local_proc_rank] + train_loader = glt.distributed.DistNeighborLoader( + data=dataset, + num_neighbors=[15, 10, 5], + input_nodes=train_idx, + batch_size=batch_size, + shuffle=True, + collect_features=True, + to_device=current_device, + worker_options=glt.distributed.MpDistSamplingWorkerOptions( + num_workers=1, + worker_devices=[current_device], + worker_concurrency=4, + master_addr=master_addr, + master_port=train_loader_master_port, + channel_size='1GB', + pin_memory=True, + ), + ) + + # Create distributed neighbor loader for testing. + test_idx = test_idx.split(test_idx.size(0) // + num_training_procs_per_node)[local_proc_rank] + test_loader = glt.distributed.DistNeighborLoader( + data=dataset, + num_neighbors=[15, 10, 5], + input_nodes=test_idx, + batch_size=batch_size, + shuffle=False, + collect_features=True, + to_device=current_device, + worker_options=glt.distributed.MpDistSamplingWorkerOptions( + num_workers=2, + worker_devices=[ + torch.device('cuda', i % torch.cuda.device_count()) + for i in range(2) + ], + worker_concurrency=4, + master_addr=master_addr, + master_port=test_loader_master_port, + channel_size='2GB', + pin_memory=True, + ), + ) + + # Define the model and optimizer. + torch.cuda.set_device(current_device) + model = GraphSAGE( + in_channels=in_channels, + hidden_channels=256, + num_layers=3, + out_channels=out_channels, + ).to(current_device) + model = DistributedDataParallel(model, device_ids=[current_device.index]) + + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + + # Train and test: + f = open('dist_sage_sup.txt', 'a+') + for epoch in range(0, epochs): + model.train() + start = time.time() + for batch in train_loader: + optimizer.zero_grad() + out = model(batch.x, batch.edge_index)[:batch.batch_size] + loss = F.cross_entropy(out, batch.y[:batch.batch_size].long()) + loss.backward() + optimizer.step() + f.write(f'-- [Trainer {current_ctx.rank}] Epoch: {epoch:03d}, ' + f'Loss: {loss:.4f}, Epoch Time: {time.time() - start}\n') + + torch.cuda.synchronize() + torch.distributed.barrier() + + if epoch == 0 or epoch > (epochs // 2): + test_acc = test(model, test_loader, dataset_name) + f.write(f'-- [Trainer {current_ctx.rank}] ' + f'Test Acc: {test_acc:.4f}\n') + torch.cuda.synchronize() + torch.distributed.barrier() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--dataset', + type=str, + default='ogbn-products', + help='The name of the dataset', + ) + parser.add_argument( + '--in_channel', + type=int, + default=100, + help='Number of input features of the dataset', + ) + parser.add_argument( + '--out_channel', + type=int, + default=47, + help='Number of classes of the dataset', + ) + parser.add_argument( + '--num_dataset_partitions', + type=int, + default=2, + help='The number of partitions', + ) + parser.add_argument( + '--dataset_root_dir', + type=str, + default='../../../data/products', + help='The root directory (relative path) of the partitioned dataset', + ) + parser.add_argument( + '--num_nodes', + type=int, + default=2, + help='Number of distributed nodes', + ) + parser.add_argument( + '--node_rank', + type=int, + default=0, + help='The current node rank', + ) + parser.add_argument( + '--num_training_procs', + type=int, + default=2, + help='The number of traning processes per node', + ) + parser.add_argument( + '--epochs', + type=int, + default=10, 
+ help='The number of training epochs', + ) + parser.add_argument( + '--batch_size', + type=int, + default=512, + help='The batch size for the training and testing data loaders', + ) + parser.add_argument( + '--master_addr', + type=str, + default='localhost', + help='The master address for RPC initialization', + ) + parser.add_argument( + '--training_pg_master_port', + type=int, + default=11111, + help="The port used for PyTorch's process group initialization", + ) + parser.add_argument( + '--train_loader_master_port', + type=int, + default=11112, + help='The port used for RPC initialization for training', + ) + parser.add_argument( + '--test_loader_master_port', + type=int, + default=11113, + help='The port used for RPC initialization for testing', + ) + args = parser.parse_args() + + # Record configuration information for debugging + f = open('dist_sage_sup.txt', 'a+') + f.write('--- Distributed training example of supervised SAGE ---\n') + f.write(f'* dataset: {args.dataset}\n') + f.write(f'* dataset root dir: {args.dataset_root_dir}\n') + f.write(f'* number of dataset partitions: {args.num_dataset_partitions}\n') + f.write(f'* total nodes: {args.num_nodes}\n') + f.write(f'* node rank: {args.node_rank}\n') + f.write(f'* number of training processes per node: ' + f'{args.num_training_procs}\n') + f.write(f'* epochs: {args.epochs}\n') + f.write(f'* batch size: {args.batch_size}\n') + f.write(f'* master addr: {args.master_addr}\n') + f.write(f'* training process group master port: ' + f'{args.training_pg_master_port}\n') + f.write(f'* training loader master port: ' + f'{args.train_loader_master_port}\n') + f.write(f'* testing loader master port: {args.test_loader_master_port}\n') + + f.write('--- Loading data partition ...\n') + root_dir = osp.join(osp.dirname(osp.realpath(__file__)), + args.dataset_root_dir) + data_pidx = args.node_rank % args.num_dataset_partitions + dataset = glt.distributed.DistDataset() + + label_file = osp.join(root_dir, f'{args.dataset}-label', 'label.pt') + dataset.load( + root_dir=osp.join(root_dir, f'{args.dataset}-partitions'), + partition_idx=data_pidx, + graph_mode='ZERO_COPY', + whole_node_label_file=label_file, + ) + train_file = osp.join(root_dir, f'{args.dataset}-train-partitions', + f'partition{data_pidx}.pt') + train_idx = torch.load(train_file) + test_file = osp.join(root_dir, f'{args.dataset}-test-partitions', + f'partition{data_pidx}.pt') + test_idx = torch.load(test_file) + train_idx.share_memory_() + test_idx.share_memory_() + + f.write('--- Launching training processes ...\n') + torch.multiprocessing.spawn( + run_training_proc, + args=( + args.num_nodes, + args.node_rank, + args.num_training_procs, + args.dataset, + args.in_channel, + args.out_channel, + dataset, + train_idx, + test_idx, + args.epochs, + args.batch_size, + args.master_addr, + args.training_pg_master_port, + args.train_loader_master_port, + args.test_loader_master_port, + ), + nprocs=args.num_training_procs, + join=True, + ) diff --git a/examples/distributed/graphlearn_for_pytorch/launch.py b/examples/distributed/graphlearn_for_pytorch/launch.py new file mode 100644 index 000000000000..4af7a4ca2513 --- /dev/null +++ b/examples/distributed/graphlearn_for_pytorch/launch.py @@ -0,0 +1,95 @@ +import argparse + +import click +import paramiko +import yaml + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--config', + type=str, + default='dist_train_sage_sup_config.yml', + help='The path to the configuration file', + ) + parser.add_argument( + 
'--epochs', + type=int, + default=10, + help='The number of training epochs', + ) + parser.add_argument( + '--batch_size', + type=int, + default=512, + help='The batch size for the training and testing data loaders', + ) + parser.add_argument( + '--master_addr', + type=str, + default='0.0.0.0', + help='Master IP address for synchronization across all training nodes', + ) + parser.add_argument( + '--master_port', + type=str, + default='11345', + help='The port for synchronization across all training nodes', + ) + args = parser.parse_args() + + config = open(args.config, 'r') + config = yaml.safe_load(config) + dataset = config['dataset'] + ip_list = config['nodes'] + port_list = config['ports'] + username_list = config['usernames'] + dst_path_list = config['dst_paths'] + node_ranks = list(range(len(ip_list))) + num_nodes = len(node_ranks) + visible_devices = config['visible_devices'] + python_bins = config['python_bins'] + num_cores = len(str(visible_devices[0]).split(',')) + in_channel = str(config['in_channel']) + out_channel = str(config['out_channel']) + + dataset_path = '../../../data/' + passwd_dict = {} + for username, ip in zip(username_list, ip_list): + passwd_dict[ip + username] = click.prompt( + f'Password for {username}@{ip}', hide_input=True) + for username, ip, port, dst, noderk, device, pythonbin in zip( + username_list, + ip_list, + port_list, + dst_path_list, + node_ranks, + visible_devices, + python_bins, + ): + trans = paramiko.Transport((ip, port)) + trans.connect(username=username, password=passwd_dict[ip + username]) + ssh = paramiko.SSHClient() + ssh._transport = trans + + to_dist_dir = 'cd ' + dst + \ + '/examples/distributed/graphlearn_for_pytorch/ ' + exec_example = "tmux new -d 'CUDA_VISIBLE_DEVICES=" + str(device) + \ + " " + pythonbin + " dist_train_sage_supervised.py --dataset=" + \ + dataset + " --dataset_root_dir=" + dataset_path + dataset + \ + " --in_channel=" + in_channel + " --out_channel=" + out_channel + \ + " --node_rank=" + str(noderk) + " --num_dataset_partitions=" + \ + str(num_nodes) + " --num_nodes=" + str(num_nodes) + \ + " --num_training_procs=" + str(num_cores) + " --master_addr=" + \ + args.master_addr + " --training_pg_master_port=" + \ + args.master_port + " --train_loader_master_port=" + \ + str(int(args.master_port) + 1) + " --test_loader_master_port=" + \ + str(int(args.master_port) + 2) + " --batch_size=" + \ + str(args.batch_size) + " --epochs=" + str(args.epochs) + + print(to_dist_dir + ' && ' + exec_example + " '") + stdin, stdout, stderr = ssh.exec_command( + to_dist_dir + ' && ' + exec_example + " '", bufsize=1) + print(stdout.read().decode()) + print(stderr.read().decode()) + ssh.close() diff --git a/examples/distributed/graphlearn_for_pytorch/partition_ogbn_dataset.py b/examples/distributed/graphlearn_for_pytorch/partition_ogbn_dataset.py new file mode 100644 index 000000000000..02347a026709 --- /dev/null +++ b/examples/distributed/graphlearn_for_pytorch/partition_ogbn_dataset.py @@ -0,0 +1,145 @@ +import argparse +import ast +import os.path as osp + +import graphlearn_torch as glt +import torch +from ogb.nodeproppred import PygNodePropPredDataset + + +def partition_dataset( + ogbn_dataset: str, + root_dir: str, + num_partitions: int, + num_nbrs: glt.NumNeighbors, + chunk_size: int, + cache_ratio: float, +): + ########################################################################### + # In distributed training (under the worker mode), each node in the cluster + # holds a partition of the graph. 
Thus before the training starts, we + # partition the dataset into multiple partitions, each of which corresponds + # to a specific training worker. + # The partitioning occurs in three steps: + # 1. Run a partition algorithm to assign nodes to partitions. + # 2. Construct partition graph structure based on the node assignment. + # 3. Split the node features and edge features based on the partition + # result. + ########################################################################### + + print(f'-- Loading {ogbn_dataset} ...') + dataset = PygNodePropPredDataset(ogbn_dataset, root_dir) + data = dataset[0] + print(f'* node count: {data.num_nodes}') + print(f'* edge count: {data.num_edges}') + split_idx = dataset.get_idx_split() + + print('-- Saving label ...') + label_dir = osp.join(root_dir, f'{ogbn_dataset}-label') + glt.utils.ensure_dir(label_dir) + torch.save(data.y.squeeze(), osp.join(label_dir, 'label.pt')) + + print('-- Partitioning training idx ...') + train_idx = split_idx['train'] + train_idx = train_idx.split(train_idx.size(0) // num_partitions) + train_idx_partitions_dir = osp.join( + root_dir, + f'{ogbn_dataset}-train-partitions', + ) + glt.utils.ensure_dir(train_idx_partitions_dir) + for pidx in range(num_partitions): + torch.save( + train_idx[pidx], + osp.join(train_idx_partitions_dir, f'partition{pidx}.pt'), + ) + + print('-- Partitioning test idx ...') + test_idx = split_idx['test'] + test_idx = test_idx.split(test_idx.size(0) // num_partitions) + test_idx_partitions_dir = osp.join( + root_dir, + f'{ogbn_dataset}-test-partitions', + ) + glt.utils.ensure_dir(test_idx_partitions_dir) + for pidx in range(num_partitions): + torch.save( + test_idx[pidx], + osp.join(test_idx_partitions_dir, f'partition{pidx}.pt'), + ) + + print('-- Initializing graph ...') + csr_topo = glt.data.Topology(edge_index=data.edge_index, + input_layout='COO') + graph = glt.data.Graph(csr_topo, mode='ZERO_COPY') + + print('-- Sampling hotness ...') + glt_sampler = glt.sampler.NeighborSampler(graph, num_nbrs) + node_probs = [] + for pidx in range(num_partitions): + seeds = train_idx[pidx] + prob = glt_sampler.sample_prob(seeds, data.num_nodes) + node_probs.append(prob.cpu()) + + print('-- Partitioning graph and features ...') + partitions_dir = osp.join(root_dir, f'{ogbn_dataset}-partitions') + freq_partitioner = glt.partition.FrequencyPartitioner( + output_dir=partitions_dir, + num_parts=num_partitions, + num_nodes=data.num_nodes, + edge_index=data.edge_index, + probs=node_probs, + node_feat=data.x, + chunk_size=chunk_size, + cache_ratio=cache_ratio, + ) + freq_partitioner.partition() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--dataset', + type=str, + default='ogbn-products', + help='The name of the dataset', + ) + parser.add_argument( + '--num_partitions', + type=int, + default=2, + help='The Number of partitions', + ) + parser.add_argument( + '--root_dir', + type=str, + default='../../../data/ogbn-products', + help='The root directory (relative path) of the partitioned dataset', + ) + parser.add_argument( + '--num_nbrs', + type=ast.literal_eval, + default='[15,10,5]', + help='The number of neighbors to sample hotness for feature caching', + ) + parser.add_argument( + '--chunk_size', + type=int, + default=10000, + help='The chunk size for feature partitioning', + ) + parser.add_argument( + '--cache_ratio', + type=float, + default=0.2, + help='The proportion to cache features per partition', + ) + args = parser.parse_args() + + partition_dataset( + 
ogbn_dataset=args.dataset,
+        root_dir=osp.join(osp.dirname(osp.realpath(__file__)), args.root_dir),
+        num_partitions=args.num_partitions,
+        num_nbrs=args.num_nbrs,
+        chunk_size=args.chunk_size,
+        cache_ratio=args.cache_ratio,
+    )

From a2bc6c3d5db30b23764cbb82a649321545983721 Mon Sep 17 00:00:00 2001
From: ZhengHongming888 
Date: Fri, 4 Aug 2023 08:55:34 -0700
Subject: [PATCH 1396/2432] Add partitioning example for distributed training
 (#7846)

This code is part of the overall distributed training support for PyG.
We provide two scripts here:

1) one to generate the partitioned `ogbn-products` (homogeneous) dataset,
   controlled by the `num_partitions` (default: 2) argument.
2) one to generate the partitioned `ogbn-mag` (heterogeneous) dataset,
   controlled by the `num_partitions` (default: 2) argument.

Each script first downloads the raw dataset into the current folder; all
partition results are stored in the folder structures shown below.

1) homo partition
![image](https://github.com/pyg-team/pytorch_geometric/assets/33777424/767a0650-ffda-4e14-8be6-c71e92f19757)

2) hetero partition
![image](https://github.com/pyg-team/pytorch_geometric/assets/33777424/6325b9bd-3d57-44af-8617-cd5fc39ea859)

You can set a different number of partitions to generate a different
partitioned dataset. The partitioned dataset already includes the train_idx
(seeds), test_idx and labels to make it easier to use.

Any comments please let us know. Thanks.

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Matthias Fey 
---
 CHANGELOG.md                                  |  2 +-
 examples/distributed/pyg/partition_graph.py   | 49 +++++++++++++++++++
 .../distributed/pyg/partition_hetero_graph.py | 48 ++++++++++++++++++
 3 files changed, 98 insertions(+), 1 deletion(-)
 create mode 100644 examples/distributed/pyg/partition_graph.py
 create mode 100644 examples/distributed/pyg/partition_hetero_graph.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 235ba8ef926e..0cedaf7923b0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,7 +37,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493), [#7764](https://github.com/pyg-team/pytorch_geometric/pull/7764) [#7765](https://github.com/pyg-team/pytorch_geometric/pull/7765)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671), [#7846](https://github.com/pyg-team/pytorch_geometric/pull/7846)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/examples/distributed/pyg/partition_graph.py b/examples/distributed/pyg/partition_graph.py new file mode 100644 index 000000000000..c5069f299134 --- /dev/null +++ b/examples/distributed/pyg/partition_graph.py @@ -0,0 +1,49 @@ +import argparse +import os +import os.path as osp + +import torch +from ogb.nodeproppred import PygNodePropPredDataset + +from torch_geometric.distributed import Partitioner + + +def partition_dataset(ogbn_dataset: str, root_dir: str, num_parts: int): + save_dir = osp.join(root_dir, f'{ogbn_dataset}-partitions') + dataset = PygNodePropPredDataset(ogbn_dataset) + data = dataset[0] + + partitioner = Partitioner(data, num_parts, save_dir) + partitioner.generate_partition() + split_idx = dataset.get_idx_split() + + print('-- Saving label ...') + label_dir = osp.join(root_dir, f'{ogbn_dataset}-label') + os.makedirs(label_dir, exist_ok=True) + torch.save(data.y.squeeze(), osp.join(label_dir, 'label.pt')) + + print('-- Partitioning training indices ...') + train_idx = split_idx['train'] + train_idx = train_idx.split(train_idx.size(0) // num_parts) + train_part_dir = osp.join(root_dir, f'{ogbn_dataset}-train-partitions') + os.makedirs(train_part_dir, exist_ok=True) + for i in range(num_parts): + torch.save(train_idx[i], osp.join(train_part_dir, f'partition{i}.pt')) + + print('-- Partitioning test indices ...') + test_idx = split_idx['test'] + test_idx = test_idx.split(test_idx.size(0) // num_parts) + test_part_dir = osp.join(root_dir, f'{ogbn_dataset}-test-partitions') + os.makedirs(test_part_dir, exist_ok=True) + for i in range(num_parts): + torch.save(test_idx[i], osp.join(test_part_dir, f'partition{i}.pt')) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + 
parser.add_argument('--dataset', type=str, default='ogbn-products') + parser.add_argument('--root_dir', type=str, default='./data/products') + parser.add_argument('--num_partitions', type=int, default=2) + args = parser.parse_args() + + partition_dataset(args.dataset, args.root_dir, args.num_partitions) diff --git a/examples/distributed/pyg/partition_hetero_graph.py b/examples/distributed/pyg/partition_hetero_graph.py new file mode 100644 index 000000000000..ab8d7fd65db9 --- /dev/null +++ b/examples/distributed/pyg/partition_hetero_graph.py @@ -0,0 +1,48 @@ +import argparse +import os +import os.path as osp + +import torch + +from torch_geometric.datasets import OGB_MAG +from torch_geometric.distributed import Partitioner + + +def partition_dataset(ogbn_dataset: str, root_dir: str, num_parts: int): + save_dir = osp.join(root_dir, f'{ogbn_dataset}-partitions') + dataset = OGB_MAG(root=ogbn_dataset, preprocess='metapath2vec') + data = dataset[0] + + partitioner = Partitioner(data, num_parts, save_dir) + partitioner.generate_partition() + + print('-- Saving label ...') + label_dir = osp.join(root_dir, f'{ogbn_dataset}-label') + os.makedirs(label_dir, exist_ok=True) + torch.save(data['paper'].y.squeeze(), osp.join(label_dir, 'label.pt')) + + print('-- Partitioning training indices ...') + train_idx = data['paper'].train_mask.nonzero().view(-1) + train_idx = train_idx.split(train_idx.size(0) // num_parts) + train_part_dir = osp.join(root_dir, f'{ogbn_dataset}-train-partitions') + os.makedirs(train_part_dir, exist_ok=True) + for i in range(num_parts): + torch.save(train_idx[i], osp.join(train_part_dir, f'partition{i}.pt')) + + print('-- Partitioning test indices ...') + test_idx = data['paper'].test_mask.nonzero().view(-1) + test_idx = test_idx.split(test_idx.size(0) // num_parts) + test_part_dir = osp.join(root_dir, f'{ogbn_dataset}-test-partitions') + os.makedirs(test_part_dir, exist_ok=True) + for i in range(num_parts): + torch.save(test_idx[i], osp.join(test_part_dir, f'partition{i}.pt')) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--dataset', type=str, default='ogbn-mag') + parser.add_argument('--root_dir', type=str, default='./data/mag') + parser.add_argument('--num_partitions', type=int, default=2) + args = parser.parse_args() + + partition_dataset(args.dataset, args.root_dir, args.num_partitions) From ca5311c2d1cf3d384916c47c112b69dcd7703f24 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 5 Aug 2023 10:41:11 +0200 Subject: [PATCH 1397/2432] Add `batch_size` argument to `unbatch` functionalities (#7851) --- CHANGELOG.md | 1 + torch_geometric/utils/unbatch.py | 23 +++++++++++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0cedaf7923b0..fd17cdfe9b16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a `batch_size` argument to `unbatch` functionalities ([#7851](https://github.com/pyg-team/pytorch_geometric/pull/7851)) - Added a distributed example using `graphlearn-for-pytorch` ([#7402](https://github.com/pyg-team/pytorch_geometric/pull/7402)) - Integrate `neg_sampling_ratio` into `TemporalDataLoader` ([#7644](https://github.com/pyg-team/pytorch_geometric/pull/7644)) - Added `faiss`-based `KNNINdex` classes for L2 or maximum inner product search ([#7842](https://github.com/pyg-team/pytorch_geometric/pull/7842)) diff --git a/torch_geometric/utils/unbatch.py b/torch_geometric/utils/unbatch.py index 62efcbf07bb0..0ba85c7e1135 100644 --- a/torch_geometric/utils/unbatch.py +++ b/torch_geometric/utils/unbatch.py @@ -1,4 +1,4 @@ -from typing import List +from typing import List, Optional import torch from torch import Tensor @@ -6,7 +6,12 @@ from torch_geometric.utils import degree -def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]: +def unbatch( + src: Tensor, + batch: Tensor, + dim: int = 0, + batch_size: Optional[int] = None, +) -> List[Tensor]: r"""Splits :obj:`src` according to a :obj:`batch` vector along dimension :obj:`dim`. @@ -17,6 +22,7 @@ def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]: entry in :obj:`src` to a specific example. Must be ordered. dim (int, optional): The dimension along which to split the :obj:`src` tensor. (default: :obj:`0`) + batch_size (int, optional) The batch size. (default: :obj:`None`) :rtype: :class:`List[Tensor]` @@ -27,11 +33,15 @@ def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]: >>> unbatch(src, batch) (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) """ - sizes = degree(batch, dtype=torch.long).tolist() + sizes = degree(batch, batch_size, dtype=torch.long).tolist() return src.split(sizes, dim) -def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]: +def unbatch_edge_index( + edge_index: Tensor, + batch: Tensor, + batch_size: Optional[int] = None, +) -> List[Tensor]: r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector. Args: @@ -39,6 +49,7 @@ def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]: batch (LongTensor): The batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. Must be ordered. + batch_size (int, optional) The batch size. 
(default: :obj:`None`) :rtype: :class:`List[Tensor]` @@ -53,10 +64,10 @@ def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]: tensor([[0, 1, 1, 2], [1, 0, 2, 1]])) """ - deg = degree(batch, dtype=torch.int64) + deg = degree(batch, batch_size, dtype=torch.long) ptr = torch.cat([deg.new_zeros(1), deg.cumsum(dim=0)[:-1]], dim=0) edge_batch = batch[edge_index[0]] edge_index = edge_index - ptr[edge_batch] - sizes = degree(edge_batch, dtype=torch.int64).cpu().tolist() + sizes = degree(edge_batch, batch_size, dtype=torch.long).cpu().tolist() return edge_index.split(sizes, dim=1) From 7b1ed2a46e250005eae53bfff9e6203406505ef1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 7 Aug 2023 07:47:56 +0200 Subject: [PATCH 1398/2432] Fix `ONNX` test on PyTorch nightly (#7853) --- test/nn/models/test_basic_gnn.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 95b4854834b2..b38e2cedda6c 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -224,7 +224,7 @@ def test_packaging(): @withPackage('torch>=1.12.0') @withPackage('onnx', 'onnxruntime') -def test_onnx(tmp_path, capfd): +def test_onnx(tmp_path): import onnx import onnxruntime as ort @@ -251,9 +251,6 @@ def forward(self, x, edge_index): path = osp.join(tmp_path, 'model.onnx') torch.onnx.export(model, (x, edge_index), path, input_names=('x', 'edge_index'), opset_version=16) - if torch_geometric.typing.WITH_PT2: - out, _ = capfd.readouterr() - assert '0 NONE 0 NOTE 0 WARNING 0 ERROR' in out model = onnx.load(path) onnx.checker.check_model(model) From aadb1355e5418b971307cf00e81a0fd94d1c32a5 Mon Sep 17 00:00:00 2001 From: ZhengHongming888 Date: Sun, 6 Aug 2023 23:58:15 -0700 Subject: [PATCH 1399/2432] Add distributed feature info for distributed training (#7715) This code belongs to the part of the whole distributed training for PyG. (This PR is to replace #7678) This PR originally designed for the DistFeature class and now merged with LocalFeatureStore - Add partition/rpc info into LocalFeatureStore like num_partition, partition_idx, feature_pb (feature_partitionbook), partition_meta, RpcRouter, etc Add one new class (RpcCallFeatureLookup) to do real remote rpc feature_lookup work Add one api ( .lookup_features() ) to do feature lookup in local node and remote nodes based on sampled global node ids/edge ids based on torch rpc apis one unit test to verify the function of local/remote feature lookup under .test/distributed/. folder Now we combined the local feature store and distributed feature properties (partition info and rpc remote access apis) into one FeatureStore. later on we will change the class name from LocalFeatureStore into PartitionFeatureStore with another PR. Any comments please let us know. thanks. 
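For reference, a minimal usage sketch of the new asynchronous lookup (assuming
a `LocalFeatureStore` whose `feature_pb`, `meta` and RPC router have already
been set up, as in `test/distributed/test_rpc.py`; variable names are
hypothetical):

```python
import torch

# Global node IDs produced by a (distributed) sampler:
global_node_ids = torch.tensor([0, 15, 3141])

# `lookup_features()` returns a `torch.futures.Future`; local features are
# read directly, remote features are fetched from other partitions via RPC:
future = feat_store.lookup_features(global_node_ids, is_node_feat=True)
x = future.wait()  # Feature rows are ordered to match `global_node_ids`.
```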
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey Co-authored-by: root --- CHANGELOG.md | 2 +- test/distributed/test_rpc.py | 6 +- torch_geometric/data/hetero_data.py | 2 +- .../distributed/local_feature_store.py | 190 +++++++++++++++++- .../distributed/local_graph_store.py | 40 +++- torch_geometric/distributed/rpc.py | 8 +- torch_geometric/typing.py | 2 + 7 files changed, 236 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd17cdfe9b16..2adefeddce0f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,7 +38,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493), [#7764](https://github.com/pyg-team/pytorch_geometric/pull/7764) [#7765](https://github.com/pyg-team/pytorch_geometric/pull/7765)) - Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483)) - Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425)) -- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671), [#7846](https://github.com/pyg-team/pytorch_geometric/pull/7846)) +- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452)), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671), [#7846](https://github.com/pyg-team/pytorch_geometric/pull/7846), [#7715](https://github.com/pyg-team/pytorch_geometric/pull/7715)) - Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442)) - Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421)) - Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441)) diff --git a/test/distributed/test_rpc.py b/test/distributed/test_rpc.py index 69d47a5ee691..924f2a5a7b68 100644 --- a/test/distributed/test_rpc.py +++ b/test/distributed/test_rpc.py @@ -6,7 +6,7 @@ import torch_geometric.distributed.rpc as rpc from torch_geometric.distributed import LocalFeatureStore from torch_geometric.distributed.dist_context import DistContext, DistRole -from torch_geometric.distributed.rpc import RpcRouter +from torch_geometric.distributed.rpc import RPCRouter from torch_geometric.testing import onlyLinux @@ -44,7 +44,7 @@ def run_rpc_feature_test( ] # 3) Find the mapping between worker and partition ID: - rpc_router = RpcRouter(partition_to_workers) + rpc_router = RPCRouter(partition_to_workers) assert rpc_router.get_to_worker(partition_idx=0) == 'dist-feature-test-0' assert rpc_router.get_to_worker(partition_idx=1) == 'dist-feature-test-1' @@ -60,7 +60,7 @@ def 
run_rpc_feature_test( feature.partition_idx = rank feature.feature_pb = partition_book feature.meta = meta - feature.set_local_only(local_only=False) + feature.local_only = False feature.set_rpc_router(rpc_router) # Global node IDs: diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 0e2955d7be96..a3e6cd37d206 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -18,6 +18,7 @@ EdgeTensorType, EdgeType, FeatureTensorType, + NodeOrEdgeType, NodeType, QueryType, SparseTensor, @@ -29,7 +30,6 @@ mask_select, ) -NodeOrEdgeType = Union[NodeType, EdgeType] NodeOrEdgeStorage = Union[NodeStorage, EdgeStorage] diff --git a/torch_geometric/distributed/local_feature_store.py b/torch_geometric/distributed/local_feature_store.py index 260087e09a74..b1543c2ae64a 100644 --- a/torch_geometric/distributed/local_feature_store.py +++ b/torch_geometric/distributed/local_feature_store.py @@ -9,7 +9,26 @@ from torch_geometric.data import FeatureStore, TensorAttr from torch_geometric.data.feature_store import _FieldStatus -from torch_geometric.typing import EdgeType, NodeType +from torch_geometric.distributed.rpc import ( + RPCCallBase, + RPCRouter, + rpc_async, + rpc_register, +) +from torch_geometric.typing import EdgeType, NodeOrEdgeType, NodeType + + +class RPCCallFeatureLookup(RPCCallBase): + r"""A wrapper for RPC calls to the feature store.""" + def __init__(self, dist_feature: FeatureStore): + super().__init__() + self.dist_feature = dist_feature + + def rpc_async(self, *args, **kwargs): + return self.dist_feature.rpc_local_feature_get(*args, **kwargs) + + def rpc_sync(self, *args, **kwargs): + raise NotImplementedError @dataclass @@ -38,6 +57,15 @@ def __init__(self): # Save the mapping from global node/edge IDs to indices in `_feat`: self._global_id_to_index: Dict[Union[NodeType, EdgeType], Tensor] = {} + # For partition/rpc information related to distribute features: + self.num_partitions = 1 + self.partition_idx = 0 + self.feature_pb: Union[Tensor, Dict[NodeOrEdgeType, Tensor]] + self.local_only = False + self.rpc_router: Optional[RPCRouter] = None + self.meta: Optional[Dict] = None + self.rpc_call_id: Optional[int] = None + @staticmethod def key(attr: TensorAttr) -> Tuple[str, str]: return (attr.group_name, attr.attr_name) @@ -107,6 +135,166 @@ def _get_tensor_size(self, attr: TensorAttr) -> Tuple[int, ...]: def get_all_tensor_attrs(self) -> List[LocalTensorAttr]: return [self._tensor_attr_cls.cast(*key) for key in self._feat.keys()] + def set_rpc_router(self, rpc_router: RPCRouter): + self.rpc_router = rpc_router + + if not self.local_only: + if self.rpc_router is None: + raise ValueError("An RPC router must be provided") + rpc_call = RPCCallFeatureLookup(self) + self.rpc_call_id = rpc_register(rpc_call) + else: + self.rpc_call_id = None + + def lookup_features( + self, + index: Tensor, + is_node_feat: bool = True, + input_type: Optional[NodeOrEdgeType] = None, + ) -> torch.futures.Future: + r"""Lookup of local/remote features.""" + remote_fut = self._remote_lookup_features(index, is_node_feat, + input_type) + local_feature = self._local_lookup_features(index, is_node_feat, + input_type) + res_fut = torch.futures.Future() + + def when_finish(*_): + try: + remote_feature_list = remote_fut.wait() + # combine the feature from remote and local + result = torch.zeros(index.size(0), local_feature[0].size(1), + dtype=local_feature[0].dtype) + result[local_feature[1]] = local_feature[0] + for remote in remote_feature_list: + 
result[remote[1]] = remote[0] + except Exception as e: + res_fut.set_exception(e) + else: + res_fut.set_result(result) + + remote_fut.add_done_callback(when_finish) + return res_fut + + def _local_lookup_features( + self, + index: Tensor, + is_node_feat: bool = True, + input_type: Optional[Union[NodeType, EdgeType]] = None, + ) -> Tuple[Tensor, Tensor]: + r""" lookup the features in local nodes based on node/edge ids """ + if self.meta['is_hetero']: + feat = self + pb = self.feature_pb[input_type] + else: + feat = self + pb = self.feature_pb + + input_order = torch.arange(index.size(0), dtype=torch.long) + partition_ids = pb[index] + + local_mask = partition_ids == self.partition_idx + local_ids = torch.masked_select(index, local_mask) + local_index = torch.masked_select(input_order, local_mask) + + if self.meta["is_hetero"]: + if is_node_feat: + kwargs = dict(group_name=input_type, attr_name='x') + ret_feat = feat.get_tensor_from_global_id( + index=local_ids, **kwargs) + else: + kwargs = dict(group_name=input_type, attr_name='edge_attr') + ret_feat = feat.get_tensor_from_global_id( + index=local_ids, **kwargs) + else: + if is_node_feat: + kwargs = dict(group_name=None, attr_name='x') + ret_feat = feat.get_tensor_from_global_id( + index=local_ids, **kwargs) + else: + kwargs = dict(group_name=(None, None), attr_name='edge_attr') + ret_feat = feat.get_tensor_from_global_id( + index=local_ids, **kwargs) + + return ret_feat, local_index + + def _remote_lookup_features( + self, + index: Tensor, + is_node_feat: bool = True, + input_type: Optional[Union[NodeType, EdgeType]] = None, + ) -> torch.futures.Future: + r"""Fetch the remote features with the remote node/edge ids""" + + if self.meta["is_hetero"]: + pb = self.feature_pb[input_type] + else: + pb = self.feature_pb + + input_order = torch.arange(index.size(0), dtype=torch.long) + partition_ids = pb[index] + futs, indexes = [], [] + for pidx in range(0, self.num_partitions): + if pidx == self.partition_idx: + continue + remote_mask = (partition_ids == pidx) + remote_ids = index[remote_mask] + if remote_ids.shape[0] > 0: + to_worker = self.rpc_router.get_to_worker(pidx) + futs.append( + rpc_async( + to_worker, + self.rpc_call_id, + args=(remote_ids.cpu(), is_node_feat, input_type), + )) + indexes.append(torch.masked_select(input_order, remote_mask)) + collect_fut = torch.futures.collect_all(futs) + res_fut = torch.futures.Future() + + def when_finish(*_): + try: + fut_list = collect_fut.wait() + result = [] + for i, fut in enumerate(fut_list): + result.append((fut.wait(), indexes[i])) + except Exception as e: + res_fut.set_exception(e) + else: + res_fut.set_result(result) + + collect_fut.add_done_callback(when_finish) + return res_fut + + def rpc_local_feature_get( + self, + index: Tensor, + is_node_feat: bool = True, + input_type: Optional[Union[NodeType, EdgeType]] = None, + ) -> Tensor: + r"""Lookup of features in remote nodes.""" + if self.meta['is_hetero']: + feat = self + if is_node_feat: + kwargs = dict(group_name=input_type, attr_name='x') + ret_feat = feat.get_tensor_from_global_id( + index=index, **kwargs) + else: + kwargs = dict(group_name=input_type, attr_name='edge_attr') + ret_feat = feat.get_tensor_from_global_id( + index=index, **kwargs) + else: + feat = self + if is_node_feat: + kwargs = dict(group_name=None, attr_name='x') + ret_feat = feat.get_tensor_from_global_id( + index=index, **kwargs) + else: + kwargs = dict(group_name=(None, None), attr_name='edge_attr') + ret_feat = feat.get_tensor_from_global_id( + index=index, 
**kwargs) + + return ret_feat + # Initialization ########################################################## @classmethod diff --git a/torch_geometric/distributed/local_graph_store.py b/torch_geometric/distributed/local_graph_store.py index fb05f2ecc659..5127f0634ebc 100644 --- a/torch_geometric/distributed/local_graph_store.py +++ b/torch_geometric/distributed/local_graph_store.py @@ -1,6 +1,6 @@ import json import os.path as osp -from typing import Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import Tensor @@ -18,10 +18,40 @@ def __init__(self): self._edge_attr: Dict[Tuple, EdgeAttr] = {} self._edge_id: Dict[Tuple, Tensor] = {} + self.num_partitions = 1 + self.partition_idx = 0 + # Mapping between node ID and partition ID + self.node_pb: Union[Tensor, Dict[NodeType, Tensor]] = None + # Mapping between edge ID and partition ID + self.edge_pb: Union[Tensor, Dict[EdgeType, Tensor]] = None + # Meta information related to partition and graph store info + self.meta: Optional[Dict[Any, Any]] = None + # Partition labels + self.labels: Union[Tensor, Dict[EdgeType, Tensor]] = None + @staticmethod def key(attr: EdgeAttr) -> Tuple: return (attr.edge_type, attr.layout.value) + def get_partition_ids_from_nids( + self, + ids: torch.Tensor, + node_type: Optional[NodeType] = None, + ) -> Tensor: + r"""Get the partition IDs of node IDs for a specific node type.""" + if self.meta['is_hetero']: + assert node_type is not None + return self.node_pb[node_type][ids] + return self.node_pb[ids] + + def get_partition_ids_from_eids(self, eids: torch.Tensor, + edge_type: Optional[EdgeType] = None): + r"""Get the partition IDs of edge IDs for a specific edge type.""" + if self.meta["is_hetero"]: + assert edge_type is not None + return self.edge_pb[edge_type][eids] + return self.edge_pb[eids] + def put_edge_id(self, edge_id: Tensor, *args, **kwargs) -> bool: edge_attr = self._edge_attr_cls.cast(*args, **kwargs) self._edge_id[self.key(edge_attr)] = edge_id @@ -126,15 +156,17 @@ def from_partition(cls, root: str, pid: int) -> 'LocalGraphStore': if not meta['is_hetero']: attr = dict(edge_type=None, layout='coo', size=graph_data['size']) - graph_store.put_edge_index((graph_data['row'], graph_data['col']), - **attr) + graph_store.put_edge_index( + torch.stack((graph_data['row'], graph_data['col']), dim=0), + **attr) graph_store.put_edge_id(graph_data['edge_id'], **attr) if meta['is_hetero']: for edge_type, data in graph_data.items(): attr = dict(edge_type=edge_type, layout='coo', size=data['size']) - graph_store.put_edge_index((data['row'], data['col']), **attr) + graph_store.put_edge_index( + torch.stack((data['row'], data['col']), dim=0), **attr) graph_store.put_edge_id(data['edge_id'], **attr) return graph_store diff --git a/torch_geometric/distributed/rpc.py b/torch_geometric/distributed/rpc.py index 7802cea2becc..761b61694f6f 100644 --- a/torch_geometric/distributed/rpc.py +++ b/torch_geometric/distributed/rpc.py @@ -97,7 +97,7 @@ def shutdown_rpc(graceful: bool = True): atexit.register(shutdown_rpc, False) -class RpcRouter: +class RPCRouter: r"""A router to get the worker based on the partition ID.""" def __init__(self, partition_to_workers: List[List[str]]): for pid, rpc_worker_list in enumerate(partition_to_workers): @@ -132,7 +132,7 @@ def rpc_partition_to_workers( return partition_to_workers -class RpcCallBase(ABC): +class RPCCallBase(ABC): r"""A wrapper base class for RPC calls in remote processes.""" @abstractmethod def rpc_sync(self, *args, 
**kwargs): @@ -145,11 +145,11 @@ def rpc_async(self, *args, **kwargs): _rpc_call_lock = threading.RLock() _rpc_call_id: int = 0 -_rpc_call_pool: Dict[int, RpcCallBase] = {} +_rpc_call_pool: Dict[int, RPCCallBase] = {} @rpc_require_initialized -def rpc_register(call: RpcCallBase) -> int: +def rpc_register(call: RPCCallBase) -> int: r"""Registers a call for RPC requests.""" global _rpc_call_id, _rpc_call_pool diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 762d2829c3e5..1a6f1a61dbf0 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -219,6 +219,8 @@ def t(self) -> Tensor: # Only support accessing its transpose: # `data[('author', 'writes', 'paper')] EdgeType = Tuple[str, str, str] +NodeOrEdgeType = Union[NodeType, EdgeType] + DEFAULT_REL = 'to' EDGE_TYPE_STR_SPLIT = '__' From b965d768aeeb3ed7861a37e65689e1e91d7d18be Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 10 Aug 2023 14:13:15 +0200 Subject: [PATCH 1400/2432] `BasicGNN.jittable()` support (#7865) --- CHANGELOG.md | 1 + test/nn/models/test_basic_gnn.py | 10 + test/nn/test_model_summary.py | 26 ++- torch_geometric/nn/conv/message_passing.py | 3 +- torch_geometric/nn/models/basic_gnn.py | 211 ++++++++++++++++----- torch_geometric/nn/models/linkx.py | 51 ++--- torch_geometric/nn/models/rect.py | 107 +++++++---- torch_geometric/utils/trim_to_layer.py | 2 +- 8 files changed, 296 insertions(+), 115 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2adefeddce0f..057c48668aa2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added TorchScript support inside `BasicGNN` models ([#7865](https://github.com/pyg-team/pytorch_geometric/pull/7865)) - Added a `batch_size` argument to `unbatch` functionalities ([#7851](https://github.com/pyg-team/pytorch_geometric/pull/7851)) - Added a distributed example using `graphlearn-for-pytorch` ([#7402](https://github.com/pyg-team/pytorch_geometric/pull/7402)) - Integrate `neg_sampling_ratio` into `TemporalDataLoader` ([#7644](https://github.com/pyg-team/pytorch_geometric/pull/7644)) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index b38e2cedda6c..0b98b57ca32c 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -138,6 +138,16 @@ def test_edge_cnn(out_dim, dropout, act, norm, jk): assert model(x, edge_index).size() == (3, out_channels) +def test_jittable(): + x = torch.randn(3, 8) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + + model = GCN(8, 16, num_layers=2).jittable() + model = torch.jit.script(model) + + assert model(x, edge_index).size() == (3, 16) + + @pytest.mark.parametrize('out_dim', out_dims) @pytest.mark.parametrize('jk', jks) def test_one_layer_gnn(out_dim, jk): diff --git a/test/nn/test_model_summary.py b/test/nn/test_model_summary.py index f07aff03c162..0f2926d3255c 100644 --- a/test/nn/test_model_summary.py +++ b/test/nn/test_model_summary.py @@ -64,6 +64,9 @@ def test_summary_basic(gcn): | ├─(convs)ModuleList | -- | -- | 1,072 | | │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | 528 | | │ └─(1)GCNConv | [100, 16], [2, 20] | [100, 32] | 544 | +| ├─(norms)ModuleList | -- | -- | -- | +| │ └─(0)Identity | [100, 16] | [100, 16] | -- | +| │ └─(1)Identity | -- | -- | -- | +---------------------+--------------------+----------------+----------+ """ assert summary(gcn['model'], gcn['x'], gcn['edge_index']) == expected[1:-1] @@ -81,6 +84,9 @@ def 
test_summary_with_sparse_tensor(gcn): | ├─(convs)ModuleList | -- | -- | 1,072 | | │ └─(0)GCNConv | [100, 32], [100, 100] | [100, 16] | 528 | | │ └─(1)GCNConv | [100, 16], [100, 100] | [100, 32] | 544 | +| ├─(norms)ModuleList | -- | -- | -- | +| │ └─(0)Identity | [100, 16] | [100, 16] | -- | +| │ └─(1)Identity | -- | -- | -- | +---------------------+-----------------------+----------------+----------+ """ assert summary(gcn['model'], gcn['x'], gcn['adj_t']) == expected[1:-1] @@ -96,10 +102,15 @@ def test_summary_with_max_depth(gcn): | ├─(dropout)Dropout | [100, 16] | [100, 16] | -- | | ├─(act)ReLU | [100, 16] | [100, 16] | -- | | ├─(convs)ModuleList | -- | -- | 1,072 | +| ├─(norms)ModuleList | -- | -- | -- | +---------------------+--------------------+----------------+----------+ """ - assert summary(gcn['model'], gcn['x'], gcn['edge_index'], - max_depth=1) == expected[1:-1] + assert summary( + gcn['model'], + gcn['x'], + gcn['edge_index'], + max_depth=1, + ) == expected[1:-1] @withPackage('tabulate') @@ -118,10 +129,17 @@ def test_summary_with_leaf_module(gcn): | │ └─(1)GCNConv | [100, 16], [2, 20] | [100, 32] | 544 | | │ │ └─(aggr_module)SumAggregation | [120, 32], [120] | [100, 32] | -- | | │ │ └─(lin)Linear | [100, 16] | [100, 32] | 512 | +| ├─(norms)ModuleList | -- | -- | -- | +| │ └─(0)Identity | [100, 16] | [100, 16] | -- | +| │ └─(1)Identity | -- | -- | -- | +-----------------------------------------+--------------------+----------------+----------+ """ - assert summary(gcn['model'], gcn['x'], gcn['edge_index'], - leaf_module=None) == expected[13:-1] + assert summary( + gcn['model'], + gcn['x'], + gcn['edge_index'], + leaf_module=None, + ) == expected[13:-1] @withPackage('tabulate') diff --git a/torch_geometric/nn/conv/message_passing.py b/torch_geometric/nn/conv/message_passing.py index 1daf66490eac..662cdcd59a38 100644 --- a/torch_geometric/nn/conv/message_passing.py +++ b/torch_geometric/nn/conv/message_passing.py @@ -768,7 +768,8 @@ def register_edge_update_forward_hook(self, @torch.jit.unused def jittable(self, typing: Optional[str] = None) -> 'MessagePassing': r"""Analyzes the :class:`MessagePassing` instance and produces a new - jittable module. + jittable module that can be used in combination with + :meth:`torch.jit.script`. Args: typing (str, optional): If given, will generate a concrete instance diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index 15397a6b5b20..155b9a8368b1 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -1,5 +1,5 @@ import copy -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Final, List, Optional, Tuple, Union import torch from torch import Tensor @@ -23,7 +23,7 @@ activation_resolver, normalization_resolver, ) -from torch_geometric.typing import Adj, OptTensor +from torch_geometric.typing import Adj, OptTensor, SparseTensor from torch_geometric.utils.trim_to_layer import TrimToLayer @@ -61,6 +61,9 @@ class BasicGNN(torch.nn.Module): **kwargs (optional): Additional arguments of the underlying :class:`torch_geometric.nn.conv.MessagePassing` layers. 
""" + supports_edge_weight: Final[bool] + supports_edge_attr: Final[bool] + def __init__( self, in_channels: int, @@ -117,18 +120,21 @@ def __init__( self.convs.append( self.init_conv(in_channels, hidden_channels, **kwargs)) - self.norms = None - if norm is not None: - norm_layer = normalization_resolver( - norm, - hidden_channels, - **(norm_kwargs or {}), - ) - self.norms = ModuleList() - for _ in range(num_layers - 1): - self.norms.append(copy.deepcopy(norm_layer)) - if jk is not None: - self.norms.append(copy.deepcopy(norm_layer)) + self.norms = ModuleList() + norm_layer = normalization_resolver( + norm, + hidden_channels, + **(norm_kwargs or {}), + ) + if norm_layer is None: + norm_layer = torch.nn.Identity() + for _ in range(num_layers - 1): + self.norms.append(copy.deepcopy(norm_layer)) + + if jk is not None: + self.norms.append(copy.deepcopy(norm_layer)) + else: + self.norms.append(torch.nn.Identity()) if jk is not None and jk != 'last': self.jk = JumpingKnowledge(jk, hidden_channels, num_layers) @@ -152,18 +158,42 @@ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" for conv in self.convs: conv.reset_parameters() - for norm in self.norms or []: - norm.reset_parameters() + for norm in self.norms: + if hasattr(norm, 'reset_parameters'): + norm.reset_parameters() if hasattr(self, 'jk'): self.jk.reset_parameters() if hasattr(self, 'lin'): self.lin.reset_parameters() - def forward( + @torch.jit._overload_method + def forward( # noqa + x, + edge_index, + edge_weight=None, + edge_attr=None, + num_sampled_nodes_per_hop=None, + num_sampled_edges_per_hop=None, + ): + # type: (Tensor, Tensor, OptTensor, OptTensor, Optional[List[int]], Optional[List[int]]) -> Tensor # noqa + pass + + @torch.jit._overload_method + def forward( # noqa + x, + edge_index, + edge_weight=None, + edge_attr=None, + num_sampled_nodes_per_hop=None, + num_sampled_edges_per_hop=None, + ): + # type: (Tensor, SparseTensor, OptTensor, OptTensor, Optional[List[int]], Optional[List[int]]) -> Tensor # noqa + pass + + def forward( # noqa self, x: Tensor, - edge_index: Adj, - *, + edge_index: Tensor, # TODO Support `SparseTensor` in type hint. edge_weight: OptTensor = None, edge_attr: OptTensor = None, num_sampled_nodes_per_hop: Optional[List[int]] = None, @@ -172,7 +202,7 @@ def forward( r""" Args: x (torch.Tensor): The input node features. - edge_index (torch.Tensor): The edge indices. + edge_index (torch.Tensor or SparseTensor): The edge indices. edge_weight (torch.Tensor, optional): The edge weights (if supported by the underlying GNN layer). (default: :obj:`None`) edge_attr (torch.Tensor, optional): The edge features (if supported @@ -196,8 +226,10 @@ def forward( "'edge_weight' and 'edge_attr'") xs: List[Tensor] = [] - for i in range(self.num_layers): - if num_sampled_nodes_per_hop is not None: + assert len(self.convs) == len(self.norms) + for i, (conv, norm) in enumerate(zip(self.convs, self.norms)): + if (num_sampled_nodes_per_hop is not None + and not torch.jit.is_scripting()): x, edge_index, value = self._trim( i, num_sampled_nodes_per_hop, @@ -215,28 +247,28 @@ def forward( # As such, we rely on a static solution to pass optional edge # weights and edge attributes to the module. 
if self.supports_edge_weight and self.supports_edge_attr: - x = self.convs[i](x, edge_index, edge_weight=edge_weight, - edge_attr=edge_attr) + x = conv(x, edge_index, edge_weight=edge_weight, + edge_attr=edge_attr) elif self.supports_edge_weight: - x = self.convs[i](x, edge_index, edge_weight=edge_weight) + x = conv(x, edge_index, edge_weight=edge_weight) elif self.supports_edge_attr: - x = self.convs[i](x, edge_index, edge_attr=edge_attr) + x = conv(x, edge_index, edge_attr=edge_attr) else: - x = self.convs[i](x, edge_index) - if i == self.num_layers - 1 and self.jk_mode is None: - break - if self.act is not None and self.act_first: - x = self.act(x) - if self.norms is not None: - x = self.norms[i](x) - if self.act is not None and not self.act_first: - x = self.act(x) - x = self.dropout(x) - if hasattr(self, 'jk'): - xs.append(x) + x = conv(x, edge_index) + + if i < self.num_layers - 1 or self.jk_mode is not None: + if self.act is not None and self.act_first: + x = self.act(x) + x = norm(x) + if self.act is not None and not self.act_first: + x = self.act(x) + x = self.dropout(x) + if hasattr(self, 'jk'): + xs.append(x) x = self.jk(xs) if hasattr(self, 'jk') else x x = self.lin(x) if hasattr(self, 'lin') else x + return x @torch.no_grad() @@ -328,6 +360,76 @@ def inference( return x_all + def jittable(self, use_sparse_tensor: bool = False) -> 'BasicGNN': + r"""Produces a new jittable instance module that can be used in + combination with :meth:`torch.jit.script`.""" + class EdgeIndexJittable(torch.nn.Module): + def __init__(self, child: BasicGNN): + super().__init__() + self.child = child + + def reset_parameters(self): + self.child.reset_parameters() + + def forward( + self, + x: Tensor, + edge_index: Tensor, + edge_weight: OptTensor = None, + edge_attr: OptTensor = None, + num_sampled_nodes_per_hop: Optional[List[int]] = None, + num_sampled_edges_per_hop: Optional[List[int]] = None, + ) -> Tensor: + return self.child( + x, + edge_index, + edge_weight, + edge_attr, + num_sampled_nodes_per_hop, + num_sampled_edges_per_hop, + ) + + def __repr__(self) -> str: + return str(self.child) + + class SparseTensorJittable(torch.nn.Module): + def __init__(self, child: BasicGNN): + super().__init__() + self.child = child + + def reset_parameters(self): + self.child.reset_parameters() + + def forward( + self, + x: Tensor, + edge_index: SparseTensor, + edge_weight: OptTensor = None, + edge_attr: OptTensor = None, + num_sampled_nodes_per_hop: Optional[List[int]] = None, + num_sampled_edges_per_hop: Optional[List[int]] = None, + ) -> Tensor: + return self.child( + x, + edge_index, + edge_weight, + edge_attr, + num_sampled_nodes_per_hop, + num_sampled_edges_per_hop, + ) + + def __repr__(self) -> str: + return str(self.child) + + out = copy.deepcopy(self) + convs = [conv.jittable() for conv in out.convs] + out.convs = torch.nn.ModuleList(convs) + out._trim = None # TODO Trimming is currently not support in JIT mode. + + if use_sparse_tensor: + return SparseTensorJittable(out) + return EdgeIndexJittable(out) + def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.in_channels}, ' f'{self.out_channels}, num_layers={self.num_layers})') @@ -368,8 +470,8 @@ class GCN(BasicGNN): **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.GCNConv`. 
""" - supports_edge_weight = True - supports_edge_attr = False + supports_edge_weight: Final[bool] = True + supports_edge_attr: Final[bool] = False def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -412,8 +514,8 @@ class GraphSAGE(BasicGNN): **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.SAGEConv`. """ - supports_edge_weight = False - supports_edge_attr = False + supports_edge_weight: Final[bool] = False + supports_edge_attr: Final[bool] = False def init_conv(self, in_channels: Union[int, Tuple[int, int]], out_channels: int, **kwargs) -> MessagePassing: @@ -453,8 +555,8 @@ class GIN(BasicGNN): **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.GINConv`. """ - supports_edge_weight = False - supports_edge_attr = False + supports_edge_weight: Final[bool] = False + supports_edge_attr: Final[bool] = False def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -511,8 +613,8 @@ class GAT(BasicGNN): :class:`torch_geometric.nn.conv.GATConv` or :class:`torch_geometric.nn.conv.GATv2Conv`. """ - supports_edge_weight = False - supports_edge_attr = True + supports_edge_weight: Final[bool] = False + supports_edge_attr: Final[bool] = True def init_conv(self, in_channels: Union[int, Tuple[int, int]], out_channels: int, **kwargs) -> MessagePassing: @@ -573,8 +675,8 @@ class PNA(BasicGNN): **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.PNAConv`. """ - supports_edge_weight = False - supports_edge_attr = True + supports_edge_weight: Final[bool] = False + supports_edge_attr: Final[bool] = True def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -614,8 +716,8 @@ class EdgeCNN(BasicGNN): **kwargs (optional): Additional arguments of :class:`torch_geometric.nn.conv.EdgeConv`. 
""" - supports_edge_weight = False - supports_edge_attr = False + supports_edge_weight: Final[bool] = False + supports_edge_attr: Final[bool] = False def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -629,4 +731,11 @@ def init_conv(self, in_channels: int, out_channels: int, return EdgeConv(mlp, **kwargs) -__all__ = ['GCN', 'GraphSAGE', 'GIN', 'GAT', 'PNA', 'EdgeCNN'] +__all__ = [ + 'GCN', + 'GraphSAGE', + 'GIN', + 'GAT', + 'PNA', + 'EdgeCNN', +] diff --git a/torch_geometric/nn/models/linkx.py b/torch_geometric/nn/models/linkx.py index b9691201261e..8a9c6685e643 100644 --- a/torch_geometric/nn/models/linkx.py +++ b/torch_geometric/nn/models/linkx.py @@ -31,16 +31,16 @@ def reset_parameters(self): inits.uniform(self.in_channels, self.bias) @torch.jit._overload_method - def forward(self, edge_index, edge_weight=None): - # type: (SparseTensor, OptTensor) -> Tensor + def forward(self, edge_index, edge_weight=None): # noqa + # type: (Tensor, OptTensor) -> Tensor pass @torch.jit._overload_method - def forward(self, edge_index, edge_weight=None): - # type: (Tensor, OptTensor) -> Tensor + def forward(self, edge_index, edge_weight=None): # noqa + # type: (SparseTensor, OptTensor) -> Tensor pass - def forward( + def forward( # noqa self, edge_index: Adj, edge_weight: OptTensor = None, @@ -182,44 +182,51 @@ def forward( return self.final_mlp(out.relu_()) - def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover - edge_index_type = typing.split(',')[1].strip() - + def jittable(self, use_sparse_tensor: bool = False) -> torch.nn.Module: class EdgeIndexJittable(torch.nn.Module): - def __init__(self, child): + def __init__(self, child: LINKX): super().__init__() self.child = child def reset_parameters(self): self.child.reset_parameters() - def forward(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: + def forward( + self, + x: Tensor, + edge_index: Tensor, + edge_weight: OptTensor = None, + ) -> Tensor: return self.child(x, edge_index, edge_weight) + def __repr__(self) -> str: + return str(self.child) + class SparseTensorJittable(torch.nn.Module): - def __init__(self, child): + def __init__(self, child: LINKX): super().__init__() self.child = child def reset_parameters(self): self.child.reset_parameters() - def forward(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None): + def forward( + self, + x: Tensor, + edge_index: SparseTensor, + edge_weight: OptTensor = None, + ): return self.child(x, edge_index, edge_weight) + def __repr__(self) -> str: + return str(self.child) + if self.edge_lin.jittable is not None: self.edge_lin = self.edge_lin.jittable() - if 'Tensor' == edge_index_type: - jittable_module = EdgeIndexJittable(self) - elif 'SparseTensor' == edge_index_type: - jittable_module = SparseTensorJittable(self) - else: - raise ValueError(f"Could not parse types '{typing}'") - - return jittable_module + if use_sparse_tensor: + return SparseTensorJittable(self) + return EdgeIndexJittable(self) def __repr__(self) -> str: return (f'{self.__class__.__name__}(num_nodes={self.num_nodes}, ' diff --git a/torch_geometric/nn/models/rect.py b/torch_geometric/nn/models/rect.py index d4e70c81d245..d17569b0cec5 100644 --- a/torch_geometric/nn/models/rect.py +++ b/torch_geometric/nn/models/rect.py @@ -51,48 +51,58 @@ def reset_parameters(self): torch.nn.init.xavier_uniform_(self.lin.weight.data) @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (Tensor, SparseTensor, 
OptTensor) -> Tensor + def forward(self, x, edge_index, edge_weight=None): # noqa + # type: (Tensor, Tensor, OptTensor) -> Tensor pass @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (Tensor, Tensor, OptTensor) -> Tensor + def forward(self, x, edge_index, edge_weight=None): # noqa + # type: (Tensor, SparseTensor, OptTensor) -> Tensor pass - def forward(self, x: Tensor, edge_index: Adj, - edge_weight: OptTensor = None) -> Tensor: + def forward( # noqa + self, + x: Tensor, + edge_index: Adj, + edge_weight: OptTensor = None, + ) -> Tensor: """""" x = self.conv(x, edge_index, edge_weight) x = F.dropout(x, p=self.dropout, training=self.training) return self.lin(x) @torch.jit._overload_method - def embed(self, x, edge_index, edge_weight=None): - # type: (Tensor, SparseTensor, OptTensor) -> Tensor + def embed(self, x, edge_index, edge_weight=None): # noqa + # type: (Tensor, Tensor, OptTensor) -> Tensor pass @torch.jit._overload_method - def embed(self, x, edge_index, edge_weight=None): - # type: (Tensor, Tensor, OptTensor) -> Tensor + def embed(self, x, edge_index, edge_weight=None): # noqa + # type: (Tensor, SparseTensor, OptTensor) -> Tensor pass - def embed(self, x: Tensor, edge_index: Adj, - edge_weight: OptTensor = None) -> Tensor: + def embed( # noqa + self, + x: Tensor, + edge_index: Adj, + edge_weight: OptTensor = None, + ) -> Tensor: with torch.no_grad(): return self.conv(x, edge_index, edge_weight) - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: + def get_semantic_labels( + self, + x: Tensor, + y: Tensor, + mask: Tensor, + ) -> Tensor: r"""Replaces the original labels by their class-centers.""" with torch.no_grad(): y = y[mask] mean = scatter(x[mask], y, dim=0, reduce='mean') return mean[y] - def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover - edge_index_type = typing.split(',')[1].strip() - + def jittable(self, use_sparse_tensor: bool = False) -> torch.nn.Module: class EdgeIndexJittable(torch.nn.Module): def __init__(self, child: RECT_L): super().__init__() @@ -102,20 +112,35 @@ def __init__(self, child: RECT_L): def reset_parameters(self): self.child.reset_parameters() - def forward(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: + def forward( + self, + x: Tensor, + edge_index: Tensor, + edge_weight: OptTensor = None, + ) -> Tensor: return self.child(x, edge_index, edge_weight) @torch.jit.export - def embed(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: + def embed( + self, + x: Tensor, + edge_index: Tensor, + edge_weight: OptTensor = None, + ) -> Tensor: return self.child.embed(x, edge_index, edge_weight) @torch.jit.export - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: + def get_semantic_labels( + self, + x: Tensor, + y: Tensor, + mask: Tensor, + ) -> Tensor: return self.child.get_semantic_labels(x, y, mask) + def __repr__(self) -> str: + return str(self.child) + class SparseTensorJittable(torch.nn.Module): def __init__(self, child: RECT_L): super().__init__() @@ -125,28 +150,38 @@ def __init__(self, child: RECT_L): def reset_parameters(self): self.child.reset_parameters() - def forward(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None): + def forward( + self, + x: Tensor, + edge_index: SparseTensor, + edge_weight: OptTensor = None, + ): return self.child(x, edge_index, edge_weight) @torch.jit.export - def embed(self, x: Tensor, edge_index: SparseTensor, - 
edge_weight: OptTensor = None) -> Tensor: + def embed( + self, + x: Tensor, + edge_index: SparseTensor, + edge_weight: OptTensor = None, + ) -> Tensor: return self.child.embed(x, edge_index, edge_weight) @torch.jit.export - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: + def get_semantic_labels( + self, + x: Tensor, + y: Tensor, + mask: Tensor, + ) -> Tensor: return self.child.get_semantic_labels(x, y, mask) - if 'Tensor' == edge_index_type: - jittable_module = EdgeIndexJittable(self) - elif 'SparseTensor' == edge_index_type: - jittable_module = SparseTensorJittable(self) - else: - raise ValueError(f"Could not parse types '{typing}'") + def __repr__(self) -> str: + return str(self.child) - return jittable_module + if use_sparse_tensor: + return SparseTensorJittable(self) + return EdgeIndexJittable(self) def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.in_channels}, ' diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index 885d6ba7c90f..bd6a2452f53e 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -95,7 +95,7 @@ def forward( x: Tensor, edge_index: Adj, edge_attr: Optional[Tensor] = None, - ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + ) -> Tuple[Tensor, Adj, Optional[Tensor]]: if (not isinstance(num_sampled_nodes_per_hop, list) and isinstance(num_sampled_edges_per_hop, list)): From e95cdaf058f77dcc43d1e11d5d5460cfb013ca39 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Thu, 10 Aug 2023 13:23:55 +0100 Subject: [PATCH 1401/2432] [Code Coverage] `loader/utils.py` (#7857) Part of #6528. --------- Co-authored-by: rusty1s --- test/loader/test_utils.py | 16 ++++++++++++++++ torch_geometric/loader/utils.py | 27 +++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 test/loader/test_utils.py diff --git a/test/loader/test_utils.py b/test/loader/test_utils.py new file mode 100644 index 000000000000..f547596e8d42 --- /dev/null +++ b/test/loader/test_utils.py @@ -0,0 +1,16 @@ +import pytest +import torch + +from torch_geometric.loader.utils import index_select + + +def test_index_select(): + x = torch.randn(3, 5) + index = torch.tensor([0, 2]) + assert torch.equal(index_select(x, index), x[index]) + assert torch.equal(index_select(x, index, dim=-1), x[..., index]) + + +def test_index_select_out_of_range(): + with pytest.raises(IndexError, match="out of range"): + index_select(torch.randn(3, 5), torch.tensor([0, 2, 3])) diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 835ce62fa281..5da757319ee3 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -29,10 +29,29 @@ ) -def index_select(value: FeatureTensorType, index: Tensor, - dim: int = 0) -> Tensor: - - # PyTorch currently only supports indexing via `torch.int64` :( +def index_select( + value: FeatureTensorType, + index: Tensor, + dim: int = 0, +) -> Tensor: + r"""Indexes the :obj:`value` tensor along dimension :obj:`dim` using the + entries in :obj:`index`. + + Args: + value (torch.Tensor or np.ndarray): The input tensor. + index (torch.Tensor): The 1-D tensor containing the indices to index. + dim (int, optional): The dimension in which to index. + (default: :obj:`0`) + + .. warning:: + + :obj:`index` is casted to a :obj:`torch.int64` tensor internally, as + `PyTorch currently only supports indexing + `_ via + :obj:`torch.int64`. 
+ """ + # PyTorch currently only supports indexing via `torch.int64`: + # https://github.com/pytorch/pytorch/issues/61819 index = index.to(torch.int64) if isinstance(value, Tensor): From c80d57818f18f7dda3880fe2747f307204a16295 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 10 Aug 2023 14:40:39 +0200 Subject: [PATCH 1402/2432] Fix typo in `BasicGNN.forward` doc-string (#7868) --- torch_geometric/nn/models/basic_gnn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index 155b9a8368b1..3b8d312094c6 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -209,12 +209,12 @@ def forward( # noqa by the underlying GNN layer). (default: :obj:`None`) num_sampled_nodes_per_hop (List[int], optional): The number of sampled nodes per hop. - Useful in :class:~torch_geometric.loader.NeighborLoader` + Useful in :class:`~torch_geometric.loader.NeighborLoader` scenarios to only operate on minimal-sized representations. (default: :obj:`None`) num_sampled_edges_per_hop (List[int], optional): The number of sampled edges per hop. - Useful in :class:~torch_geometric.loader.NeighborLoader` + Useful in :class:`~torch_geometric.loader.NeighborLoader` scenarios to only operate on minimal-sized representations. (default: :obj:`None`) """ From bdf92ce0a42a60218fcee56ed26978fd62eee265 Mon Sep 17 00:00:00 2001 From: Hatem Helal Date: Thu, 10 Aug 2023 13:46:53 +0100 Subject: [PATCH 1403/2432] Add smiles string to `QM9` dataset (#7867) The smiles string is useful for many molecular GNNs that try and learn the mapping between the SMILES representation and molecular properties. This change relies on the dataset being pre-processed with RDKit. --------- Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/datasets/qm9.py | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 057c48668aa2..1d5618e74da9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Updated `QM9` data pre-processing to include the SMILES string ([#7867](https://github.com/pyg-team/pytorch_geometric/pull/7867)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Fixed device issue in `PNAConv.get_degree_histogram` ([#7830](https://github.com/pyg-team/pytorch_geometric/pull/7830)) - Fixed the shape of `edge_label_time` when using temporal sampling on homogeneous graphs ([#7807](https://github.com/pyg-team/pytorch_geometric/pull/7807)) diff --git a/torch_geometric/datasets/qm9.py b/torch_geometric/datasets/qm9.py index c719b0d57ec7..f9b47972655e 100644 --- a/torch_geometric/datasets/qm9.py +++ b/torch_geometric/datasets/qm9.py @@ -288,9 +288,19 @@ def process(self): y = target[i].unsqueeze(0) name = mol.GetProp('_Name') - - data = Data(x=x, z=z, pos=pos, edge_index=edge_index, - edge_attr=edge_attr, y=y, name=name, idx=i) + smiles = Chem.MolToSmiles(mol, isomericSmiles=True) + + data = Data( + x=x, + z=z, + pos=pos, + edge_index=edge_index, + smiles=smiles, + edge_attr=edge_attr, + y=y, + name=name, + idx=i, + ) if self.pre_filter is not None and not self.pre_filter(data): continue From 7ac4654db727df73d1443c5102020abe859e5eb6 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 11 Aug 2023 10:25:51 +0200 Subject: [PATCH 1404/2432] Fix typos in `pyproject.toml` (#7872) --- codecov.yml | 2 +- pyproject.toml | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/codecov.yml b/codecov.yml index 019fd7b42729..c2a07adaa059 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,4 +1,4 @@ -# see https://docs.codecov.io/docs/codecov-yaml +# See: https://docs.codecov.io/docs/codecov-yaml coverage: range: 80..100 round: down diff --git a/pyproject.toml b/pyproject.toml index 649ea8246cea..8827ce8bd04d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors=[ ] description="Graph Neural Network Library for PyTorch" readme="README.md" -requires-python=">=3.7" +requires-python=">=3.8" keywords=[ "deep-learning", "pytorch", @@ -22,7 +22,6 @@ classifiers=[ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: MIT License", "Programming Language :: Python", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -105,13 +104,16 @@ based_on_style = "pep8" split_before_named_assigns = false blank_line_before_nested_class_or_def = false +[tool.flake8] +ignore = ["F811", "W503", "W504"] + [tool.pyright] include = ["torch_geometric/utils/*"] [tool.isort] multi_line_output = 3 include_trailing_comma = true -skip = [".gitingore", "__init__.py"] +skip = [".gitignore", "__init__.py"] [tool.pytest.ini_options] addopts = "--capture=no" @@ -163,10 +165,5 @@ exclude_lines = [ "pass", "raise NotImplementedError", "register_parameter", - "warn", "torch.cuda.is_available", - "WITH_PT2", ] - -[tool.flake8] -ignore = ["F811", "W503", "W504"] From 60ea78bc2fbb07bf10080889ab27265e6da1ee31 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 14 Aug 2023 09:20:02 +0200 Subject: [PATCH 1405/2432] Fix broken `RECT` and `LINKX` tests (#7878) --- test/nn/models/test_linkx.py | 6 ++---- test/nn/models/test_rect.py | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/test/nn/models/test_linkx.py b/test/nn/models/test_linkx.py index 6c8d5dc01fd9..d85959dcb4b7 100644 --- a/test/nn/models/test_linkx.py +++ 
b/test/nn/models/test_linkx.py @@ -25,13 +25,11 @@ def test_linkx(num_edge_layers): assert torch.allclose(out, model(x, adj.t()), atol=1e-6) if is_full_test(): - t = '(OptTensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(model.jittable(t)) + jit = torch.jit.script(model.jittable()) assert torch.allclose(jit(x, edge_index), out) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = '(OptTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(model.jittable(t)) + jit = torch.jit.script(model.jittable(use_sparse_tensor=True)) assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) out = model(None, edge_index) diff --git a/test/nn/models/test_rect.py b/test/nn/models/test_rect.py index 6364dbe211b9..3cfe2e25faf2 100644 --- a/test/nn/models/test_rect.py +++ b/test/nn/models/test_rect.py @@ -32,15 +32,13 @@ def test_rect(): assert labeds_out.size() == (int(mask.sum()), 8) if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(model.jittable(t)) + jit = torch.jit.script(model.jittable()) assert torch.allclose(jit(x, edge_index), out) assert torch.allclose(embed_out, jit.embed(x, edge_index)) assert torch.allclose(labeds_out, jit.get_semantic_labels(x, y, mask)) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(model.jittable(t)) + jit = torch.jit.script(model.jittable(use_sparse_tensor=True)) assert torch.allclose(jit(x, adj.t()), out) assert torch.allclose(embed_out, jit.embed(x, adj.t())) assert torch.allclose(labeds_out, jit.get_semantic_labels(x, y, mask)) From 4d832a6af96b1b6300d6c81fcf298154d49aa885 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Tue, 15 Aug 2023 11:40:57 -0700 Subject: [PATCH 1406/2432] Multi-GPU Taobao example (#7822) on Luna (8xA100): 1 gpu: ~22 it/s 8 gpu: ~15 it/s --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Akihiro Nitta --- examples/multi_gpu/taobao_multigpu_large.py | 270 ++++++++++++++++++++ 1 file changed, 270 insertions(+) create mode 100644 examples/multi_gpu/taobao_multigpu_large.py diff --git a/examples/multi_gpu/taobao_multigpu_large.py b/examples/multi_gpu/taobao_multigpu_large.py new file mode 100644 index 000000000000..588560816091 --- /dev/null +++ b/examples/multi_gpu/taobao_multigpu_large.py @@ -0,0 +1,270 @@ +# An Multi GPU implementation of unsupervised bipartite GraphSAGE +# using the Alibaba Taobao dataset. 
+import os +import os.path as osp + +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.nn.functional as F +import tqdm +from sklearn.metrics import roc_auc_score +from torch.nn import Embedding, Linear +from torch.nn.parallel import DistributedDataParallel + +import torch_geometric.transforms as T +from torch_geometric.datasets import Taobao +from torch_geometric.loader import LinkNeighborLoader +from torch_geometric.nn import SAGEConv +from torch_geometric.utils.convert import to_scipy_sparse_matrix + + +class ItemGNNEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.conv1 = SAGEConv(-1, hidden_channels) + self.conv2 = SAGEConv(hidden_channels, hidden_channels) + self.lin = Linear(hidden_channels, out_channels) + + def forward(self, x, edge_index): + x = self.conv1(x, edge_index).relu() + x = self.conv2(x, edge_index).relu() + return self.lin(x) + + +class UserGNNEncoder(torch.nn.Module): + def __init__(self, hidden_channels, out_channels): + super().__init__() + self.conv1 = SAGEConv((-1, -1), hidden_channels) + self.conv2 = SAGEConv((-1, -1), hidden_channels) + self.conv3 = SAGEConv((-1, -1), hidden_channels) + self.lin = Linear(hidden_channels, out_channels) + + def forward(self, x_dict, edge_index_dict): + item_x = self.conv1( + x_dict['item'], + edge_index_dict[('item', 'to', 'item')], + ).relu() + + user_x = self.conv2( + (x_dict['item'], x_dict['user']), + edge_index_dict[('item', 'rev_to', 'user')], + ).relu() + + user_x = self.conv3( + (item_x, user_x), + edge_index_dict[('item', 'rev_to', 'user')], + ).relu() + + return self.lin(user_x) + + +class EdgeDecoder(torch.nn.Module): + def __init__(self, hidden_channels): + super().__init__() + self.lin1 = Linear(2 * hidden_channels, hidden_channels) + self.lin2 = Linear(hidden_channels, 1) + + def forward(self, z_src, z_dst, edge_label_index): + row, col = edge_label_index + z = torch.cat([z_src[row], z_dst[col]], dim=-1) + + z = self.lin1(z).relu() + z = self.lin2(z) + return z.view(-1) + + +class Model(torch.nn.Module): + def __init__(self, num_users, num_items, hidden_channels, out_channels): + super().__init__() + self.user_emb = Embedding(num_users, hidden_channels) + self.item_emb = Embedding(num_items, hidden_channels) + self.item_encoder = ItemGNNEncoder(hidden_channels, out_channels) + self.user_encoder = UserGNNEncoder(hidden_channels, out_channels) + self.decoder = EdgeDecoder(out_channels) + + def forward(self, x_dict, edge_index_dict, edge_label_index): + z_dict = {} + x_dict['user'] = self.user_emb(x_dict['user']) + x_dict['item'] = self.item_emb(x_dict['item']) + z_dict['item'] = self.item_encoder( + x_dict['item'], + edge_index_dict[('item', 'to', 'item')], + ) + z_dict['user'] = self.user_encoder(x_dict, edge_index_dict) + + return self.decoder(z_dict['user'], z_dict['item'], edge_label_index) + + +def run_train(rank, data, train_data, val_data, test_data, world_size): + if rank == 0: + print("Setting up Data Loaders...") + train_edge_label_idx = train_data[('user', 'to', 'item')].edge_label_index + train_edge_label_idx = train_edge_label_idx.split( + train_edge_label_idx.size(1) // world_size, dim=1)[rank].clone() + train_loader = LinkNeighborLoader( + data=train_data, + num_neighbors=[8, 4], + edge_label_index=(('user', 'to', 'item'), train_edge_label_idx), + neg_sampling='binary', + batch_size=2048, + shuffle=True, + num_workers=16, + drop_last=True, + ) + + val_loader = LinkNeighborLoader( + data=val_data, + 
num_neighbors=[8, 4], + edge_label_index=( + ('user', 'to', 'item'), + val_data[('user', 'to', 'item')].edge_label_index, + ), + edge_label=val_data[('user', 'to', 'item')].edge_label, + batch_size=2048, + shuffle=False, + num_workers=16, + ) + + test_loader = LinkNeighborLoader( + data=test_data, + num_neighbors=[8, 4], + edge_label_index=( + ('user', 'to', 'item'), + test_data[('user', 'to', 'item')].edge_label_index, + ), + edge_label=test_data[('user', 'to', 'item')].edge_label, + batch_size=2048, + shuffle=False, + num_workers=16, + ) + + def train(): + model.train() + + total_loss = total_examples = 0 + for batch in tqdm.tqdm(train_loader): + batch = batch.to(rank) + optimizer.zero_grad() + + pred = model( + batch.x_dict, + batch.edge_index_dict, + batch['user', 'item'].edge_label_index, + ) + loss = F.binary_cross_entropy_with_logits( + pred, batch['user', 'item'].edge_label) + + loss.backward() + optimizer.step() + total_loss += float(loss) + total_examples += pred.numel() + + return total_loss / total_examples + + @torch.no_grad() + def test(loader): + model.eval() + + preds, targets = [], [] + for batch in tqdm.tqdm(loader): + batch = batch.to(rank) + + pred = model( + batch.x_dict, + batch.edge_index_dict, + batch['user', 'item'].edge_label_index, + ).sigmoid().view(-1).cpu() + target = batch['user', 'item'].edge_label.long().cpu() + + preds.append(pred) + targets.append(target) + + pred = torch.cat(preds, dim=0).numpy() + target = torch.cat(targets, dim=0).numpy() + + return roc_auc_score(target, pred) + + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '12355' + dist.init_process_group('nccl', rank=rank, world_size=world_size) + model = Model( + num_users=data['user'].num_nodes, + num_items=data['item'].num_nodes, + hidden_channels=64, + out_channels=64, + ).to(rank) + # Initialize lazy modules + for batch in train_loader: + batch = batch.to(rank) + _ = model( + batch.x_dict, + batch.edge_index_dict, + batch['user', 'item'].edge_label_index, + ) + break + model = DistributedDataParallel(model, device_ids=[rank]) + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + for epoch in range(1, 21): + loss = train() + if rank == 0: + val_auc = test(val_loader) + test_auc = test(test_loader) + if rank == 0: + print(f'Epoch: {epoch:02d}, Loss: {loss:4f}, Val: {val_auc:.4f}, ' + f'Test: {test_auc:.4f}') + + +if __name__ == '__main__': + path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data/Taobao') + + dataset = Taobao(path) + data = dataset[0] + + data['user'].x = torch.arange(0, data['user'].num_nodes) + data['item'].x = torch.arange(0, data['item'].num_nodes) + + # Only consider user<>item relationships for simplicity: + del data['category'] + del data['item', 'category'] + del data['user', 'item'].time + del data['user', 'item'].behavior + + # Add a reverse ('item', 'rev_to', 'user') relation for message passing: + data = T.ToUndirected()(data) + + # Perform a link-level split into training, validation, and test edges: + print('Computing data splits...') + train_data, val_data, test_data = T.RandomLinkSplit( + num_val=0.1, + num_test=0.1, + neg_sampling_ratio=1.0, + add_negative_train_samples=False, + edge_types=[('user', 'to', 'item')], + rev_edge_types=[('item', 'rev_to', 'user')], + )(data) + print('Done!') + + # Compute sparsified item<>item relationships through users: + print('Computing item<>item relationships...') + mat = to_scipy_sparse_matrix(data['user', 'item'].edge_index).tocsr() + mat = mat[:data['user'].num_nodes, 
:data['item'].num_nodes] + comat = mat.T @ mat + comat.setdiag(0) + comat = comat >= 3. + comat = comat.tocoo() + row = torch.from_numpy(comat.row).to(torch.long) + col = torch.from_numpy(comat.col).to(torch.long) + item_to_item_edge_index = torch.stack([row, col], dim=0) + + # Add the generated item<>item relationships for high-order information: + train_data['item', 'item'].edge_index = item_to_item_edge_index + val_data['item', 'item'].edge_index = item_to_item_edge_index + test_data['item', 'item'].edge_index = item_to_item_edge_index + print('Done!') + + world_size = torch.cuda.device_count() + print('Let\'s use', world_size, 'GPUs!') + mp.spawn(run_train, + args=(data, train_data, val_data, test_data, world_size), + nprocs=world_size, join=True) From cb16ca230be0993f3206a6d5e4d497d53c9c213f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 16 Aug 2023 08:59:29 +0200 Subject: [PATCH 1407/2432] Fix `model_summary` on modules with uninitialized parameters (#7884) --- CHANGELOG.md | 1 + test/nn/test_model_summary.py | 24 ++++++++++++++++++++++++ torch_geometric/nn/summary.py | 8 ++++++-- 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d5618e74da9..49fe53f4f98f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed `model_summary` on modules with uninitialized parameters ([#7884](https://github.com/pyg-team/pytorch_geometric/pull/7884)) - Updated `QM9` data pre-processing to include the SMILES string ([#7867](https://github.com/pyg-team/pytorch_geometric/pull/7867)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Fixed device issue in `PNAConv.get_degree_histogram` ([#7830](https://github.com/pyg-team/pytorch_geometric/pull/7830)) diff --git a/test/nn/test_model_summary.py b/test/nn/test_model_summary.py index 0f2926d3255c..4efe8c3b599f 100644 --- a/test/nn/test_model_summary.py +++ b/test/nn/test_model_summary.py @@ -92,6 +92,30 @@ def test_summary_with_sparse_tensor(gcn): assert summary(gcn['model'], gcn['x'], gcn['adj_t']) == expected[1:-1] +@withPackage('tabulate') +def test_lazy_gcn(): + expected = """ ++---------------------+--------------------+----------------+----------+ +| Layer | Input Shape | Output Shape | #Param | +|---------------------+--------------------+----------------+----------| +| GCN | [100, 32], [2, 20] | [100, 32] | -1 | +| ├─(dropout)Dropout | [100, 16] | [100, 16] | -- | +| ├─(act)ReLU | [100, 16] | [100, 16] | -- | +| ├─(convs)ModuleList | -- | -- | -1 | +| │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | -1 | +| │ └─(1)GCNConv | [100, 16], [2, 20] | [100, 32] | 544 | +| ├─(norms)ModuleList | -- | -- | -- | +| │ └─(0)Identity | [100, 16] | [100, 16] | -- | +| │ └─(1)Identity | -- | -- | -- | ++---------------------+--------------------+----------------+----------+ +""" + model = GCN(-1, 16, num_layers=2, out_channels=32) + x = torch.randn(100, 32) + edge_index = torch.randint(100, size=(2, 20)) + + assert summary(model, x, edge_index) == expected[1:-1] + + @withPackage('tabulate') def test_summary_with_max_depth(gcn): expected = """ diff --git a/torch_geometric/nn/summary.py b/torch_geometric/nn/summary.py index f37dd73d8552..8256bacc9c8c 100644 --- a/torch_geometric/nn/summary.py +++ b/torch_geometric/nn/summary.py @@ -6,6 +6,7 @@ from torch.nn import Module from torch_geometric.nn.conv import MessagePassing 
+from torch_geometric.nn.dense.linear import is_uninitialized_parameter from torch_geometric.typing import SparseTensor @@ -88,8 +89,11 @@ def hook(module, inputs, output): info['input_shape'] = input_shape[module_id] info['output_shape'] = output_shape[module_id] info['depth'] = depth - num_params = sum(p.numel() for p in module.parameters()) - info['#param'] = f'{num_params:,}' if num_params > 0 else '--' + if any([is_uninitialized_parameter(p) for p in module.parameters()]): + info['#param'] = '-1' + else: + num_params = sum(p.numel() for p in module.parameters()) + info['#param'] = f'{num_params:,}' if num_params > 0 else '--' info_list.append(info) if not isinstance(module, ScriptModule): From 2b33bca71eb0870f012cda5d8f12f4a45478af57 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 16 Aug 2023 14:35:04 +0200 Subject: [PATCH 1408/2432] Add note for broken URLs in `ShapeNet` and `S3DIS` (#7889) --- torch_geometric/datasets/s3dis.py | 4 ++++ torch_geometric/datasets/shapenet.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/torch_geometric/datasets/s3dis.py b/torch_geometric/datasets/s3dis.py index 570b5b42e7a6..bfc9bc809022 100644 --- a/torch_geometric/datasets/s3dis.py +++ b/torch_geometric/datasets/s3dis.py @@ -43,6 +43,10 @@ class S3DIS(InMemoryDataset): url = ('https://shapenet.cs.stanford.edu/media/' 'indoor3d_sem_seg_hdf5_data.zip') + # In case `shapenet.cs.stanford.edu` is offline, try to download the data + # from here: + # https://cvg-data.inf.ethz.ch/s3dis/ + def __init__( self, root: str, diff --git a/torch_geometric/datasets/shapenet.py b/torch_geometric/datasets/shapenet.py index 16dcc9a12446..306a344bbe08 100644 --- a/torch_geometric/datasets/shapenet.py +++ b/torch_geometric/datasets/shapenet.py @@ -76,6 +76,10 @@ class ShapeNet(InMemoryDataset): url = ('https://shapenet.cs.stanford.edu/media/' 'shapenetcore_partanno_segmentation_benchmark_v0_normal.zip') + # In case `shapenet.cs.stanford.edu` is offline, try to download the data + # from Kaggle instead (requires login): + # https://www.kaggle.com/datasets/mitkir/shapenet/download?datasetVersionNumber=1 + category_ids = { 'Airplane': '02691156', 'Bag': '02773838', From 58e325fcfd52ea13ae5c676e9b892b369eac551e Mon Sep 17 00:00:00 2001 From: Moritz Blum <31183934+moritzblum@users.noreply.github.com> Date: Thu, 17 Aug 2023 21:02:21 +0900 Subject: [PATCH 1409/2432] `Wikidata5M` dataset (#7864) implements the Wikidata5m transductive and inductive link prediction datasets --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/wikidata.py | 131 +++++++++++++++++++++++++++ 3 files changed, 134 insertions(+) create mode 100644 torch_geometric/datasets/wikidata.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 49fe53f4f98f..456be23a1e3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `Wikidata5M` dataset ([#7864](https://github.com/pyg-team/pytorch_geometric/pull/7864)) - Added TorchScript support inside `BasicGNN` models ([#7865](https://github.com/pyg-team/pytorch_geometric/pull/7865)) - Added a `batch_size` argument to `unbatch` functionalities ([#7851](https://github.com/pyg-team/pytorch_geometric/pull/7851)) - Added a distributed example using `graphlearn-for-pytorch` ([#7402](https://github.com/pyg-team/pytorch_geometric/pull/7402)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 399feed97b82..e7287d5633e3 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -71,6 +71,7 @@ from .hydro_net import HydroNet from .airfrans import AirfRANS from .jodie import JODIEDataset +from .wikidata import Wikidata5M from .dbp15k import DBP15K from .aminer import AMiner @@ -174,6 +175,7 @@ 'HydroNet', 'AirfRANS', 'JODIEDataset', + 'Wikidata5M', ] hetero_datasets = [ diff --git a/torch_geometric/datasets/wikidata.py b/torch_geometric/datasets/wikidata.py new file mode 100644 index 000000000000..c06b82e7cb46 --- /dev/null +++ b/torch_geometric/datasets/wikidata.py @@ -0,0 +1,131 @@ +import os +import os.path as osp +from typing import Callable, Dict, List, Optional + +import torch + +from torch_geometric.data import ( + Data, + InMemoryDataset, + download_url, + extract_tar, +) + + +class Wikidata5M(InMemoryDataset): + r"""The Wikidata-5M dataset from the `"KEPLER: A Unified Model for + Knowledge Embedding and Pre-trained Language Representation" + `_ paper, + containing 4,594,485 entities, 822 relations, + 20,614,279 train triples, 5,163 validation triples, and 5,133 test triples. + + `Wikidata-5M `_ + is a large-scale knowledge graph dataset with aligned corpus + extracted form Wikidata. + + Args: + root (str): Root directory where the dataset should be saved. + setting (str, optional): + If :obj:`"transductive"`, loads the transductive dataset. + If :obj:`"inductive"`, loads the inductive dataset. + (default: :obj:`"transductive"`) + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. 
(default: :obj:`None`) + """ + def __init__( + self, + root: str, + setting: str = 'transductive', + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + if setting not in {'transductive', 'inductive'}: + raise ValueError(f"Invalid 'setting' argument (got '{setting}')") + + self.setting = setting + + self.urls = [ + ('https://www.dropbox.com/s/7jp4ib8zo3i6m10/' + 'wikidata5m_text.txt.gz?dl=1'), + 'https://uni-bielefeld.sciebo.de/s/yuBKzBxsEc9j3hy/download', + ] + if self.setting == 'inductive': + self.urls.append('https://www.dropbox.com/s/csed3cgal3m7rzo/' + 'wikidata5m_inductive.tar.gz?dl=1') + else: + self.urls.append('https://www.dropbox.com/s/6sbhm0rwo4l73jq/' + 'wikidata5m_transductive.tar.gz?dl=1') + + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0]) + + @property + def raw_file_names(self) -> List[str]: + return [ + 'wikidata5m_text.txt.gz', + 'download', + f'wikidata5m_{self.setting}_train.txt', + f'wikidata5m_{self.setting}_valid.txt', + f'wikidata5m_{self.setting}_test.txt', + ] + + @property + def processed_file_names(self) -> str: + return f'{self.setting}_data.pt' + + def download(self): + for url in self.urls: + download_url(http://github.com/url,%20self.raw_dir) + path = osp.join(self.raw_dir, f'wikidata5m_{self.setting}.tar.gz') + extract_tar(path, self.raw_dir) + os.remove(path) + + def process(self): + import gzip + + entity_to_id: Dict[str, int] = {} + with gzip.open(self.raw_paths[0], 'rt') as f: + for i, line in enumerate(f): + values = line.strip().split('\t') + entity_to_id[values[0]] = i + + x = torch.load(self.raw_paths[1]) + + edge_index = [] + edge_type = [] + split_index = [] + + rel_to_id: Dict[str, int] = {} + for split, path in enumerate(self.raw_paths[2:]): + with open(path, 'r') as f: + for line in f: + head, rel, tail = line[:-1].split('\t') + edge_index.append([entity_to_id[head], entity_to_id[tail]]) + if rel not in rel_to_id: + rel_to_id[rel] = len(rel_to_id) + edge_type.append(rel_to_id[rel]) + split_index.append(split) + + edge_index = torch.tensor(edge_index).t().contiguous() + edge_type = torch.tensor(edge_type) + split_index = torch.tensor(split_index) + + data = Data( + x=x, + edge_index=edge_index, + edge_type=edge_type, + train_mask=split_index == 0, + val_mask=split_index == 1, + test_mask=split_index == 2, + ) + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) From e1fb0557c9529cbf25d7854107598bf0c9912950 Mon Sep 17 00:00:00 2001 From: Jintang Li Date: Thu, 17 Aug 2023 21:07:28 +0800 Subject: [PATCH 1410/2432] Added `HalfHop` graph upsampling augmentation (#7827) This PR adds the `HalfHop` graph upsampling augmentation from the ICML 2023 paper [Half-Hop: A graph upsampling approach for slowing down message passing](https://openreview.net/forum?id=lXczFIwQkv). According to the authors, `HalfHop` works by adding artificial slow nodes between neighbors to slow down message propagation, see below. 
![image](https://github.com/pyg-team/pytorch_geometric/assets/39986668/62448ab7-3d83-422a-82bc-6397dab55909) Reference: + https://github.com/nerdslab/halfhop cc: @mazabou --------- Co-authored-by: Jinu Sunil Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/transforms/test_half_hop.py | 46 ++++++++++++ torch_geometric/transforms/__init__.py | 2 + torch_geometric/transforms/half_hop.py | 97 ++++++++++++++++++++++++++ 4 files changed, 146 insertions(+) create mode 100644 test/transforms/test_half_hop.py create mode 100644 torch_geometric/transforms/half_hop.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 456be23a1e3f..43b13ca3b6b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `HalfHop` graph upsampling augmentation ([#7827](https://github.com/pyg-team/pytorch_geometric/pull/7827)) - Added the `Wikidata5M` dataset ([#7864](https://github.com/pyg-team/pytorch_geometric/pull/7864)) - Added TorchScript support inside `BasicGNN` models ([#7865](https://github.com/pyg-team/pytorch_geometric/pull/7865)) - Added a `batch_size` argument to `unbatch` functionalities ([#7851](https://github.com/pyg-team/pytorch_geometric/pull/7851)) diff --git a/test/transforms/test_half_hop.py b/test/transforms/test_half_hop.py new file mode 100644 index 000000000000..cc0fc6652894 --- /dev/null +++ b/test/transforms/test_half_hop.py @@ -0,0 +1,46 @@ +import torch + +from torch_geometric.data import Data +from torch_geometric.transforms import HalfHop + + +def test_half_hop(): + edge_index = torch.tensor([[0, 1, 1, 2, 0, 1, 2], [1, 0, 2, 1, 0, 1, 2]]) + x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + dtype=torch.float) + data = Data(x=x, edge_index=edge_index) + + transform = HalfHop() + assert str(transform) == 'HalfHop(alpha=0.5, p=1.0)' + data = transform(data) + + expected_edge_index = [[0, 1, 2, 0, 1, 1, 2, 3, 4, 5, 6, 1, 0, 2, 1], + [0, 1, 2, 3, 4, 5, 6, 1, 0, 2, 1, 3, 4, 5, 6]] + expected_x = torch.tensor( + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [3, 4, 5, 6], + [3, 4, 5, 6], [7, 8, 9, 10], [7, 8, 9, 10]], dtype=torch.float) + assert len(data) == 3 + assert data.num_nodes == 7 + assert data.edge_index.tolist() == expected_edge_index + assert torch.allclose(data.x, expected_x, atol=1e-4) + assert data.slow_node_mask.tolist() == [ + False, False, False, True, True, True, True + ] + + torch.manual_seed(1) + data = Data(x=x, edge_index=edge_index) + transform = HalfHop(p=0.5) + assert str(transform) == 'HalfHop(alpha=0.5, p=0.5)' + data = transform(data) + + expected_edge_index = [[1, 0, 1, 2, 0, 1, 2, 3, 4, 5, 1, 2, 1], + [0, 0, 1, 2, 3, 4, 5, 1, 2, 1, 3, 4, 5]] + expected_x = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], + [3, 4, 5, 6], [7, 8, 9, 10], [7, 8, 9, 10]], + dtype=torch.float) + assert data.num_nodes == 6 + assert data.edge_index.tolist() == expected_edge_index + assert torch.allclose(data.x, expected_x, atol=1e-4) + assert data.slow_node_mask.tolist() == [ + False, False, False, True, True, True + ] diff --git a/torch_geometric/transforms/__init__.py b/torch_geometric/transforms/__init__.py index aef8b7913eb9..703219424472 100644 --- a/torch_geometric/transforms/__init__.py +++ b/torch_geometric/transforms/__init__.py @@ -37,6 +37,7 @@ from .virtual_node import VirtualNode from .add_positional_encoding import AddLaplacianEigenvectorPE, AddRandomWalkPE from .feature_propagation 
import FeaturePropagation +from .half_hop import HalfHop from .distance import Distance from .cartesian import Cartesian @@ -106,6 +107,7 @@ 'AddLaplacianEigenvectorPE', 'AddRandomWalkPE', 'FeaturePropagation', + 'HalfHop', ] vision_transforms = [ diff --git a/torch_geometric/transforms/half_hop.py b/torch_geometric/transforms/half_hop.py new file mode 100644 index 000000000000..a03df145ea31 --- /dev/null +++ b/torch_geometric/transforms/half_hop.py @@ -0,0 +1,97 @@ +import torch + +from torch_geometric.data import Data +from torch_geometric.data.datapipes import functional_transform +from torch_geometric.transforms import BaseTransform + + +@functional_transform('half_hop') +class HalfHop(BaseTransform): + r"""Graph upsampling augmentation from the + `"Half-Hop: A graph upsampling approach for slowing down + message passing" `_ paper. + The graph is augmented by adding artificial slow nodes between neighbors + to slow down message propagation. (functional name: :obj:`half_hop`). + + .. note:: + `HalfHop` augmentation is not supported if `data` has + :attr:`edge_weight` or :attr:`edge_attr`. + + Args: + alpha (float, optional): The interpolation factor + used to compute slow node features + :math:`x = \alpha*x_src + (1-\alpha)*x_dst` (default: :obj:`0.5`) + p (float, optional): The probability of half-hopping + an edge. (default: :obj:`1.0`) + + .. code-block:: python + + import torch_geometric.transforms as T + + data = ... + data = T.HalfHop(alpha=0.5)(data) # Apply transformation + out = model(data.x, data.edge_index) # Feed-forward + out = out[~data.slow_node_mask] # Get rid of slow nodes + """ + def __init__(self, alpha: float = 0.5, p: float = 1.0): + if alpha < 0. or alpha > 1.: + raise ValueError(f'Interpolation factor has to be between 0 and 1 ' + f'(got {alpha}') + if p < 0. 
or p > 1.: + raise ValueError( + f'Ratio of half-hopped edges has to be between 0 and 1 ' + f'(got {p}') + self.p = p + self.alpha = alpha + + def forward(self, data: Data) -> Data: + + if data.edge_weight is not None or data.edge_attr is not None: + raise ValueError("'HalfHop' augmentation is not supported if " + "'data' has 'edge_weight' or 'edge_attr'") + + x, edge_index = data.x, data.edge_index + + # isolate self loops which are not half-hopped + self_loop_mask = edge_index[0] == edge_index[1] + edge_index_self_loop = edge_index[:, self_loop_mask] + edge_index = edge_index[:, ~self_loop_mask] + + # randomly sample nodes and half-hop their edges + node_mask = torch.rand(data.num_nodes, device=x.device) < self.p + edge_mask = node_mask[edge_index[1]] + edge_index_to_halfhop = edge_index[:, edge_mask] + edge_index_to_keep = edge_index[:, ~edge_mask] + + # add new slow nodes of which features are initialized + # by linear interpolation + num_halfhop_edges = edge_index_to_halfhop.size(1) + slow_node_ids = torch.arange(num_halfhop_edges, + device=x.device) + data.num_nodes + x_src = x[edge_index_to_halfhop[0]] + x_dst = x[edge_index_to_halfhop[1]] + x_slow_node = self.alpha * x_src + (1 - self.alpha) * x_dst + new_x = torch.cat([x, x_slow_node], dim=0) + + # add new edges between slow nodes and the original nodes + edge_index_slow = [ + torch.stack([edge_index_to_halfhop[0], slow_node_ids]), + torch.stack([slow_node_ids, edge_index_to_halfhop[1]]), + torch.stack([edge_index_to_halfhop[1], slow_node_ids]) + ] + new_edge_index = torch.cat( + [edge_index_to_keep, edge_index_self_loop, *edge_index_slow], + dim=1) + + # prepare a mask that distinguishes between original nodes & slow nodes + slow_node_mask = torch.cat( + [x.new_zeros(x.size(0)), + x.new_ones(slow_node_ids.size(0))], dim=0).bool() + + data.x, data.edge_index = new_x, new_edge_index + data.slow_node_mask = slow_node_mask + + return data + + def __repr__(self) -> str: + return f'{self.__class__.__name__}(alpha={self.alpha}, p={self.p})' From 27644d3755492458f6c36a11877d4c86e32cf4fe Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 18 Aug 2023 10:21:50 +0200 Subject: [PATCH 1411/2432] Add nightly GPU tests (#7895) --- .github/actions/setup/action.yml | 15 ++++++++++++- .github/workflows/full_gpu_testing.yml | 30 ++++++++++++++++++++++++++ .github/workflows/full_testing.yml | 2 +- CHANGELOG.md | 1 + test/nn/conv/test_gen_conv.py | 26 +++++++++++----------- torch_geometric/nn/conv/rgcn_conv.py | 4 ++-- torch_geometric/nn/dense/linear.py | 2 +- torch_geometric/transforms/half_hop.py | 29 ++++++++++++------------- torch_geometric/typing.py | 16 ++++++++++++++ 9 files changed, 92 insertions(+), 33 deletions(-) create mode 100644 .github/workflows/full_gpu_testing.yml diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index fc34eb3bed67..41d7a449d269 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -35,6 +35,7 @@ runs: run: | pip install torch==${{ inputs.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} python -c "import torch; print('PyTorch:', torch.__version__)" + python -c "import torch; print('CUDA available:', torch.cuda.is_available())" python -c "import torch; print('CUDA:', torch.version.cuda)" shell: bash @@ -43,6 +44,7 @@ runs: run: | pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/${{ inputs.cuda-version }} python -c "import torch; print('PyTorch:', torch.__version__)" + 
python -c "import torch; print('CUDA available:', torch.cuda.is_available())" python -c "import torch; print('CUDA:', torch.version.cuda)" shell: bash @@ -52,10 +54,21 @@ runs: pip install --no-index pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash + - name: Install faiss-cpu + if: ${{ inputs.cuda-version == 'cpu' }} + run: | + pip install faiss-cpu + shell: bash + + - name: Install faiss-gpu + if: ${{ inputs.cuda-version != 'cpu' }} + run: | + pip install faiss-gpu + shell: bash + - name: Install extension packages if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' }} run: | - pip install faiss-cpu pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} pip install scipy pip install --no-index torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html diff --git a/.github/workflows/full_gpu_testing.yml b/.github/workflows/full_gpu_testing.yml new file mode 100644 index 000000000000..969e98328f0c --- /dev/null +++ b/.github/workflows/full_gpu_testing.yml @@ -0,0 +1,30 @@ +name: Full GPU Testing + +on: # yamllint disable-line rule:truthy + workflow_dispatch: + schedule: + - cron: "0 6 * * *" # Everyday at 6:00am UTC/10:00pm PST + +jobs: + + full_gpu_pytest: + if: github.repository == 'pyg-team/pytorch_geometric' + runs-on: [self-hosted, nvidia] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Setup packages + uses: ./.github/actions/setup + with: + cuda-version: 'cu118' + + - name: Install main package + run: | + pip install -e .[full,test] + + - name: Run tests + run: | + FULL_TEST=1 pytest + shell: bash diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 33f58f571c07..0f069024849f 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -7,7 +7,7 @@ on: # yamllint disable-line rule:truthy jobs: - pytest: + full_pytest: if: github.repository == 'pyg-team/pytorch_geometric' runs-on: ${{ matrix.os }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 43b13ca3b6b3..2b6eed65c68b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added nightly GPU tests ([#7895](https://github.com/pyg-team/pytorch_geometric/pull/7895)) - Added the `HalfHop` graph upsampling augmentation ([#7827](https://github.com/pyg-team/pytorch_geometric/pull/7827)) - Added the `Wikidata5M` dataset ([#7864](https://github.com/pyg-team/pytorch_geometric/pull/7864)) - Added TorchScript support inside `BasicGNN` models ([#7865](https://github.com/pyg-team/pytorch_geometric/pull/7865)) diff --git a/test/nn/conv/test_gen_conv.py b/test/nn/conv/test_gen_conv.py index 558d012a693b..42a1d8294d9c 100644 --- a/test/nn/conv/test_gen_conv.py +++ b/test/nn/conv/test_gen_conv.py @@ -43,12 +43,12 @@ def test_gen_conv(aggr): if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x1, edge_index), out1, atol=1e-4) assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out1, - atol=1e-6) - assert torch.allclose(jit(x1, edge_index, value), out2, atol=1e-6) + atol=1e-4) + assert torch.allclose(jit(x1, edge_index, value), out2, atol=1e-4) assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out2, - atol=1e-6) + atol=1e-4) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' @@ -80,13 +80,13 @@ def test_gen_conv(aggr): if is_full_test(): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out1, atol=1e-6) + assert torch.allclose(jit((x1, x2), edge_index), out1, atol=1e-4) assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1, - atol=1e-6) + atol=1e-4) assert torch.allclose(jit((x1, x2), edge_index, value), out2, - atol=1e-6) + atol=1e-4) assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out2, - atol=1e-6) + atol=1e-4) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' @@ -139,14 +139,14 @@ def test_gen_conv(aggr): t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), edge_index, value), out1, - atol=1e-6) + atol=1e-4) assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1, atol=1e-6) + out1, atol=1e-4) assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2, atol=1e-6) + out2, atol=1e-4) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj4.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj4.t()), out2, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj4.t()), out1, atol=1e-4) + assert torch.allclose(jit((x1, None), adj4.t()), out2, atol=1e-4) diff --git a/torch_geometric/nn/conv/rgcn_conv.py b/torch_geometric/nn/conv/rgcn_conv.py index e49e89456602..e7ff9f342d0c 100644 --- a/torch_geometric/nn/conv/rgcn_conv.py +++ b/torch_geometric/nn/conv/rgcn_conv.py @@ -229,7 +229,7 @@ def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]], out = out + h.contiguous().view(-1, self.out_channels) else: # No regularization/Basis-decomposition ======================== - if (torch_geometric.typing.WITH_PYG_LIB and self.num_bases is None + if (torch_geometric.typing.WITH_SEGMM and self.num_bases is None and x_l.is_floating_point() and isinstance( 
edge_index, Tensor)) and (self.use_segmm == -1 or bool(self.use_segmm)): @@ -273,7 +273,7 @@ def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]], return out def message(self, x_j: Tensor, edge_type_ptr: OptTensor) -> Tensor: - if torch_geometric.typing.WITH_PYG_LIB and edge_type_ptr is not None: + if torch_geometric.typing.WITH_SEGMM and edge_type_ptr is not None: # TODO Re-weight according to edge type degree for `aggr=mean`. return pyg_lib.ops.segment_matmul(x_j, edge_type_ptr, self.weight) diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 9783d7a37f6a..05900aac143e 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -258,7 +258,7 @@ def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: x (torch.Tensor): The input features. type_vec (torch.Tensor): A vector that maps each entry to a type. """ - if (torch_geometric.typing.WITH_PYG_LIB + if (torch_geometric.typing.WITH_SEGMM and (self.use_segmm == -1 or bool(self.use_segmm))): assert self.weight is not None diff --git a/torch_geometric/transforms/half_hop.py b/torch_geometric/transforms/half_hop.py index a03df145ea31..b7eceb792637 100644 --- a/torch_geometric/transforms/half_hop.py +++ b/torch_geometric/transforms/half_hop.py @@ -7,14 +7,14 @@ @functional_transform('half_hop') class HalfHop(BaseTransform): - r"""Graph upsampling augmentation from the - `"Half-Hop: A graph upsampling approach for slowing down - message passing" `_ paper. + r"""The graph upsampling augmentation from the + `"Half-Hop: A Graph Upsampling Approach for Slowing Down Message Passing" + `_ paper. The graph is augmented by adding artificial slow nodes between neighbors to slow down message propagation. (functional name: :obj:`half_hop`). .. note:: - `HalfHop` augmentation is not supported if `data` has + :class:`HalfHop` augmentation is not supported if :obj:`data` has :attr:`edge_weight` or :attr:`edge_attr`. Args: @@ -28,27 +28,26 @@ class HalfHop(BaseTransform): import torch_geometric.transforms as T - data = ... - data = T.HalfHop(alpha=0.5)(data) # Apply transformation - out = model(data.x, data.edge_index) # Feed-forward - out = out[~data.slow_node_mask] # Get rid of slow nodes + transform = T.HalfHop(alpha=0.5) + data = transform(data) # Apply transformation. + out = model(data.x, data.edge_index) # Feed-forward. + out = out[~data.slow_node_mask] # Get rid of slow nodes. """ def __init__(self, alpha: float = 0.5, p: float = 1.0): if alpha < 0. or alpha > 1.: - raise ValueError(f'Interpolation factor has to be between 0 and 1 ' - f'(got {alpha}') + raise ValueError(f"Interpolation factor has to be between 0 and 1 " + f"(got '{alpha}'") if p < 0. 
or p > 1.: - raise ValueError( - f'Ratio of half-hopped edges has to be between 0 and 1 ' - f'(got {p}') + raise ValueError(f"Ratio of half-hopped edges has to be between " + f"0 and 1 (got '{p}'") + self.p = p self.alpha = alpha def forward(self, data: Data) -> Data: - if data.edge_weight is not None or data.edge_attr is not None: raise ValueError("'HalfHop' augmentation is not supported if " - "'data' has 'edge_weight' or 'edge_attr'") + "'data' contains 'edge_weight' or 'edge_attr'") x, edge_index = data.x, data.edge_index diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 1a6f1a61dbf0..c412069c1344 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -1,3 +1,4 @@ +import sys import warnings from typing import Dict, List, Optional, Tuple, Union @@ -17,6 +18,20 @@ import pyg_lib # noqa WITH_PYG_LIB = True WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul') + WITH_SEGMM = hasattr(pyg_lib.ops, 'segment_matmul') + if WITH_SEGMM and 'pytest' in sys.modules and torch.cuda.is_available(): + # NOTE `segment_matmul` is currently bugged on older NVIDIA cards which + # let our GPU tests on CI crash. Try if this error is present on the + # current GPU and disable `WITH_SEGMM`/`WITH_GMM` if necessary. + # TODO Drop this code block once `segment_matmul` is fixed. + try: + x = torch.randn(3, 4, device='cuda') + ptr = torch.tensor([0, 2, 3], device='cuda') + weight = torch.tensor([2, 4, 4], device='cuda') + out = pyg_lib.ops.segment_matmul(x, ptr, weight) + except RuntimeError: + WITH_GMM = False + WITH_SEGMM = False WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add') WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort') WITH_METIS = hasattr(pyg_lib, 'partition') @@ -27,6 +42,7 @@ pyg_lib = object WITH_PYG_LIB = False WITH_GMM = False + WITH_SEGMM = False WITH_SAMPLED_OP = False WITH_INDEX_SORT = False WITH_METIS = False From c5588409f4528a309962492e3cadc281d5f2ba41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Dav=C3=B3?= Date: Fri, 18 Aug 2023 10:23:18 +0200 Subject: [PATCH 1412/2432] Enable sorted recommendations in `LightGCN` model (#7888) The main motivation behind this is to be able to calculate the precision at 5, 10, 20 without making multiple calls to `model.recommend` --- torch_geometric/nn/models/lightgcn.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/torch_geometric/nn/models/lightgcn.py b/torch_geometric/nn/models/lightgcn.py index 9de700f61465..d9acd18a49e8 100644 --- a/torch_geometric/nn/models/lightgcn.py +++ b/torch_geometric/nn/models/lightgcn.py @@ -164,6 +164,7 @@ def recommend( src_index: OptTensor = None, dst_index: OptTensor = None, k: int = 1, + sorted: bool = True, ) -> Tensor: r"""Get top-:math:`k` recommendations for nodes in :obj:`src_index`. @@ -177,6 +178,8 @@ def recommend( If set to :obj:`None`, all nodes will be used. (default: :obj:`None`) k (int, optional): Number of recommendations. (default: :obj:`1`) + sorted (bool, optional): Whether to sort the recommendations + by score. (default: :obj:`True`) """ out_src = out_dst = self.get_embedding(edge_index, edge_weight) @@ -187,7 +190,7 @@ def recommend( out_dst = out_dst[dst_index] pred = out_src @ out_dst.t() - top_index = pred.topk(k, dim=-1).indices + top_index = pred.topk(k, dim=-1, sorted=sorted).indices if dst_index is not None: # Map local top-indices to original indices. 
top_index = dst_index[top_index.view(-1)].view(*top_index.size()) From c0c060c192e68f4088757bc12b6e551736bda87b Mon Sep 17 00:00:00 2001 From: Ramona Bendias Date: Fri, 18 Aug 2023 13:16:29 +0200 Subject: [PATCH 1413/2432] [Explain] Clarify target usage for `CaptumExplainer` (#7820) Fixes topics discussed in https://github.com/pyg-team/pytorch_geometric/discussions/7812. --------- Co-authored-by: Jinu Sunil Co-authored-by: Matthias Fey --- .../algorithm/test_captum_explainer.py | 23 ++++++++++--------- .../explain/algorithm/captum_explainer.py | 21 +++++++++-------- torch_geometric/explain/explainer.py | 14 +++++++---- 3 files changed, 34 insertions(+), 24 deletions(-) diff --git a/test/explain/algorithm/test_captum_explainer.py b/test/explain/algorithm/test_captum_explainer.py index 48009fcf1711..f9e4655a8a2b 100644 --- a/test/explain/algorithm/test_captum_explainer.py +++ b/test/explain/algorithm/test_captum_explainer.py @@ -199,21 +199,22 @@ def test_captum_explainer_multiclass_classification( @withPackage('captum') -@pytest.mark.parametrize('method', methods) -@pytest.mark.parametrize('node_mask_type', node_mask_types) -@pytest.mark.parametrize('edge_mask_type', edge_mask_types) +@pytest.mark.parametrize( + 'method', + [m for m in methods if m != 'ShapleyValueSampling'], +) +@pytest.mark.parametrize( + 'node_mask_type', + [nm for nm in node_mask_types if nm is not None], +) +@pytest.mark.parametrize( + 'edge_mask_type', + [em for em in edge_mask_types if em is not None], +) @pytest.mark.parametrize('index', [1, torch.arange(2)]) def test_captum_hetero_data(method, node_mask_type, edge_mask_type, index, hetero_data, hetero_model): - if method == 'ShapleyValueSampling': - # This currently takes too long to test and is already covered by - # by the homogeneous graph test case. 
- return - - if node_mask_type is None or edge_mask_type is None: - return - model_config = ModelConfig(mode='regression', task_level='node') explainer = Explainer( diff --git a/torch_geometric/explain/algorithm/captum_explainer.py b/torch_geometric/explain/algorithm/captum_explainer.py index a5e10653a83c..d47f68b6b633 100644 --- a/torch_geometric/explain/algorithm/captum_explainer.py +++ b/torch_geometric/explain/algorithm/captum_explainer.py @@ -60,17 +60,17 @@ def __init__( import captum.attr # noqa if isinstance(attribution_method, str): - self.attribution_method = getattr( + self.attribution_method_class = getattr( captum.attr, attribution_method, ) else: - self.attribution_method = attribution_method + self.attribution_method_class = attribution_method if not self._is_supported_attribution_method(): raise ValueError(f"{self.__class__.__name__} does not support " f"attribution method " - f"{self.attribution_method.__name__}") + f"{self.attribution_method_class.__name__}") if kwargs.get('internal_batch_size', 1) != 1: warnings.warn("Overriding 'internal_batch_size' to 1") @@ -97,7 +97,7 @@ def _get_mask_type(self) -> MaskLevelType: def _get_attribute_parameters(self) -> Dict[str, Any]: r"""Returns the attribute arguments.""" - signature = inspect.signature(self.attribution_method.attribute) + signature = inspect.signature(self.attribution_method_class.attribute) return signature.parameters def _needs_baseline(self) -> bool: @@ -114,7 +114,7 @@ def _is_supported_attribution_method(self) -> bool: # This is redundant for now since all supported methods need a baseline if self._needs_baseline(): return False - elif self.attribution_method.__name__ in self.SUPPORTED_METHODS: + elif self.attribution_method_class.__name__ in self.SUPPORTED_METHODS: return True return False @@ -152,16 +152,19 @@ def forward( captum_model = CaptumModel(model, mask_type, index, self.model_config) - attribution_method = self.attribution_method(captum_model) + self.attribution_method_instance = self.attribution_method_class( + captum_model) - # In captum, the target is the index for which - # the attribution is computed. + # In captum, the target is the class index for which + # the attribution is computed. With CaptumModel, we transform + # the binary classification into a multi-class. This way we can + # explain both classes and need to pass a target here as well. if self.model_config.mode == ModelMode.regression: target = None else: target = target[index] - attributions = attribution_method.attribute( + attributions = self.attribution_method_instance.attribute( inputs=inputs, target=target, additional_forward_args=add_forward_args, diff --git a/torch_geometric/explain/explainer.py b/torch_geometric/explain/explainer.py index 1c8020b8530b..e2643509a387 100644 --- a/torch_geometric/explain/explainer.py +++ b/torch_geometric/explain/explainer.py @@ -170,11 +170,14 @@ def __call__( If the explanation type is :obj:`"phenomenon"`, the target has to be provided. If the explanation type is :obj:`"model"`, the target should be - set to :obj:`None` and will get automatically inferred. + set to :obj:`None` and will get automatically inferred. For + classification tasks, the target needs to contain the class + labels. (default: :obj:`None`) + index (Union[int, Tensor], optional): The indices in the + first-dimension of the model output to explain. + Can be a single index or a tensor of indices. + If set to :obj:`None`, all model outputs will be explained. 
(default: :obj:`None`) - index (Union[int, Tensor], optional): The index of the model - output to explain. Can be a single index or a tensor of - indices. (default: :obj:`None`) **kwargs: additional arguments to pass to the GNN. """ # Choose the `target` depending on the explanation type: @@ -192,6 +195,9 @@ def __call__( prediction = self.get_prediction(x, edge_index, **kwargs) target = self.get_target(prediction) + if isinstance(index, int): + index = torch.tensor([index]) + training = self.model.training self.model.eval() From deff5a46deaf319fb77e10dae97a20c47509c6ac Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 18 Aug 2023 13:40:25 +0200 Subject: [PATCH 1414/2432] Fix `NaN` issue in `SetTransformerAggregation` (#7902) Fixes #7899 --- CHANGELOG.md | 1 + test/nn/aggr/test_set_transformer.py | 6 ++++-- torch_geometric/nn/aggr/set_transformer.py | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b6eed65c68b..e9193a871d82 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed an issue where `SetTransformerAggregation` produced `NaN` values for isolates nodes ([#7902](https://github.com/pyg-team/pytorch_geometric/pull/7902)) - Fixed `model_summary` on modules with uninitialized parameters ([#7884](https://github.com/pyg-team/pytorch_geometric/pull/7884)) - Updated `QM9` data pre-processing to include the SMILES string ([#7867](https://github.com/pyg-team/pytorch_geometric/pull/7867)) - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) diff --git a/test/nn/aggr/test_set_transformer.py b/test/nn/aggr/test_set_transformer.py index 518bebd3e5a2..1e7a3a51c7c6 100644 --- a/test/nn/aggr/test_set_transformer.py +++ b/test/nn/aggr/test_set_transformer.py @@ -6,7 +6,7 @@ def test_set_transformer_aggregation(): x = torch.randn(6, 16) - index = torch.tensor([0, 0, 1, 1, 1, 2]) + index = torch.tensor([0, 0, 1, 1, 1, 3]) aggr = SetTransformerAggregation(16, num_seed_points=2, heads=2) aggr.reset_parameters() @@ -14,7 +14,9 @@ def test_set_transformer_aggregation(): 'heads=2, layer_norm=False, dropout=0.0)') out = aggr(x, index) - assert out.size() == (3, 2 * 16) + assert out.size() == (4, 2 * 16) + assert out.isnan().sum() == 0 + assert out[2].abs().sum() == 0 if is_full_test(): jit = torch.jit.script(aggr) diff --git a/torch_geometric/nn/aggr/set_transformer.py b/torch_geometric/nn/aggr/set_transformer.py index 20e6ba17cadb..d331d0a41876 100644 --- a/torch_geometric/nn/aggr/set_transformer.py +++ b/torch_geometric/nn/aggr/set_transformer.py @@ -105,6 +105,8 @@ def forward( for decoder in self.decoders: x = decoder(x) + x = x.nan_to_num() + return x.flatten(1, 2) if self.concat else x.mean(dim=1) def __repr__(self) -> str: From 1ffce718317a4c8bb0161a68c157cfa76abfd1f1 Mon Sep 17 00:00:00 2001 From: Serge Panev Date: Mon, 21 Aug 2023 04:09:46 -0700 Subject: [PATCH 1415/2432] Fix to_cugraph and from_cugraph tests in test_convert (#7908) Fix to match the behavior expected with cugraph >= 23.08 Signed-off-by: Serge Panev --- test/utils/test_convert.py | 6 +++--- torch_geometric/utils/convert.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index 93ea55d84586..8e4b8280a98f 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -462,13 +462,13 @@ def 
test_to_cugraph(edge_weight, directed, relabel_nodes): edge_list = graph.view_edge_list() assert edge_list is not None - edge_list = edge_list.sort_values(by=['src', 'dst']) + edge_list = edge_list.sort_values(by=[0, 1]) - cu_edge_index = edge_list[['src', 'dst']].to_pandas().values + cu_edge_index = edge_list[[0, 1]].to_pandas().values cu_edge_index = torch.from_numpy(cu_edge_index).t() cu_edge_weight = None if edge_weight is not None: - cu_edge_weight = edge_list['weights'].to_pandas().values + cu_edge_weight = edge_list['2'].to_pandas().values cu_edge_weight = torch.from_numpy(cu_edge_weight) cu_edge_index, cu_edge_weight = sort_edge_index(cu_edge_index, diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index eccb23051bbb..e0fa499b5799 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -457,13 +457,13 @@ def from_cugraph(g: Any) -> Tuple[Tensor, Optional[Tensor]]: """ df = g.view_edge_list() - src = from_dlpack(df['src'].to_dlpack()).long() - dst = from_dlpack(df['dst'].to_dlpack()).long() + src = from_dlpack(df[0].to_dlpack()).long() + dst = from_dlpack(df[1].to_dlpack()).long() edge_index = torch.stack([src, dst], dim=0) edge_weight = None - if 'weights' in df: - edge_weight = from_dlpack(df['weights'].to_dlpack()) + if '2' in df: + edge_weight = from_dlpack(df['2'].to_dlpack()) return edge_index, edge_weight From 486df71256ce3d19b787f697ec508fb697cc8574 Mon Sep 17 00:00:00 2001 From: Frinkleko Shen Date: Mon, 21 Aug 2023 19:14:50 +0800 Subject: [PATCH 1416/2432] Fix broken URLs in `HGBDataset` (#7907) Old links are not available. According to changes in its [repo](https://github.com/THUDM/HGB), dataset can be found in [here](https://drive.google.com/drive/folders/10-pf2ADCjq_kpJKFHHLHxr_czNNCJ3aX) now. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/hgb_dataset.py | 27 +++++++++++++++++-------- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9193a871d82..9d309282733f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed broken links in `HGBDataset` ([#7907](https://github.com/pyg-team/pytorch_geometric/pull/7907)) - Fixed an issue where `SetTransformerAggregation` produced `NaN` values for isolates nodes ([#7902](https://github.com/pyg-team/pytorch_geometric/pull/7902)) - Fixed `model_summary` on modules with uninitialized parameters ([#7884](https://github.com/pyg-team/pytorch_geometric/pull/7884)) - Updated `QM9` data pre-processing to include the SMILES string ([#7867](https://github.com/pyg-team/pytorch_geometric/pull/7867)) diff --git a/torch_geometric/datasets/hgb_dataset.py b/torch_geometric/datasets/hgb_dataset.py index 2d77fd673600..223276e5408d 100644 --- a/torch_geometric/datasets/hgb_dataset.py +++ b/torch_geometric/datasets/hgb_dataset.py @@ -40,10 +40,6 @@ class HGBDataset(InMemoryDataset): transformed version. The data object will be transformed before being saved to disk. 
(default: :obj:`None`) """ - - url = ('https://cloud.tsinghua.edu.cn/d/2d965d2fc2ee41d09def/files/' - '?p=%2F{}.zip&dl=1') - names = { 'acm': 'ACM', 'dblp': 'DBLP', @@ -51,9 +47,24 @@ class HGBDataset(InMemoryDataset): 'imdb': 'IMDB', } - def __init__(self, root: str, name: str, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None): + urls = { + 'acm': ('https://drive.google.com/uc?' + 'export=download&id=1xbJ4QE9pcDJOcALv7dYhHDCPITX2Iddz'), + 'dblp': ('https://drive.google.com/uc?' + 'export=download&id=1fLLoy559V7jJaQ_9mQEsC06VKd6Qd3SC'), + 'freebase': ('https://drive.google.com/uc?' + 'export=download&id=1vw-uqbroJZfFsWpriC1CWbtHCJMGdWJ7'), + 'imdb': ('https://drive.google.com/uc?' + 'export=download&id=18qXmmwKJBrEJxVQaYwKTL3Ny3fPqJeJ2'), + } + + def __init__( + self, + root: str, + name: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): self.name = name.lower() assert self.name in set(self.names.keys()) super().__init__(root, transform, pre_transform) @@ -77,7 +88,7 @@ def processed_file_names(self) -> str: return 'data.pt' def download(self): - url = self.url.format(self.names[self.name]) + url = self.urls[self.name] path = download_url(http://github.com/url,%20self.raw_dir) extract_zip(path, self.raw_dir) os.unlink(path) From 309576a54c405f7c4a034a06fbd48b70a1ce0b3a Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Mon, 21 Aug 2023 19:27:43 +0800 Subject: [PATCH 1417/2432] using global node ids for partitioned graph's row/col (#7910) @rusty1s For distributed pyg training development, we prefer to use global node ids to represent partitioned graph store. It would be convenient for us to neighbor sample the partitioned graph if we use global ids for rows/cols. 
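A toy sketch of the local-to-global relabeling this patch performs when saving each partition's `graph.pt` (the tensor values below are made up; `node_perm` and `node_id` follow the names used in `partition.py`, everything else is illustrative):

```python
import torch

# Global node permutation produced by partitioning 6 nodes into parts,
# and the slice owned by the current partition (its global node IDs).
node_perm = torch.tensor([3, 5, 0, 2, 4, 1])  # partition order -> global IDs
node_id = node_perm[0:3]                      # nodes assigned to this partition

# Partition edges in local coordinates: rows index into `node_id`,
# cols index into `node_perm`.
local_edge_index = torch.tensor([[0, 1, 2],
                                 [2, 4, 5]])

# Relabel into global node IDs before saving, as done in `generate_partition`:
global_rows = node_id[local_edge_index[0]]
global_cols = node_perm[local_edge_index[1]]
print(torch.stack([global_rows, global_cols], dim=0))
# tensor([[3, 5, 0],
#         [0, 4, 1]])
```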
--------- Signed-off-by: Liu,Kaixuan Co-authored-by: Matthias Fey --- torch_geometric/distributed/partition.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/torch_geometric/distributed/partition.py b/torch_geometric/distributed/partition.py index 918d03648de5..13b72bc835f0 100644 --- a/torch_geometric/distributed/partition.py +++ b/torch_geometric/distributed/partition.py @@ -150,10 +150,14 @@ def generate_partition(self): size = (self.data[src].num_nodes, self.data[dst].num_nodes) mask = part_data.edge_type == i + rows = part_data.edge_index[0, mask] + cols = part_data.edge_index[1, mask] + global_rows = node_id[rows] + global_cols = node_perm[cols] out[edge_type] = { 'edge_id': edge_id[mask], - 'row': part_data.edge_index[0, mask], - 'col': part_data.edge_index[1, mask], + 'row': global_rows, + 'col': global_cols, 'size': size, } torch.save(out, osp.join(path, 'graph.pt')) @@ -213,12 +217,16 @@ def generate_partition(self): node_id = node_perm[start:end] node_map[node_id] = pid + rows = part_data.edge_index[0] + cols = part_data.edge_index[1] + global_rows = node_id[rows] + global_cols = node_perm[cols] torch.save( { 'edge_id': edge_id, - 'row': part_data.edge_index[0], - 'col': part_data.edge_index[1], + 'row': global_rows, + 'col': global_cols, 'size': (data.num_nodes, data.num_nodes), }, osp.join(path, 'graph.pt')) From 684f17958fe8c949644c1fe1d6b2d5f70e313411 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 21 Aug 2023 15:32:34 +0200 Subject: [PATCH 1418/2432] Fix `ReferenceError: weakly-referenced object no longer exists` from PyTorch nightly (#7911) --- test/transforms/test_rooted_subgraph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/transforms/test_rooted_subgraph.py b/test/transforms/test_rooted_subgraph.py index ae7699a48637..eaee4b148bf0 100644 --- a/test/transforms/test_rooted_subgraph.py +++ b/test/transforms/test_rooted_subgraph.py @@ -62,7 +62,7 @@ def test_rooted_rw_subgraph(): assert out.n_sub_batch.tolist() == [0, 0, 1, 1, 2, 2] -@withPackage('torch>=1.12.0') +@withPackage('torch>=1.12.0', 'torch<2.1.0') def test_rooted_subgraph_minibatch(): x = torch.randn(3, 8) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) From 1dd13de89a5168b950ca5bf37717fea4d2a95f7f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 22 Aug 2023 11:12:17 +0200 Subject: [PATCH 1419/2432] Allow floating-point slicing in `Dataset` (#7915) --- CHANGELOG.md | 1 + test/datasets/test_enzymes.py | 1 + torch_geometric/data/dataset.py | 8 ++++++++ 3 files changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d309282733f..0e9a9240f605 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added support for floating-point slicing in `Dataset`, *e.g.*, `dataset[:0.9]` ([#7915](https://github.com/pyg-team/pytorch_geometric/pull/7915)) - Added nightly GPU tests ([#7895](https://github.com/pyg-team/pytorch_geometric/pull/7895)) - Added the `HalfHop` graph upsampling augmentation ([#7827](https://github.com/pyg-team/pytorch_geometric/pull/7827)) - Added the `Wikidata5M` dataset ([#7864](https://github.com/pyg-team/pytorch_geometric/pull/7864)) diff --git a/test/datasets/test_enzymes.py b/test/datasets/test_enzymes.py index 4b7cc07665cb..c0d24c7747ec 100644 --- a/test/datasets/test_enzymes.py +++ b/test/datasets/test_enzymes.py @@ -18,6 +18,7 @@ def test_enzymes(get_dataset): assert len(dataset.shuffle()) == 600 assert len(dataset.shuffle(return_perm=True)) == 2 assert len(dataset[:100]) == 100 + assert len(dataset[0.1:0.2]) == 60 assert len(dataset[torch.arange(100, dtype=torch.long)]) == 100 mask = torch.zeros(600, dtype=torch.bool) mask[:100] = 1 diff --git a/torch_geometric/data/dataset.py b/torch_geometric/data/dataset.py index 18bb65a5525e..2b4b44c30dba 100644 --- a/torch_geometric/data/dataset.py +++ b/torch_geometric/data/dataset.py @@ -275,6 +275,14 @@ def index_select(self, idx: IndexType) -> 'Dataset': indices = self.indices() if isinstance(idx, slice): + start, stop, step = idx.start, idx.stop, idx.step + # Allow floating-point slicing, e.g., dataset[:0.9] + if isinstance(start, float): + start = round(start * len(self)) + if isinstance(stop, float): + stop = round(stop * len(self)) + idx = slice(start, stop, step) + indices = indices[idx] elif isinstance(idx, Tensor) and idx.dtype == torch.long: From f3a1dbd0992060b31e7c508b34c4625a4fb6602f Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Wed, 23 Aug 2023 17:40:22 +0200 Subject: [PATCH 1420/2432] Add support for XPU device in `PrefetchLoader` (#7918) Device abstraction is now covered by an internal class, called `PrefetchLoaderDevice`. It handles the device selection mechanism, stream creation, and synchronization. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/loader/prefetch.py | 67 ++++++++++++++++++++++-------- torch_geometric/typing.py | 7 ++++ 3 files changed, 57 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e9a9240f605..968fa1e512c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added support for XPU device in `PrefetchLoader` ([#7918](https://github.com/pyg-team/pytorch_geometric/pull/7918)) - Added support for floating-point slicing in `Dataset`, *e.g.*, `dataset[:0.9]` ([#7915](https://github.com/pyg-team/pytorch_geometric/pull/7915)) - Added nightly GPU tests ([#7895](https://github.com/pyg-team/pytorch_geometric/pull/7895)) - Added the `HalfHop` graph upsampling augmentation ([#7827](https://github.com/pyg-team/pytorch_geometric/pull/7827)) diff --git a/torch_geometric/loader/prefetch.py b/torch_geometric/loader/prefetch.py index 3bbfd69c6978..1340c9e7660f 100644 --- a/torch_geometric/loader/prefetch.py +++ b/torch_geometric/loader/prefetch.py @@ -1,3 +1,4 @@ +import warnings from contextlib import nullcontext from functools import partial from typing import Any, Optional @@ -5,6 +6,47 @@ import torch from torch.utils.data import DataLoader +from torch_geometric.typing import WITH_IPEX + + +class DeviceHelper: + def __init__(self, device: Optional[torch.device] = None): + with_cuda = torch.cuda.is_available() + with_xpu = torch.xpu.is_available() if WITH_IPEX else False + + if device is None: + if with_cuda: + device = 'cuda' + elif with_xpu: + device = 'xpu' + else: + device = 'cpu' + + self.device = torch.device(device) + self.is_gpu = self.device.type in ['cuda', 'xpu'] + + if ((self.device.type == 'cuda' and not with_cuda) + or (self.device.type == 'xpu' and not with_xpu)): + warnings.warn(f"Requested device '{self.device.type}' is not " + f"available, falling back to CPU") + self.device = torch.device('cpu') + + self.stream = None + self.stream_context = nullcontext + self.module = getattr(torch, self.device.type) if self.is_gpu else None + + def maybe_init_stream(self) -> None: + if self.is_gpu: + self.stream = self.module.Stream() + self.stream_context = partial( + self.module.stream, + stream=self.stream, + ) + + def maybe_wait_stream(self) -> None: + if self.stream is not None: + self.module.current_stream().wait_stream(self.stream) + class PrefetchLoader: r"""A GPU prefetcher class for asynchronously transferring data of a @@ -20,37 +62,27 @@ def __init__( loader: DataLoader, device: Optional[torch.device] = None, ): - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.loader = loader - self.device = torch.device(device) - - self.is_cuda = torch.cuda.is_available() and self.device.type == 'cuda' + self.device_helper = DeviceHelper(device) def non_blocking_transfer(self, batch: Any) -> Any: - if not self.is_cuda: + if not self.device_helper.is_gpu: return batch if isinstance(batch, (list, tuple)): return [self.non_blocking_transfer(v) for v in batch] if isinstance(batch, dict): return {k: self.non_blocking_transfer(v) for k, v in batch.items()} - batch = batch.pin_memory() - return batch.to(self.device, non_blocking=True) + batch = batch.pin_memory(self.device_helper.device) + return batch.to(self.device_helper.device, non_blocking=True) def __iter__(self) -> Any: first = True - if self.is_cuda: - stream = torch.cuda.Stream() - stream_context = partial(torch.cuda.stream, stream=stream) - else: - stream = None - stream_context = nullcontext + self.device_helper.maybe_init_stream() for next_batch in self.loader: - with stream_context(): + with self.device_helper.stream_context(): next_batch = self.non_blocking_transfer(next_batch) if not first: @@ -58,8 +90,7 @@ def __iter__(self) -> Any: else: first = False - if stream is not None: - torch.cuda.current_stream().wait_stream(stream) + 
self.device_helper.maybe_wait_stream() batch = next_batch diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index c412069c1344..38a3a01fbb46 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -205,6 +205,13 @@ def masked_select_nnz(src: SparseTensor, mask: Tensor, raise ImportError("'masked_select_nnz' requires 'torch-sparse'") +try: + import intel_extension_for_pytorch # noqa + WITH_IPEX = True +except (ImportError, OSError): + WITH_IPEX = False + + class MockTorchCSCTensor: def __init__( self, From 8052cec8bc33914caf5e046a1d57019cb45baa3a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 24 Aug 2023 07:17:53 +0200 Subject: [PATCH 1421/2432] Add GPU information to GPU runner (#7922) --- .github/workflows/full_gpu_testing.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/full_gpu_testing.yml b/.github/workflows/full_gpu_testing.yml index 969e98328f0c..f650cc75ef30 100644 --- a/.github/workflows/full_gpu_testing.yml +++ b/.github/workflows/full_gpu_testing.yml @@ -20,6 +20,10 @@ jobs: with: cuda-version: 'cu118' + - name: Print GPU information + run: | + nvidia-smi + - name: Install main package run: | pip install -e .[full,test] From ac086463556c9291e8a933498b4ddb766557089f Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 24 Aug 2023 08:02:47 +0200 Subject: [PATCH 1422/2432] Fix `bias_initializer` in `HeteroLinear` (#7923) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + test/nn/dense/test_linear.py | 11 +++++++++++ torch_geometric/nn/dense/linear.py | 4 ++-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 968fa1e512c4..4946196c040e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fixed the usage of `bias_initializer` in `HeteroLinear` ([#7923](https://github.com/pyg-team/pytorch_geometric/pull/7923)) - Fixed broken links in `HGBDataset` ([#7907](https://github.com/pyg-team/pytorch_geometric/pull/7907)) - Fixed an issue where `SetTransformerAggregation` produced `NaN` values for isolates nodes ([#7902](https://github.com/pyg-team/pytorch_geometric/pull/7902)) - Fixed `model_summary` on modules with uninitialized parameters ([#7884](https://github.com/pyg-team/pytorch_geometric/pull/7884)) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index d193276dc2a9..9c004b7b070f 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -127,6 +127,17 @@ def test_hetero_linear(device): assert torch.allclose(jit(x, type_vec), out, atol=1e-3) +def test_hetero_linear_initializer(): + lin = HeteroLinear( + 16, + 32, + num_types=3, + weight_initializer='glorot', + bias_initializer='zeros', + ) + assert torch.equal(lin.bias, torch.zeros_like(lin.bias)) + + @withCUDA @pytest.mark.parametrize('use_segmm', [True, False]) def test_hetero_linear_amp(device, use_segmm): diff --git a/torch_geometric/nn/dense/linear.py b/torch_geometric/nn/dense/linear.py index 05900aac143e..d7804c12d09e 100644 --- a/torch_geometric/nn/dense/linear.py +++ b/torch_geometric/nn/dense/linear.py @@ -249,8 +249,8 @@ def reset_parameters(self): r"""Resets all learnable parameters of the module.""" reset_weight_(self.weight, self.in_channels, self.kwargs.get('weight_initializer', None)) - reset_weight_(self.bias, self.in_channels, - self.kwargs.get('bias_initializer', None)) + reset_bias_(self.bias, self.in_channels, + self.kwargs.get('bias_initializer', None)) def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: r""" From 2e0e7e35ce0e5a3dbe951ced527aa436e591bccd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 24 Aug 2023 08:28:32 +0200 Subject: [PATCH 1423/2432] [Test] PyTorch 2.1 supports gradients on CSR matrices (#7924) --- test/nn/conv/test_gcn_conv.py | 6 +++--- test/nn/models/test_rev_gnn.py | 4 ++-- test/utils/test_sparse.py | 8 ++++---- torch_geometric/data/collate.py | 2 +- torch_geometric/loader/utils.py | 2 +- torch_geometric/nn/models/rev_gnn.py | 6 +++--- torch_geometric/typing.py | 11 ++++++----- torch_geometric/utils/spmm.py | 4 ++-- 8 files changed, 22 insertions(+), 21 deletions(-) diff --git a/test/nn/conv/test_gcn_conv.py b/test/nn/conv/test_gcn_conv.py index 2efc52e27af9..63ba7da52ca1 100644 --- a/test/nn/conv/test_gcn_conv.py +++ b/test/nn/conv/test_gcn_conv.py @@ -7,7 +7,7 @@ from torch_geometric.nn import GCNConv from torch_geometric.nn.conv.gcn_conv import gcn_norm from torch_geometric.testing import is_full_test -from torch_geometric.typing import SparseTensor +from torch_geometric.typing import WITH_PT21, SparseTensor from torch_geometric.utils import to_torch_coo_tensor, to_torch_csc_tensor @@ -120,8 +120,8 @@ def test_gcn_norm_gradient(requires_grad, layout): if layout == torch.sparse_csr: adj = adj.to_sparse_csr() - # TODO Sparse CSR tensor does not yet inherit `requires_grad` from `value`. - if layout == torch.sparse_csr: + # TODO Sparse CSR tensor doesn't inherit `requires_grad` for PyTorch < 2.1. 
+ if layout == torch.sparse_csr and not WITH_PT21: assert not gcn_norm(adj)[0].requires_grad else: assert adj.requires_grad == gcn_norm(adj)[0].requires_grad diff --git a/test/nn/models/test_rev_gnn.py b/test/nn/models/test_rev_gnn.py index c114d97d3fb7..d9d9a1756b13 100644 --- a/test/nn/models/test_rev_gnn.py +++ b/test/nn/models/test_rev_gnn.py @@ -22,7 +22,7 @@ def test_revgnn_forward_inverse(num_groups): h_o = h.clone().detach() out = conv(h, edge_index) - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: assert h.untyped_storage().size() == 0 else: assert h.storage().size() == 0 @@ -80,7 +80,7 @@ def test_revgnn_diable(num_groups): target.backward() # Memory will not be freed if disable: - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: assert h.untyped_storage().size() == 4 * 4 * 32 else: assert h.storage().size() == 4 * 32 diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 922e04cecfb8..6c1af5e8dca0 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -134,7 +134,7 @@ def test_to_torch_csr_tensor(): assert torch.allclose(coo.indices(), edge_index) assert torch.allclose(coo.values(), edge_weight) - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: edge_attr = torch.randn(edge_index.size(1), 8) adj = to_torch_csr_tensor(edge_index, edge_attr) assert adj.size() == (4, 4, 8) @@ -155,7 +155,7 @@ def test_to_torch_csc_tensor(): assert adj.size() == (4, 4) assert adj.layout == torch.sparse_csc adj_coo = adj.to_sparse_coo().coalesce() - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: assert torch.allclose(adj_coo.indices(), edge_index) else: assert torch.allclose(adj_coo.indices().flip([0]), edge_index) @@ -165,7 +165,7 @@ def test_to_torch_csc_tensor(): assert adj.size() == (4, 4) assert adj.layout == torch.sparse_csc adj_coo = adj.to_sparse_coo().coalesce() - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: assert torch.allclose(adj_coo.indices(), edge_index) assert torch.allclose(adj_coo.values(), edge_weight) else: @@ -173,7 +173,7 @@ def test_to_torch_csc_tensor(): assert torch.allclose(adj_coo.indices()[:, perm], edge_index) assert torch.allclose(adj_coo.values()[perm], edge_weight) - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: edge_attr = torch.randn(edge_index.size(1), 8) adj = to_torch_csc_tensor(edge_index, edge_attr) assert adj.size() == (4, 4, 8) diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index 134efb7a1d95..f217479b67a5 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -154,7 +154,7 @@ def _collate( if torch.utils.data.get_worker_info() is not None: # Write directly into shared memory to avoid an extra copy: numel = sum(value.numel() for value in values) - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: storage = elem.untyped_storage()._new_shared( numel * elem.element_size(), device=elem.device) elif torch_geometric.typing.WITH_PT112: diff --git a/torch_geometric/loader/utils.py b/torch_geometric/loader/utils.py index 5da757319ee3..a51fd7c3ad26 100644 --- a/torch_geometric/loader/utils.py +++ b/torch_geometric/loader/utils.py @@ -62,7 +62,7 @@ def index_select( size = list(value.shape) size[dim] = index.numel() numel = math.prod(size) - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: storage = value.untyped_storage()._new_shared( numel * 
value.element_size()) else: diff --git a/torch_geometric/nn/models/rev_gnn.py b/torch_geometric/nn/models/rev_gnn.py index 662457bed993..2bf0ab15b501 100644 --- a/torch_geometric/nn/models/rev_gnn.py +++ b/torch_geometric/nn/models/rev_gnn.py @@ -58,7 +58,7 @@ def forward(ctx, fn: torch.nn.Module, fn_inverse: torch.nn.Module, detached_outputs = tuple(element.detach_() for element in outputs) # Clear memory of node features: - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: inputs[0].untyped_storage().resize_(0) else: # pragma: no cover inputs[0].storage().resize_(0) @@ -85,7 +85,7 @@ def backward(ctx, *grad_outputs): inputs_inverted = ctx.fn_inverse(*(outputs + inputs[1:])) if len(ctx.outputs) == 0: # Clear memory from outputs: for element in outputs: - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: element.untyped_storage().resize_(0) else: # pragma: no cover element.storage().resize_(0) @@ -94,7 +94,7 @@ def backward(ctx, *grad_outputs): inputs_inverted = (inputs_inverted, ) for elem_orig, elem_inv in zip(inputs, inputs_inverted): - if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: elem_orig.untyped_storage().resize_( int(np.prod(elem_orig.size())) * elem_orig.element_size()) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 38a3a01fbb46..bdf3db12d635 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -6,10 +6,11 @@ import torch from torch import Tensor -WITH_PT2 = int(torch.__version__.split('.')[0]) >= 2 -WITH_PT111 = WITH_PT2 or int(torch.__version__.split('.')[1]) >= 11 -WITH_PT112 = WITH_PT2 or int(torch.__version__.split('.')[1]) >= 12 -WITH_PT113 = WITH_PT2 or int(torch.__version__.split('.')[1]) >= 13 +WITH_PT20 = int(torch.__version__.split('.')[0]) >= 2 +WITH_PT21 = WITH_PT20 and int(torch.__version__.split('.')[1]) >= 1 +WITH_PT111 = WITH_PT20 or int(torch.__version__.split('.')[1]) >= 11 +WITH_PT112 = WITH_PT20 or int(torch.__version__.split('.')[1]) >= 12 +WITH_PT113 = WITH_PT20 or int(torch.__version__.split('.')[1]) >= 13 if not hasattr(torch, 'sparse_csc'): torch.sparse_csc = -1 @@ -17,7 +18,7 @@ try: import pyg_lib # noqa WITH_PYG_LIB = True - WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul') + WITH_GMM = WITH_PT20 and hasattr(pyg_lib.ops, 'grouped_matmul') WITH_SEGMM = hasattr(pyg_lib.ops, 'segment_matmul') if WITH_SEGMM and 'pytest' in sys.modules and torch.cuda.is_available(): # NOTE `segment_matmul` is currently bugged on older NVIDIA cards which diff --git a/torch_geometric/utils/spmm.py b/torch_geometric/utils/spmm.py index b9dd77b99c33..d951b7e0ebf8 100644 --- a/torch_geometric/utils/spmm.py +++ b/torch_geometric/utils/spmm.py @@ -43,7 +43,7 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: if src.nnz() == 0: return other.new_zeros(src.size(0), other.size(1)) - if (torch_geometric.typing.WITH_PT2 and other.dim() == 2 + if (torch_geometric.typing.WITH_PT20 and other.dim() == 2 and not src.is_cuda() and not src.requires_grad()): # Use optimized PyTorch `torch.sparse.mm` path: csr = src.to_torch_sparse_csr_tensor().to(other.dtype) @@ -56,7 +56,7 @@ def spmm(src: Adj, other: Tensor, reduce: str = "sum") -> Tensor: # `torch.sparse.mm` only supports reductions on CPU for PyTorch>=2.0. # This will currently throw on error for CUDA tensors. 
- if torch_geometric.typing.WITH_PT2: + if torch_geometric.typing.WITH_PT20: if src.is_cuda and (reduce == 'min' or reduce == 'max'): raise NotImplementedError(f"`{reduce}` reduction is not yet " From 1be22175fa60d6d4437d65168bab74bcfda8b849 Mon Sep 17 00:00:00 2001 From: Chendi Qian <32506156+chendiqian@users.noreply.github.com> Date: Thu, 24 Aug 2023 14:49:09 +0200 Subject: [PATCH 1424/2432] Out-source PPR calculation to `torch_geometric.utils` (#7917) Add PPR calculation --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey Co-authored-by: Chendi Qian <32506156+Spazierganger@users.noreply.github.com> --- CHANGELOG.md | 1 + test/utils/test_ppr.py | 28 ++++++ torch_geometric/transforms/gdc.py | 131 +++------------------------- torch_geometric/utils/__init__.py | 2 + torch_geometric/utils/ppr.py | 138 ++++++++++++++++++++++++++++++ 5 files changed, 181 insertions(+), 119 deletions(-) create mode 100644 test/utils/test_ppr.py create mode 100644 torch_geometric/utils/ppr.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 4946196c040e..fdf2ce5f8957 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `utils.ppr` for personalized PageRank computation ([#7917](https://github.com/pyg-team/pytorch_geometric/pull/7917)) - Added support for XPU device in `PrefetchLoader` ([#7918](https://github.com/pyg-team/pytorch_geometric/pull/7918)) - Added support for floating-point slicing in `Dataset`, *e.g.*, `dataset[:0.9]` ([#7915](https://github.com/pyg-team/pytorch_geometric/pull/7915)) - Added nightly GPU tests ([#7895](https://github.com/pyg-team/pytorch_geometric/pull/7895)) diff --git a/test/utils/test_ppr.py b/test/utils/test_ppr.py new file mode 100644 index 000000000000..ca1a268d7735 --- /dev/null +++ b/test/utils/test_ppr.py @@ -0,0 +1,28 @@ +import pytest +import torch + +from torch_geometric.datasets import KarateClub +from torch_geometric.testing import withPackage +from torch_geometric.utils import get_ppr + + +@withPackage('numba') +@pytest.mark.parametrize('target', [None, torch.tensor([0, 4, 5, 6])]) +def test_get_ppr(target): + data = KarateClub()[0] + + edge_index, edge_weight = get_ppr( + data.edge_index, + alpha=0.1, + eps=1e-5, + target=target, + ) + + assert edge_index.size(0) == 2 + assert edge_index.size(1) == edge_weight.numel() + + min_row = 0 if target is None else target.min() + max_row = data.num_nodes - 1 if target is None else target.max() + assert edge_index[0].min() == min_row and edge_index[0].max() == max_row + assert edge_index[1].min() >= 0 and edge_index[1].max() < data.num_nodes + assert edge_weight.min() >= 0.0 and edge_weight.max() <= 1.0 diff --git a/torch_geometric/transforms/gdc.py b/torch_geometric/transforms/gdc.py index 348be9fe6e5d..e5c600dcc47d 100644 --- a/torch_geometric/transforms/gdc.py +++ b/torch_geometric/transforms/gdc.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, List, Tuple +from typing import Any, Dict, Tuple import numpy as np import torch @@ -11,11 +11,12 @@ from torch_geometric.utils import ( add_self_loops, coalesce, + get_ppr, is_undirected, scatter, + sort_edge_index, to_dense_adj, ) -from torch_geometric.utils.sparse import index2ptr @functional_transform('gdc') @@ -83,9 +84,6 @@ def __init__( avg_degree=64), exact: bool = True, ): - - self.__calc_ppr__ = get_calc_ppr() - self.self_loop_weight = self_loop_weight self.normalization_in = normalization_in 
self.normalization_out = normalization_out @@ -303,20 +301,16 @@ def diffusion_matrix_approx( _, col = edge_index deg = scatter(edge_weight, col, 0, num_nodes, reduce='sum') - edge_index_np = edge_index.cpu().numpy() - - # Assumes sorted and coalesced edge indices: - indptr = index2ptr(edge_index[0], num_nodes).cpu().numpy() - out_degree = indptr[1:] - indptr[:-1] + edge_index, edge_weight = get_ppr( + edge_index, + alpha=kwargs['alpha'], + eps=kwargs['eps'], + num_nodes=num_nodes, + ) - neighbors, neighbor_weights = self.__calc_ppr__( - indptr, edge_index_np[1], out_degree, kwargs['alpha'], - kwargs['eps']) - ppr_normalization = 'col' if normalization == 'col' else 'row' - edge_index, edge_weight = self.__neighbors_to_graph__( - neighbors, neighbor_weights, ppr_normalization, - device=edge_index.device) - edge_index = edge_index.to(torch.long) + if normalization == 'col': + edge_index, edge_weight = sort_edge_index( + edge_index.flip([0]), edge_weight, num_nodes) if normalization == 'sym': # We can change the normalization from row-normalized to @@ -501,104 +495,3 @@ def __calculate_eps__( left = sorted_edges[avg_degree * num_nodes - 1] right = sorted_edges[avg_degree * num_nodes] return (left + right) / 2.0 - - def __neighbors_to_graph__( - self, - neighbors: List[List[int]], - neighbor_weights: List[List[float]], - normalization: str = 'row', - device: torch.device = 'cpu', - ) -> Tuple[Tensor, Tensor]: - r"""Combine a list of neighbors and neighbor weights to create a sparse - graph. - - Args: - neighbors (List[List[int]]): List of neighbors for each node. - neighbor_weights (List[List[float]]): List of weights for the - neighbors of each node. - normalization (str): Normalization of resulting matrix - (options: :obj:`"row"`, :obj:`"col"`). (default: :obj:`"row"`) - device (torch.device): Device to create output tensors on. - (default: :obj:`"cpu"`) - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - """ - edge_weight = torch.from_numpy(np.concatenate(neighbor_weights)) - edge_weight = edge_weight.to(device, torch.get_default_dtype()) - i = np.repeat(np.arange(len(neighbors)), - np.fromiter(map(len, neighbors), dtype=int)) - j = np.concatenate(neighbors) - if normalization == 'col': - edge_index = torch.from_numpy(np.vstack([j, i])).to(device) - N = len(neighbors) - edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N) - elif normalization == 'row': - edge_index = torch.from_numpy(np.vstack([i, j])).to(device) - else: - raise ValueError( - f"PPR matrix normalization {normalization} unknown.") - return edge_index, edge_weight - - -def get_calc_ppr(): - import numba - - @numba.jit(nopython=True, parallel=True) - def calc_ppr( - indptr: np.ndarray, - indices: np.ndarray, - out_degree: np.ndarray, - alpha: float, - eps: float, - ) -> Tuple[List[List[int]], List[List[float]]]: - r"""Calculate the personalized PageRank vector for all nodes - using a variant of the Andersen algorithm - (see Andersen et al. :Local Graph Partitioning using PageRank Vectors.) - - Args: - indptr (np.ndarray): Index pointer for the sparse matrix - (CSR-format). - indices (np.ndarray): Indices of the sparse matrix entries - (CSR-format). - out_degree (np.ndarray): Out-degree of each node. - alpha (float): Alpha of the PageRank to calculate. - eps (float): Threshold for PPR calculation stopping criterion - (:obj:`edge_weight >= eps * out_degree`). 
- - :rtype: (:class:`List[List[int]]`, :class:`List[List[float]]`) - """ - - alpha_eps = alpha * eps - js = [[0]] * len(out_degree) - vals = [[0.]] * len(out_degree) - for inode_uint in numba.prange(len(out_degree)): - inode = numba.int64(inode_uint) - p = {inode: 0.0} - r = {} - r[inode] = alpha - q = [inode] - while len(q) > 0: - unode = q.pop() - - res = r[unode] if unode in r else 0 - if unode in p: - p[unode] += res - else: - p[unode] = res - r[unode] = 0 - for vnode in indices[indptr[unode]:indptr[unode + 1]]: - _val = (1 - alpha) * res / out_degree[unode] - if vnode in r: - r[vnode] += _val - else: - r[vnode] = _val - - res_vnode = r[vnode] if vnode in r else 0 - if res_vnode >= alpha_eps * out_degree[vnode]: - if vnode not in q: - q.append(vnode) - js[inode] = list(p.keys()) - vals[inode] = list(p.values()) - return js, vals - - return calc_ppr diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 9609d13c5dca..46cc541ae509 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -51,6 +51,7 @@ from .tree_decomposition import tree_decomposition from .embedding import get_embeddings from .trim_to_layer import trim_to_layer +from .ppr import get_ppr from .train_test_split_edges import train_test_split_edges __all__ = [ @@ -135,6 +136,7 @@ 'tree_decomposition', 'get_embeddings', 'trim_to_layer', + 'get_ppr', 'train_test_split_edges', ] diff --git a/torch_geometric/utils/ppr.py b/torch_geometric/utils/ppr.py new file mode 100644 index 000000000000..c3e4b34a2329 --- /dev/null +++ b/torch_geometric/utils/ppr.py @@ -0,0 +1,138 @@ +from itertools import chain +from typing import Callable, List, Optional, Tuple + +import numpy as np +import torch +from torch import Tensor + +from torch_geometric.utils import is_torch_sparse_tensor, to_torch_csr_tensor +from torch_geometric.utils.num_nodes import maybe_num_nodes + +try: + import numba + WITH_NUMBA = True +except ImportError: # pragma: no cover + WITH_NUMBA = False + + +def _get_ppr( # pragma: no cover + rowptr: np.ndarray, + col: np.ndarray, + alpha: float, + eps: float, + target: Optional[np.ndarray] = None, +) -> Tuple[List[List[int]], List[List[float]]]: + + num_nodes = len(rowptr) - 1 if target is None else len(target) + alpha_eps = alpha * eps + js = [[0]] * num_nodes + vals = [[0.]] * num_nodes + + for inode_uint in numba.prange(num_nodes): + if target is None: + inode = numba.int64(inode_uint) + else: + inode = target[inode_uint] + + p = {inode: 0.0} + r = {} + r[inode] = alpha + q = [inode] + + while len(q) > 0: + unode = q.pop() + + res = r[unode] if unode in r else 0 + if unode in p: + p[unode] += res + else: + p[unode] = res + + r[unode] = 0 + start, end = rowptr[unode], rowptr[unode + 1] + ucount = end - start + + for vnode in col[start:end]: + _val = (1 - alpha) * res / ucount + if vnode in r: + r[vnode] += _val + else: + r[vnode] = _val + + res_vnode = r[vnode] if vnode in r else 0 + vcount = rowptr[vnode + 1] - rowptr[vnode] + if res_vnode >= alpha_eps * vcount: + if vnode not in q: + q.append(vnode) + + js[inode_uint] = list(p.keys()) + vals[inode_uint] = list(p.values()) + + return js, vals + + +_get_ppr_numba: Optional[Callable] = None + + +def get_ppr( + edge_index: Tensor, + alpha: float = 0.2, + eps: float = 1e-5, + target: Optional[Tensor] = None, + num_nodes: Optional[int] = None, +) -> Tuple[Tensor, Tensor]: + r"""Calculates the personalized PageRank (PPR) vector for all or a subset + of nodes using a variant of the `Andersen algorithm + `_. 
+ + Args: + edge_index (torch.Tensor): The indices of the graph. + alpha (float, optional): The alpha value of the PageRank algorithm. + (default: :obj:`0.2`) + eps (float, optional): The threshold for stopping the PPR calculation + (:obj:`edge_weight >= eps * out_degree`). (default: :obj:`1e-5`) + target (torch.Tensor, optional): The target nodes to compute PPR for. + If not given, calculates PPR vectors for all nodes. + (default: :obj:`None`) + num_nodes (int, optional): The number of nodes. (default: :obj:`None`) + + :rtype: (:class:`torch.Tensor`, :class:`torch.Tensor`) + """ + if not WITH_NUMBA: # pragma: no cover + raise ImportError("'get_ppr' requires the 'numba' package") + + global _get_ppr_numba + if _get_ppr_numba is None: + _get_ppr_numba = numba.jit(nopython=True, parallel=True)(_get_ppr) + + num_nodes = maybe_num_nodes(edge_index, num_nodes) + + if not is_torch_sparse_tensor(edge_index): + size = (num_nodes, num_nodes) + edge_index = to_torch_csr_tensor(edge_index, size=size) + else: + edge_index = edge_index.to_sparse_csr() + + assert edge_index.layout == torch.sparse_csr + + rowptr, col = edge_index.crow_indices(), edge_index.col_indices() + + cols, weights = _get_ppr_numba( + rowptr.cpu().numpy(), + col.cpu().numpy(), + alpha, + eps, + None if target is None else target.cpu().numpy(), + ) + + device = edge_index.device + col = torch.tensor(list(chain.from_iterable(cols)), device=device) + weight = torch.tensor(list(chain.from_iterable(weights)), device=device) + deg = torch.tensor([len(value) for value in cols], device=device) + + row = torch.arange(num_nodes) if target is None else target + row = row.repeat_interleave(deg, output_size=col.numel()) + + edge_index = torch.stack([row, col], dim=0) + + return edge_index, weight From d6951e79c70d22fddbedbbd49f0c72628319f4a2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 24 Aug 2023 15:12:30 +0200 Subject: [PATCH 1425/2432] Added a test with `to_hetero` for `Sequential` models (#7927) --- test/nn/test_sequential.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/nn/test_sequential.py b/test/nn/test_sequential.py index 086e194c377c..43adf1b27a8a 100644 --- a/test/nn/test_sequential.py +++ b/test/nn/test_sequential.py @@ -9,8 +9,10 @@ GCNConv, JumpingKnowledge, MessagePassing, + SAGEConv, Sequential, global_mean_pool, + to_hetero, ) from torch_geometric.typing import SparseTensor @@ -142,3 +144,33 @@ def test_sequential_with_ordered_dict(): x = model(x, edge_index) assert x.size() == (4, 64) + + +def test_sequential_to_hetero(): + model = Sequential('x, edge_index', [ + (SAGEConv((-1, -1), 32), 'x, edge_index -> x1'), + ReLU(), + (SAGEConv((-1, -1), 64), 'x1, edge_index -> x2'), + ReLU(), + ]) + + x_dict = { + 'paper': torch.randn(100, 16), + 'author': torch.randn(100, 16), + } + edge_index_dict = { + ('paper', 'cites', 'paper'): + torch.randint(100, (2, 200), dtype=torch.long), + ('paper', 'written_by', 'author'): + torch.randint(100, (2, 200), dtype=torch.long), + ('author', 'writes', 'paper'): + torch.randint(100, (2, 200), dtype=torch.long), + } + metadata = list(x_dict.keys()), list(edge_index_dict.keys()) + + model = to_hetero(model, metadata, debug=False) + + out_dict = model(x_dict, edge_index_dict) + assert isinstance(out_dict, dict) and len(out_dict) == 2 + assert out_dict['paper'].size() == (100, 64) + assert out_dict['author'].size() == (100, 64) From 143487bc18a5bf3e7968c2637823791781e6135a Mon Sep 17 00:00:00 2001 From: Aniket Saxena 
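A minimal end-to-end sketch of the explainer at its new location, mirroring the relocated test; the two-layer GCN and the toy graph are illustrative placeholders only, and the old `torch_geometric.contrib.explain.GraphMaskExplainer` import still resolves to the same class through the deprecation wrapper added in this patch:

```python
import torch

from torch_geometric.explain import Explainer, GraphMaskExplainer  # new path
from torch_geometric.nn import GCNConv


class GCN(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = GCNConv(3, 16)
        self.conv2 = GCNConv(16, 7)

    def forward(self, x, edge_index):
        return self.conv2(self.conv1(x, edge_index).relu(), edge_index)


x = torch.randn(8, 3)
edge_index = torch.tensor([
    [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7],
    [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6],
])

explainer = Explainer(
    model=GCN(),
    algorithm=GraphMaskExplainer(2, epochs=5),  # 2 message-passing layers
    explanation_type='model',
    node_mask_type='attributes',
    edge_mask_type='object',
    model_config=dict(mode='multiclass_classification', task_level='node',
                      return_type='raw'),
)
explanation = explainer(x, edge_index, index=2)
print(explanation.node_mask.shape, explanation.edge_mask.shape)
```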
<92912434+fork123aniket@users.noreply.github.com> Date: Fri, 25 Aug 2023 21:39:59 +0530 Subject: [PATCH 1426/2432] Moved `GraphMaskExplainer` to `torch_geometric.explain` (#7779) Co-authored-by: Akihiro Nitta --- CHANGELOG.md | 1 + .../graphmask_explainer.py | 3 +- .../explain/test_graphmask_explainer.py | 77 ------ .../algorithm/test_graphmask_explainer.py | 235 ++++++++++++++++++ torch_geometric/contrib/explain/__init__.py | 8 +- torch_geometric/explain/algorithm/__init__.py | 2 + .../algorithm}/graphmask_explainer.py | 0 7 files changed, 246 insertions(+), 80 deletions(-) rename examples/{contrib => explain}/graphmask_explainer.py (96%) delete mode 100644 test/contrib/explain/test_graphmask_explainer.py create mode 100644 test/explain/algorithm/test_graphmask_explainer.py rename torch_geometric/{contrib/explain => explain/algorithm}/graphmask_explainer.py (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index fdf2ce5f8957..a9e7f8fed592 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330)) - Fixed device issue in `PNAConv.get_degree_histogram` ([#7830](https://github.com/pyg-team/pytorch_geometric/pull/7830)) - Fixed the shape of `edge_label_time` when using temporal sampling on homogeneous graphs ([#7807](https://github.com/pyg-team/pytorch_geometric/pull/7807)) +- Moved `torch_geometric.contrib.explain.GraphMaskExplainer` to `torch_geometric.explain.algorithm.GraphMaskExplainer` ([#7779](https://github.com/pyg-team/pytorch_geometric/pull/7779)) - Made `FieldStatus` enum picklable to avoid `PicklingError` in a multi-process setting ([#7808](https://github.com/pyg-team/pytorch_geometric/pull/7808)) - Fixed `edge_label_index` computation in `LinkNeighborLoader` for the homogeneous+`disjoint` mode ([#7791](https://github.com/pyg-team/pytorch_geometric/pull/7791)) - Fixed `CaptumExplainer` for `binary_classification` tasks ([#7787](https://github.com/pyg-team/pytorch_geometric/pull/7787)) diff --git a/examples/contrib/graphmask_explainer.py b/examples/explain/graphmask_explainer.py similarity index 96% rename from examples/contrib/graphmask_explainer.py rename to examples/explain/graphmask_explainer.py index 906fa4dfd367..88eb8dbe9c4c 100644 --- a/examples/contrib/graphmask_explainer.py +++ b/examples/explain/graphmask_explainer.py @@ -3,9 +3,8 @@ import torch import torch.nn.functional as F -from torch_geometric.contrib.explain import GraphMaskExplainer from torch_geometric.datasets import Planetoid -from torch_geometric.explain import Explainer +from torch_geometric.explain import Explainer, GraphMaskExplainer from torch_geometric.nn import GATConv, GCNConv device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') diff --git a/test/contrib/explain/test_graphmask_explainer.py b/test/contrib/explain/test_graphmask_explainer.py deleted file mode 100644 index ba5f2b622a01..000000000000 --- a/test/contrib/explain/test_graphmask_explainer.py +++ /dev/null @@ -1,77 +0,0 @@ -import pytest -import torch - -from torch_geometric.contrib.explain import GraphMaskExplainer -from torch_geometric.explain import Explainer -from torch_geometric.explain.config import ( - ModelConfig, - ModelMode, - ModelTaskLevel, -) -from torch_geometric.nn import GCNConv, global_add_pool - - -class GCN(torch.nn.Module): - def __init__(self, model_config: ModelConfig): - super().__init__() - 
self.model_config = model_config - assert model_config.mode == ModelMode.binary_classification - - self.conv1 = GCNConv(3, 16) - self.conv2 = GCNConv(16, 1) - - def forward(self, x, edge_index, batch=None, edge_label_index=None): - x = self.conv1(x, edge_index).relu() - x = self.conv2(x, edge_index) - - if self.model_config.task_level == ModelTaskLevel.graph: - x = global_add_pool(x, batch) - elif self.model_config.task_level == ModelTaskLevel.edge: - assert edge_label_index is not None - x = x[edge_label_index[0]] * x[edge_label_index[1]] - - return x - - -x = torch.randn(8, 3) -edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7], - [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6], -]) -batch = torch.tensor([0, 0, 0, 1, 1, 2, 2, 2]) -edge_label_index = torch.tensor([[0, 1, 2], [3, 4, 5]]) - - -@pytest.mark.parametrize('task_level', ['node', 'edge', 'graph']) -def test_graph_mask_explainer(task_level): - model_config = ModelConfig( - mode='binary_classification', - task_level=task_level, - return_type='raw', - ) - - model = GCN(model_config) - - explainer = Explainer( - model=model, - algorithm=GraphMaskExplainer(2, epochs=5, log=False), - explanation_type='model', - node_mask_type='attributes', - edge_mask_type='object', - model_config=model_config, - ) - - explanation = explainer( - x, - edge_index, - batch=batch, - edge_label_index=edge_label_index, - ) - - assert explanation.node_mask.size() == explanation.x.size() - assert explanation.node_mask.min() >= 0 - assert explanation.node_mask.max() <= 1 - - assert explanation.edge_mask.size() == (explanation.num_edges, ) - assert explanation.edge_mask.min() >= 0 - assert explanation.edge_mask.max() <= 1 diff --git a/test/explain/algorithm/test_graphmask_explainer.py b/test/explain/algorithm/test_graphmask_explainer.py new file mode 100644 index 000000000000..e9dec8e3765d --- /dev/null +++ b/test/explain/algorithm/test_graphmask_explainer.py @@ -0,0 +1,235 @@ +import pytest +import torch + +from torch_geometric.explain import Explainer, Explanation, GraphMaskExplainer +from torch_geometric.explain.config import ( + MaskType, + ModelConfig, + ModelMode, + ModelReturnType, + ModelTaskLevel, +) +from torch_geometric.nn import GCNConv, global_add_pool + + +class GCN(torch.nn.Module): + def __init__(self, model_config: ModelConfig): + super().__init__() + self.model_config = model_config + + if model_config.mode == ModelMode.multiclass_classification: + out_channels = 7 + else: + out_channels = 1 + + self.conv1 = GCNConv(3, 16) + self.conv2 = GCNConv(16, out_channels) + + def forward(self, x, edge_index, batch=None, edge_label_index=None): + x = self.conv1(x, edge_index).relu() + x = self.conv2(x, edge_index) + + if self.model_config.task_level == ModelTaskLevel.graph: + x = global_add_pool(x, batch) + elif self.model_config.task_level == ModelTaskLevel.edge: + assert edge_label_index is not None + x = x[edge_label_index[0]] * x[edge_label_index[1]] + + if self.model_config.mode == ModelMode.binary_classification: + if self.model_config.return_type == ModelReturnType.probs: + x = x.sigmoid() + elif self.model_config.mode == ModelMode.multiclass_classification: + if self.model_config.return_type == ModelReturnType.probs: + x = x.softmax(dim=-1) + elif self.model_config.return_type == ModelReturnType.log_probs: + x = x.log_softmax(dim=-1) + + return x + + +def check_explanation( + edge_mask_type: MaskType, + node_mask_type: MaskType, + explanation: Explanation, +): + if node_mask_type == MaskType.attributes: + assert 
explanation.node_mask.size() == explanation.x.size() + assert explanation.node_mask.min() >= 0 + assert explanation.node_mask.max() <= 1 + elif node_mask_type == MaskType.object: + assert explanation.node_mask.size() == (explanation.num_nodes, 1) + assert explanation.node_mask.min() >= 0 + assert explanation.node_mask.max() <= 1 + elif node_mask_type == MaskType.common_attributes: + assert explanation.node_mask.size() == (1, explanation.num_features) + assert explanation.node_mask.min() >= 0 + assert explanation.node_mask.max() <= 1 + + if edge_mask_type == MaskType.object: + assert explanation.edge_mask.size() == (explanation.num_edges, ) + assert explanation.edge_mask.min() >= 0 + assert explanation.edge_mask.max() <= 1 + + +node_mask_types = [ + MaskType.object, + MaskType.common_attributes, + MaskType.attributes, +] +edge_mask_types = [ + MaskType.object, + None, +] + +x = torch.randn(8, 3) +edge_index = torch.tensor([ + [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7], + [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6], +]) +batch = torch.tensor([0, 0, 0, 1, 1, 2, 2, 2]) +edge_label_index = torch.tensor([[0, 1, 2], [3, 4, 5]]) + + +@pytest.mark.parametrize('edge_mask_type', edge_mask_types) +@pytest.mark.parametrize('node_mask_type', node_mask_types) +@pytest.mark.parametrize('explanation_type', ['model', 'phenomenon']) +@pytest.mark.parametrize('task_level', ['node', 'edge', 'graph']) +@pytest.mark.parametrize('return_type', ['probs', 'raw']) +@pytest.mark.parametrize('index', [None, 2, torch.arange(3)]) +def test_graph_mask_explainer_binary_classification( + edge_mask_type, + node_mask_type, + explanation_type, + task_level, + return_type, + index, +): + model_config = ModelConfig( + mode='binary_classification', + task_level=task_level, + return_type=return_type, + ) + + model = GCN(model_config) + + target = None + if explanation_type == 'phenomenon': + with torch.no_grad(): + out = model(x, edge_index, batch, edge_label_index) + if model_config.return_type == ModelReturnType.raw: + target = (out > 0).long().view(-1) + if model_config.return_type == ModelReturnType.probs: + target = (out > 0.5).long().view(-1) + + explainer = Explainer( + model=model, + algorithm=GraphMaskExplainer(2, epochs=5), + explanation_type=explanation_type, + node_mask_type=node_mask_type, + edge_mask_type=edge_mask_type, + model_config=model_config, + ) + + explanation = explainer( + x, + edge_index, + target=target, + index=index, + batch=batch, + edge_label_index=edge_label_index, + ) + + check_explanation(edge_mask_type, node_mask_type, explanation) + + +@pytest.mark.parametrize('edge_mask_type', edge_mask_types) +@pytest.mark.parametrize('node_mask_type', node_mask_types) +@pytest.mark.parametrize('explanation_type', ['model', 'phenomenon']) +@pytest.mark.parametrize('task_level', ['node', 'edge', 'graph']) +@pytest.mark.parametrize('return_type', ['log_probs', 'probs', 'raw']) +@pytest.mark.parametrize('index', [None, 2, torch.arange(3)]) +def test_graph_mask_explainer_multiclass_classification( + edge_mask_type, + node_mask_type, + explanation_type, + task_level, + return_type, + index, +): + model_config = ModelConfig( + mode='multiclass_classification', + task_level=task_level, + return_type=return_type, + ) + + model = GCN(model_config) + + target = None + if explanation_type == 'phenomenon': + with torch.no_grad(): + target = model(x, edge_index, batch, edge_label_index).argmax(-1) + + explainer = Explainer( + model=model, + algorithm=GraphMaskExplainer(2, epochs=5), + explanation_type=explanation_type, + 
node_mask_type=node_mask_type, + edge_mask_type=edge_mask_type, + model_config=model_config, + ) + + explanation = explainer( + x, + edge_index, + target=target, + index=index, + batch=batch, + edge_label_index=edge_label_index, + ) + + check_explanation(edge_mask_type, node_mask_type, explanation) + + +@pytest.mark.parametrize('edge_mask_type', edge_mask_types) +@pytest.mark.parametrize('node_mask_type', node_mask_types) +@pytest.mark.parametrize('explanation_type', ['model', 'phenomenon']) +@pytest.mark.parametrize('task_level', ['node', 'edge', 'graph']) +@pytest.mark.parametrize('index', [None, 2, torch.arange(3)]) +def test_graph_mask_explainer_regression( + edge_mask_type, + node_mask_type, + explanation_type, + task_level, + index, +): + model_config = ModelConfig( + mode='regression', + task_level=task_level, + ) + + model = GCN(model_config) + + target = None + if explanation_type == 'phenomenon': + with torch.no_grad(): + target = model(x, edge_index, batch, edge_label_index) + + explainer = Explainer( + model=model, + algorithm=GraphMaskExplainer(2, epochs=5), + explanation_type=explanation_type, + node_mask_type=node_mask_type, + edge_mask_type=edge_mask_type, + model_config=model_config, + ) + + explanation = explainer( + x, + edge_index, + target=target, + index=index, + batch=batch, + edge_label_index=edge_label_index, + ) + + check_explanation(edge_mask_type, node_mask_type, explanation) diff --git a/torch_geometric/contrib/explain/__init__.py b/torch_geometric/contrib/explain/__init__.py index 3f66ffcc6d8a..aabe37cf13dc 100644 --- a/torch_geometric/contrib/explain/__init__.py +++ b/torch_geometric/contrib/explain/__init__.py @@ -1,5 +1,11 @@ -from .graphmask_explainer import GraphMaskExplainer from .pgm_explainer import PGMExplainer +from torch_geometric.explain.algorithm.graphmask_explainer import ( + GraphMaskExplainer as NewGraphMaskExplainer) +from torch_geometric.deprecation import deprecated + +GraphMaskExplainer = deprecated( + "use 'torch_geometric.explain.algorithm.GraphMaskExplainer' instead", )( + NewGraphMaskExplainer) __all__ = classes = [ 'GraphMaskExplainer', diff --git a/torch_geometric/explain/algorithm/__init__.py b/torch_geometric/explain/algorithm/__init__.py index e9b04adb8160..a462a5777edb 100644 --- a/torch_geometric/explain/algorithm/__init__.py +++ b/torch_geometric/explain/algorithm/__init__.py @@ -4,6 +4,7 @@ from .captum_explainer import CaptumExplainer from .pg_explainer import PGExplainer from .attention_explainer import AttentionExplainer +from .graphmask_explainer import GraphMaskExplainer __all__ = classes = [ 'ExplainerAlgorithm', @@ -12,4 +13,5 @@ 'CaptumExplainer', 'PGExplainer', 'AttentionExplainer', + 'GraphMaskExplainer', ] diff --git a/torch_geometric/contrib/explain/graphmask_explainer.py b/torch_geometric/explain/algorithm/graphmask_explainer.py similarity index 100% rename from torch_geometric/contrib/explain/graphmask_explainer.py rename to torch_geometric/explain/algorithm/graphmask_explainer.py From c78dbaf703a22d66fb4605210e1ff4d694145386 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 28 Aug 2023 07:42:49 +0200 Subject: [PATCH 1427/2432] Fix broken link in documentation (#7940) --- torch_geometric/contrib/explain/__init__.py | 4 ++-- torch_geometric/explain/algorithm/graphmask_explainer.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/torch_geometric/contrib/explain/__init__.py b/torch_geometric/contrib/explain/__init__.py index aabe37cf13dc..14a3ff0ac9b4 100644 --- 
a/torch_geometric/contrib/explain/__init__.py +++ b/torch_geometric/contrib/explain/__init__.py @@ -1,13 +1,13 @@ +from torch_geometric.deprecation import deprecated + from .pgm_explainer import PGMExplainer from torch_geometric.explain.algorithm.graphmask_explainer import ( GraphMaskExplainer as NewGraphMaskExplainer) -from torch_geometric.deprecation import deprecated GraphMaskExplainer = deprecated( "use 'torch_geometric.explain.algorithm.GraphMaskExplainer' instead", )( NewGraphMaskExplainer) __all__ = classes = [ - 'GraphMaskExplainer', 'PGMExplainer', ] diff --git a/torch_geometric/explain/algorithm/graphmask_explainer.py b/torch_geometric/explain/algorithm/graphmask_explainer.py index 8a034b87ce0a..27ace22ae9b4 100644 --- a/torch_geometric/explain/algorithm/graphmask_explainer.py +++ b/torch_geometric/explain/algorithm/graphmask_explainer.py @@ -46,9 +46,9 @@ class GraphMaskExplainer(ExplainerAlgorithm): .. note:: For an example of using :class:`GraphMaskExplainer`, - see `examples/contrib/graphmask_explainer.py + see `examples/explain/graphmask_explainer.py `_. + /explain/graphmask_explainer.py>`_. Args: num_layers (int): The number of layers to use. From 520cf255044a74395349ec42b4b290d0962dbb26 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 28 Aug 2023 07:59:49 +0200 Subject: [PATCH 1428/2432] Fix `from_networkx` conversion from `nx.stochastic_block_model` graphs (#7941) --- CHANGELOG.md | 1 + test/utils/test_convert.py | 19 +++++++++++++++++++ torch_geometric/utils/convert.py | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9e7f8fed592..6f44b775b297 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Changed +- Fixed `from_networkx` conversion from `nx.stochastic_block_model` graphs ([#7941](https://github.com/pyg-team/pytorch_geometric/pull/7941)) - Fixed the usage of `bias_initializer` in `HeteroLinear` ([#7923](https://github.com/pyg-team/pytorch_geometric/pull/7923)) - Fixed broken links in `HGBDataset` ([#7907](https://github.com/pyg-team/pytorch_geometric/pull/7907)) - Fixed an issue where `SetTransformerAggregation` produced `NaN` values for isolates nodes ([#7902](https://github.com/pyg-team/pytorch_geometric/pull/7902)) diff --git a/test/utils/test_convert.py b/test/utils/test_convert.py index 8e4b8280a98f..5a259ee26f07 100644 --- a/test/utils/test_convert.py +++ b/test/utils/test_convert.py @@ -323,6 +323,25 @@ def test_from_networkx_subgraph_convert(): assert sub_edge_index_1.tolist() == sub_edge_index_2.tolist() +@withPackage('networkx') +@pytest.mark.parametrize('n', [100]) +@pytest.mark.parametrize('p', [0.8]) +@pytest.mark.parametrize('q', [0.2]) +def test_from_networkx_sbm(n, p, q): + import networkx as nx + G = nx.stochastic_block_model( + sizes=[n // 2, n // 2], + p=[[p, q], [q, p]], + seed=0, + directed=False, + ) + + data = from_networkx(G) + assert data.num_nodes == 100 + assert torch.equal(data.block[:50], data.block.new_zeros(50)) + assert torch.equal(data.block[50:], data.block.new_ones(50)) + + @withPackage('networkit') def test_to_networkit_vice_versa(): edge_index = torch.tensor([[0, 1], [1, 0]]) diff --git a/torch_geometric/utils/convert.py b/torch_geometric/utils/convert.py index e0fa499b5799..dce666aa4b7b 100644 --- a/torch_geometric/utils/convert.py +++ b/torch_geometric/utils/convert.py @@ -258,7 +258,7 @@ def from_networkx( else: try: data[key] = torch.tensor(value) - except (ValueError, TypeError): + except (ValueError, TypeError, RuntimeError): pass data['edge_index'] = edge_index.view(2, -1) From d3ac316c7d3433a864dfb0e06bb83b2ee28501f7 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Mon, 28 Aug 2023 07:04:03 +0100 Subject: [PATCH 1429/2432] Drop Python 3.7 support (#7939) Drops Python 3.7 support as PyTorch 2.0.0 also did (https://github.com/pytorch/pytorch/issues/80513) and Python 3.7 reached EOL (https://devguide.python.org/versions/). 
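A minimal sketch of the kind of cleanup this unlocks (hypothetical class for illustration only, mirroring the `HydroNet` change in this patch): with Python >= 3.8 as the minimum supported version, `functools.cached_property` is always importable, so the `lru_cache`-based `property` fallback that was only needed on Python 3.7 can be dropped.

```python
from functools import cached_property  # Available since Python 3.8.


class FileBackedDataset:
    """Hypothetical example; not part of this patch."""
    def __init__(self, paths):
        self.paths = paths

    @cached_property
    def num_files(self) -> int:
        # Computed once on first access and cached on the instance.
        # On Python 3.7 this needed a `property(fget=lru_cache(...))` shim.
        return len(self.paths)


dataset = FileBackedDataset(['a.npz', 'b.npz'])
assert dataset.num_files == 2  # Second access reuses the cached value.
```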
--- .github/workflows/building_pyg_conda.yml | 4 +--- .github/workflows/building_rusty1s_conda.yml | 4 +--- CHANGELOG.md | 1 + README.md | 2 +- docs/source/install/installation.rst | 2 +- torch_geometric/datasets/hydro_net.py | 9 +-------- torch_geometric/io/planetoid.py | 8 ++------ 7 files changed, 8 insertions(+), 22 deletions(-) diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml index 7bb78c34bc6d..e92f6300c812 100644 --- a/.github/workflows/building_pyg_conda.yml +++ b/.github/workflows/building_pyg_conda.yml @@ -11,7 +11,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python-version: ['3.8', '3.9', '3.10', '3.11'] torch-version: [1.12.0, 1.13.0, 2.0.0] cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] exclude: @@ -29,8 +29,6 @@ jobs: cuda-version: 'cu113' - torch-version: 1.13.0 cuda-version: 'cu118' - - torch-version: 2.0.0 - python-version: '3.7' - torch-version: 2.0.0 cuda-version: 'cu102' - torch-version: 2.0.0 diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml index 3a2f369bb8fb..973b316e460c 100644 --- a/.github/workflows/building_rusty1s_conda.yml +++ b/.github/workflows/building_rusty1s_conda.yml @@ -11,7 +11,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python-version: ['3.8', '3.9', '3.10', '3.11'] torch-version: [1.12.0, 1.13.0, 2.0.0] cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] exclude: @@ -29,8 +29,6 @@ jobs: cuda-version: 'cu113' - torch-version: 1.13.0 cuda-version: 'cu118' - - torch-version: 2.0.0 - python-version: '3.7' - torch-version: 2.0.0 cuda-version: 'cu102' - torch-version: 2.0.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f44b775b297..aa3d43cce193 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -149,6 +149,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Removed +- Dropped Python 3.7 support ([#7939](https://github.com/pyg-team/pytorch_geometric/pull/7939)) - Removed `layer_type` argument in `contrib.explain.GraphMaskExplainer` ([#7445](https://github.com/pyg-team/pytorch_geometric/pull/7445)) - Replaced `FastHGTConv` with `HGTConv` ([#7117](https://github.com/pyg-team/pytorch_geometric/pull/7117)) diff --git a/README.md b/README.md index e4bbb2aa05b2..784846c40abe 100644 --- a/README.md +++ b/README.md @@ -358,7 +358,7 @@ These approaches have been implemented in PyG, and can benefit from the above GN ## Installation -PyG is available for Python 3.7 to Python 3.11. +PyG is available for Python 3.8 to Python 3.11. ### Anaconda diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst index 9c696c70b31c..9d2677f4c4f4 100644 --- a/docs/source/install/installation.rst +++ b/docs/source/install/installation.rst @@ -1,7 +1,7 @@ Installation ============ -:pyg:`PyG` is available for Python 3.7 to Python 3.11. +:pyg:`PyG` is available for Python 3.8 to Python 3.11. .. note:: We do not recommend installation as a root user on your system Python. 
diff --git a/torch_geometric/datasets/hydro_net.py b/torch_geometric/datasets/hydro_net.py index d501c615317e..e2ac24f056dd 100644 --- a/torch_geometric/datasets/hydro_net.py +++ b/torch_geometric/datasets/hydro_net.py @@ -2,7 +2,7 @@ import os import os.path as osp from dataclasses import dataclass -from functools import lru_cache +from functools import cached_property from glob import glob from pathlib import Path from typing import Callable, List, Optional, Tuple, Union @@ -18,13 +18,6 @@ extract_zip, ) -try: - from functools import cached_property -except ImportError: # Python 3.7 support. - - def cached_property(func): - return property(fget=lru_cache(maxsize=1)(func)) - class HydroNet(InMemoryDataset): r"""The HydroNet dataest from the diff --git a/torch_geometric/io/planetoid.py b/torch_geometric/io/planetoid.py index 3026b11f5534..7ce372eba6a1 100644 --- a/torch_geometric/io/planetoid.py +++ b/torch_geometric/io/planetoid.py @@ -1,5 +1,4 @@ import os.path as osp -import sys import warnings from itertools import repeat @@ -93,11 +92,8 @@ def read_file(folder, prefix, name): return read_txt_array(path, dtype=torch.long) with open(path, 'rb') as f: - if sys.version_info > (3, 0): - warnings.filterwarnings('ignore', '.*`scipy.sparse.csr` name.*') - out = pickle.load(f, encoding='latin1') - else: - out = pickle.load(f) + warnings.filterwarnings('ignore', '.*`scipy.sparse.csr` name.*') + out = pickle.load(f, encoding='latin1') if name == 'graph': return out From 32a76cb980ae00ed039ce5536d62af0421ab881c Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Mon, 28 Aug 2023 08:43:05 +0200 Subject: [PATCH 1430/2432] Add possibility to run training benchmarks on XPU device (#7925) Exemplary CMD: `python training_benchmark.py --device xpu --datasets Reddit --models gcn --num-layers 2 --num-hidden-channels 128 --batch-sizes 8192` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + benchmark/training/training_benchmark.py | 60 +++++++++++++++++++----- 2 files changed, 49 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa3d43cce193..1f38f719eb86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925)) - Added `utils.ppr` for personalized PageRank computation ([#7917](https://github.com/pyg-team/pytorch_geometric/pull/7917)) - Added support for XPU device in `PrefetchLoader` ([#7918](https://github.com/pyg-team/pytorch_geometric/pull/7918)) - Added support for floating-point slicing in `Dataset`, *e.g.*, `dataset[:0.9]` ([#7915](https://github.com/pyg-team/pytorch_geometric/pull/7915)) diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index bdbb6f2620d2..43ecb7ed6437 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -20,7 +20,12 @@ from torch_geometric import compile from torch_geometric.loader import NeighborLoader from torch_geometric.nn import PNAConv -from torch_geometric.profile import rename_profile_file, timeit, torch_profile +from torch_geometric.profile import ( + rename_profile_file, + timeit, + torch_profile, + xpu_profile, +) supported_sets = { 'ogbn-mag': ['rgat', 'rgcn'], @@ -28,6 +33,14 @@ 'Reddit': ['edge_cnn', 'gat', 'gcn', 'pna', 'sage'], } +device_conditions = { + 'cuda': (lambda: torch.cuda.is_available()), + 'mps': + (lambda: + (hasattr(torch.backends, 'mps') and torch.backends.mps.is_available())), + 'xpu': (lambda: torch.xpu.is_available()), +} + def train_homo(model, loader, optimizer, device, progress_bar=True, desc="", trim=False): @@ -87,18 +100,22 @@ def run(args: argparse.ArgumentParser): warnings.warn("Cannot write profile data to CSV because profiling is " "disabled") - if torch.cuda.is_available(): - device = torch.device('cuda') - elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): - device = torch.device('mps') - else: - device = torch.device('cpu') + if args.device == 'xpu': + try: + import intel_extension_for_pytorch as ipex + except ImportError: + raise RuntimeError('XPU device requires IPEX to be installed') + + if not device_conditions[args.device](): + raise RuntimeError(f'{args.device.upper()} is not available') + device = torch.device(args.device) # If we use a custom number of steps, then we need to use RandomSampler, # which already does shuffle. shuffle = False if args.num_steps != -1 else True print('BENCHMARK STARTS') + print(f'Running on {args.device.upper()}') for dataset_name in args.datasets: assert dataset_name in supported_sets.keys( ), f"Dataset {dataset_name} isn't supported." 
@@ -110,10 +127,19 @@ def run(args: argparse.ArgumentParser): hetero = True if dataset_name == 'ogbn-mag' else False mask, val_mask, test_mask = get_split_masks(data, dataset_name) degree = None - if torch.cuda.is_available(): + + if args.device == 'cpu': + amp = torch.cpu.amp.autocast(enabled=args.bf16) + elif args.device == 'cuda': amp = torch.cuda.amp.autocast(enabled=False) + elif args.device == 'xpu': + amp = torch.xpu.amp.autocast(enabled=False) else: - amp = torch.cpu.amp.autocast(enabled=args.bf16) + amp = nullcontext() + + if args.device == 'xpu' and args.warmup < 1: + print('XPU device requires warmup - setting warmup=1') + args.warmup = 1 inputs_channels = data[ 'paper'].num_features if dataset_name == 'ogbn-mag' \ @@ -205,6 +231,10 @@ def run(args: argparse.ArgumentParser): optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + if args.device == 'xpu': + model, optimizer = ipex.optimize( + model, optimizer=optimizer) + progress_bar = False if args.no_progress_bar else True train = train_hetero if hetero else train_homo @@ -254,9 +284,13 @@ def run(args: argparse.ArgumentParser): print(f'Test Accuracy: {test_acc:.4f}') if args.profile: - profile = torch_profile( - args.export_chrome_trace, csv_data, - args.write_csv) + if args.device == 'xpu': + profile = xpu_profile( + args.export_chrome_trace) + else: + profile = torch_profile( + args.export_chrome_trace, csv_data, + args.write_csv) with profile: train(model, subgraph_loader, optimizer, device, progress_bar=progress_bar, @@ -304,6 +338,8 @@ def run(args: argparse.ArgumentParser): argparser = argparse.ArgumentParser('GNN training benchmark') add = argparser.add_argument + add('--device', choices=['cpu', 'cuda', 'mps', 'xpu'], default='cpu', + help='Device to run benchmark on') add('--datasets', nargs='+', default=['ogbn-mag', 'ogbn-products', 'Reddit'], type=str) add('--use-sparse-tensor', action='http://github.com/store_true', From ecc4b7643ed5270f53313542dc32847de96831ab Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 28 Aug 2023 13:33:30 +0200 Subject: [PATCH 1431/2432] Check that local `edge_label_index` maps to global `edge_label_index` in `LinkNeighborLoader` (#7943) Related: #7900 --- test/loader/test_link_neighbor_loader.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 406e347b1106..9fde16f1d6fc 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -29,7 +29,7 @@ def test_homo_link_neighbor_loader_basic(device, subgraph_type, neg_edge_index = get_random_edge_index(50, 50, 500, device=device) neg_edge_index += 50 - edge_label_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1) + input_edges = torch.cat([pos_edge_index, neg_edge_index], dim=-1) edge_label = torch.cat([ torch.ones(500, device=device), torch.zeros(500, device=device), @@ -45,7 +45,7 @@ def test_homo_link_neighbor_loader_basic(device, subgraph_type, data, num_neighbors=[-1] * 2, batch_size=20, - edge_label_index=edge_label_index, + edge_label_index=input_edges, edge_label=edge_label if neg_sampling_ratio is None else None, subgraph_type=subgraph_type, neg_sampling_ratio=neg_sampling_ratio, @@ -58,8 +58,8 @@ def test_homo_link_neighbor_loader_basic(device, subgraph_type, batch = loader([0]) assert isinstance(batch, Data) - assert int(edge_label_index[0, 0]) in batch.n_id.tolist() - assert int(edge_label_index[1, 0]) in batch.n_id.tolist() + assert 
int(input_edges[0, 0]) in batch.n_id.tolist() + assert int(input_edges[1, 0]) in batch.n_id.tolist() for batch in loader: assert isinstance(batch, Data) @@ -97,6 +97,14 @@ def test_homo_link_neighbor_loader_basic(device, subgraph_type, assert torch.all(batch.edge_label[:20] == 1) assert torch.all(batch.edge_label[20:] == 0) + # Ensure local `edge_label_index` correctly maps to input edges. + global_edge_label_index = batch.n_id[batch.edge_label_index] + global_edge_label_index = ( + global_edge_label_index[:, batch.edge_label >= 1]) + global_edge_label_index = unique_edge_pairs(global_edge_label_index) + assert (len(global_edge_label_index & unique_edge_pairs(input_edges)) + == len(global_edge_label_index)) + @onlyNeighborSampler @pytest.mark.parametrize('subgraph_type', ['directional', 'bidirectional']) From 6e6634ad4bb71319f1c0ce22484a379bba2ae9a0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 28 Aug 2023 13:40:31 +0200 Subject: [PATCH 1432/2432] Test for zero graph breaks on CUDA (#7944) --- test/nn/models/test_basic_gnn.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 0b98b57ca32c..4f91704be54c 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -311,14 +311,19 @@ def test_trim_to_layer(): num_compile_calls = 0 +@withCUDA @onlyLinux @disableExtensions @withPackage('torch>=2.0.0') @pytest.mark.parametrize('Model', [GCN, GraphSAGE, GIN, GAT, EdgeCNN, PNA]) @pytest.mark.skip(reason="Does not work yet in the full test suite") -def test_compile_graph_breaks(Model): - x = torch.randn(3, 8) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) +def test_compile_graph_breaks(Model, device): + # TODO EdgeCNN and PNA currently lead to graph breaks on CUDA :( + if Model in {EdgeCNN, PNA} and device.type == 'cuda': + return + + x = torch.randn(3, 8, device=device) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device=device) kwargs = {} if Model in {GCN, GAT}: @@ -331,6 +336,7 @@ def test_compile_graph_breaks(Model): kwargs['deg'] = torch.tensor([1, 2, 1]) model = Model(in_channels=8, hidden_channels=16, num_layers=2, **kwargs) + model = model.to(device) def my_custom_backend(gm, *args): global num_compile_calls From 627e0db9e90a5b545204fbd35b33feee1f4ed765 Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Mon, 28 Aug 2023 14:49:15 +0200 Subject: [PATCH 1433/2432] Improve inference loop on GPU devices (#7896) In layer-wise inference loop, we perform computations as shown on the following pseudocode: ``` for layer in layers for batch in loader do inference per layer ``` In models that have more than one layer, we can benefit from caching batches during the first walk through the data. This PR introduces `CachedLoader`, which transfers batches to a pointed device and caches them. Additionally, an auxiliary function was provided, `make_batches_cacheable`, which decorates `BasicGNN` instance with a custom inference loop. Selected performance results (gained on Intel PVC): ``` Speedup: gcn[2L]+Reddit: 1.53x gcn[3L]+Reddit: 1.69x sage[2L]+Reddit: 1.55x sage[3L]+Reddit: 2.02x gcn[2L]+ogbn-products: 1.72x gcn[3L]+ogbn-products: 2.11x sage[2L]+ogbn-products: 1.83x sage[3L]+ogbn-products: 2.44x ``` Caching mechanism did not have a significant impact on models with a single layer. Drawbacks: - User should be aware that caching mechanism requires additional device memory to be allocated. 
In experiments, approximately 1GB was needed for the `Reddit` dataset. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/loader/test_cache.py | 78 ++++++++++++++++++++++++++ test/nn/models/test_basic_gnn.py | 23 ++++++++ torch_geometric/loader/__init__.py | 2 + torch_geometric/loader/cache.py | 70 +++++++++++++++++++++++ torch_geometric/nn/models/basic_gnn.py | 22 +++++++- 6 files changed, 195 insertions(+), 1 deletion(-) create mode 100644 test/loader/test_cache.py create mode 100644 torch_geometric/loader/cache.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f38f719eb86..fc84c1755963 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896)) - Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925)) - Added `utils.ppr` for personalized PageRank computation ([#7917](https://github.com/pyg-team/pytorch_geometric/pull/7917)) - Added support for XPU device in `PrefetchLoader` ([#7918](https://github.com/pyg-team/pytorch_geometric/pull/7918)) diff --git a/test/loader/test_cache.py b/test/loader/test_cache.py new file mode 100644 index 000000000000..7c15b2e94a94 --- /dev/null +++ b/test/loader/test_cache.py @@ -0,0 +1,78 @@ +import torch +from torch import Tensor + +from torch_geometric.data import Data +from torch_geometric.loader import CachedLoader, NeighborLoader +from torch_geometric.testing import withCUDA, withPackage + + +@withCUDA +@withPackage('pyg_lib') +def test_cached_loader(device): + x = torch.randn(14, 16) + edge_index = torch.tensor([ + [2, 3, 4, 5, 7, 7, 10, 11, 12, 13], + [0, 1, 2, 3, 2, 3, 7, 7, 7, 7], + ]) + + loader = NeighborLoader( + Data(x=x, edge_index=edge_index), + num_neighbors=[2], + batch_size=10, + shuffle=False, + ) + cached_loader = CachedLoader(loader, device=device) + + assert len(cached_loader) == len(loader) + assert len(cached_loader._cache) == 0 + + cache = [] + for i, batch in enumerate(cached_loader): + assert len(cached_loader._cache) == i + 1 + assert batch.x.device == device + assert batch.edge_index.device == device + + cache.append(batch) + + for i, batch in enumerate(cached_loader): + assert batch == cache[i] + + cached_loader.clear() + assert len(cached_loader._cache) == 0 + + +@withCUDA +@withPackage('pyg_lib') +def test_cached_loader_transform(device): + x = torch.randn(14, 16) + edge_index = torch.tensor([ + [2, 3, 4, 5, 7, 7, 10, 11, 12, 13], + [0, 1, 2, 3, 2, 3, 7, 7, 7, 7], + ]) + + loader = NeighborLoader( + Data(x=x, edge_index=edge_index), + num_neighbors=[2], + batch_size=10, + shuffle=False, + ) + cached_loader = CachedLoader( + loader, + device=device, + transform=lambda batch: batch.edge_index, + ) + + assert len(cached_loader) == len(loader) + assert len(cached_loader._cache) == 0 + + cache = [] + for i, batch in enumerate(cached_loader): + assert len(cached_loader._cache) == i + 1 + assert isinstance(batch, Tensor) + assert batch.dim() == 2 and batch.size(0) == 2 + assert batch.device == device + + cache.append(batch) + + for i, batch in enumerate(cached_loader): + assert torch.equal(batch, cache[i]) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 4f91704be54c..05bf9db1c336 100644 --- a/test/nn/models/test_basic_gnn.py +++ 
b/test/nn/models/test_basic_gnn.py @@ -350,6 +350,29 @@ def my_custom_backend(gm, *args): assert num_compile_calls - num_previous_compile_calls == 1 +@withPackage('pyg_lib') +def test_basic_gnn_cache(): + x = torch.randn(14, 16) + edge_index = torch.tensor([ + [2, 3, 4, 5, 7, 7, 10, 11, 12, 13], + [0, 1, 2, 3, 2, 3, 7, 7, 7, 7], + ]) + + loader = NeighborLoader( + Data(x=x, edge_index=edge_index), + num_neighbors=[-1], + batch_size=2, + ) + + model = GCN(in_channels=16, hidden_channels=16, num_layers=2) + model.eval() + + out1 = model.inference(loader, cache=False) + out2 = model.inference(loader, cache=True) + + assert torch.allclose(out1, out2) + + if __name__ == '__main__': import argparse diff --git a/torch_geometric/loader/__init__.py b/torch_geometric/loader/__init__.py index 494a380023e2..0aa431a10d8e 100644 --- a/torch_geometric/loader/__init__.py +++ b/torch_geometric/loader/__init__.py @@ -19,6 +19,7 @@ from .imbalanced_sampler import ImbalancedSampler from .dynamic_batch_sampler import DynamicBatchSampler from .prefetch import PrefetchLoader +from .cache import CachedLoader from .mixin import AffinityMixin __all__ = classes = [ @@ -44,6 +45,7 @@ 'ImbalancedSampler', 'DynamicBatchSampler', 'PrefetchLoader', + 'CachedLoader', 'AffinityMixin', ] diff --git a/torch_geometric/loader/cache.py b/torch_geometric/loader/cache.py new file mode 100644 index 000000000000..9d4434f6e6a0 --- /dev/null +++ b/torch_geometric/loader/cache.py @@ -0,0 +1,70 @@ +from collections.abc import Mapping +from typing import Any, Callable, List, Optional, Sequence + +import torch +from torch.utils.data import DataLoader + + +def to_device(inputs: Any, device: Optional[torch.device] = None) -> Any: + if hasattr(inputs, 'to'): + return inputs.to(device) + elif isinstance(inputs, Mapping): + return {key: to_device(value, device) for key, value in inputs.items()} + elif isinstance(inputs, tuple) and hasattr(inputs, '_fields'): + return type(inputs)(*(to_device(s, device) for s in zip(*inputs))) + elif isinstance(inputs, Sequence) and not isinstance(inputs, str): + return [to_device(s, device) for s in zip(*inputs)] + + return inputs + + +class CachedLoader: + r"""A loader to cache mini-batch outputs, e.g., obtained during + :class:`NeighborLoader` iterations. + + Args: + loader (torch.utils.data.DataLoader): The data loader. + device (torch.device, optional): The device to load the data to. + (default: :obj:`None`) + transform (callable, optional): A function/transform that takes in + a sampled mini-batch and returns a transformed version. 
+ (default: :obj:`None`) + """ + def __init__( + self, + loader: DataLoader, + device: Optional[torch.device] = None, + transform: Optional[Callable] = None, + ): + self.loader = loader + self.device = device + self.transform = transform + + self._cache: List[Any] = [] + + def clear(self): + r"""Clears the cache.""" + self._cache = [] + + def __iter__(self) -> Any: + if len(self._cache): + for batch in self._cache: + yield batch + return + + for batch in self.loader: + + if self.transform is not None: + batch = self.transform(batch) + + batch = to_device(batch, self.device) + + self._cache.append(batch) + + yield batch + + def __len__(self) -> int: + return len(self.loader) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}({self.loader})' diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index 3b8d312094c6..017e567d3ec8 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -6,7 +6,8 @@ from torch.nn import Linear, ModuleList from tqdm import tqdm -from torch_geometric.loader import NeighborLoader +from torch_geometric.data import Data +from torch_geometric.loader import CachedLoader, NeighborLoader from torch_geometric.nn.conv import ( EdgeConv, GATConv, @@ -303,6 +304,7 @@ def inference( device: Optional[Union[str, torch.device]] = None, embedding_device: Union[str, torch.device] = 'cpu', progress_bar: bool = False, + cache: bool = False, ) -> Tensor: r"""Performs layer-wise inference on large-graphs using a :class:`~torch_geometric.loader.NeighborLoader`, where @@ -324,6 +326,10 @@ def inference( (default: :obj:`"cpu"`) progress_bar (bool, optional): If set to :obj:`True`, will print a progress bar during computation. (default: :obj:`False`) + cache (bool, optional): If set to :obj:`True`, caches intermediate + sampler outputs for usage in later epochs. + This will avoid repeated sampling to accelerate inference. + (default: :obj:`False`) """ assert self.jk_mode is None or self.jk_mode == 'last' assert isinstance(loader, NeighborLoader) @@ -337,6 +343,20 @@ def inference( x_all = loader.data.x.to(embedding_device) + if cache: + + # Only cache necessary attributes: + def transform(data: Data) -> Data: + kwargs = dict(n_id=data.n_id, batch_size=data.batch_size) + if hasattr(data, 'adj_t'): + kwargs['adj_t'] = data.adj_t + else: + kwargs['edge_index'] = data.edge_index + + return Data.from_dict(kwargs) + + loader = CachedLoader(loader, device=device, transform=transform) + for i in range(self.num_layers): xs: List[Tensor] = [] for batch in loader: From fb79d8601442e452de3c71c87c4b67d46a575258 Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Mon, 28 Aug 2023 14:55:12 +0200 Subject: [PATCH 1434/2432] Add possibility to use `CachedLoader` in inference benchmarks (#7897) This PR adds `--cached-loader` option, that enables `CachedLoader` in inference loop. Waiting for #7896. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey --- CHANGELOG.md | 2 +- benchmark/inference/inference_benchmark.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc84c1755963..3f210d30ece1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896)) +- Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896), [#7897](https://github.com/pyg-team/pytorch_geometric/pull/7897)) - Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925)) - Added `utils.ppr` for personalized PageRank computation ([#7917](https://github.com/pyg-team/pytorch_geometric/pull/7917)) - Added support for XPU device in `PrefetchLoader` ([#7918](https://github.com/pyg-team/pytorch_geometric/pull/7918)) diff --git a/benchmark/inference/inference_benchmark.py b/benchmark/inference/inference_benchmark.py index 7625735086fd..35e839396d53 100644 --- a/benchmark/inference/inference_benchmark.py +++ b/benchmark/inference/inference_benchmark.py @@ -80,6 +80,9 @@ def run(args: argparse.ArgumentParser): _, _, test_mask = get_split_masks(data, dataset_name) degree = None + if hetero and args.cached_loader: + args.cached_loader = False + print('Disabling CachedLoader, not supported in Hetero models') if args.num_layers != [1] and not hetero and args.num_steps != -1: raise ValueError("Layer-wise inference requires `steps=-1`") @@ -209,7 +212,7 @@ def run(args: argparse.ArgumentParser): data = transformation(data) with cpu_affinity, amp, timeit() as time: - inference_kwargs = {} + inference_kwargs = dict(cache=args.cached_loader) if args.reuse_device_for_embeddings and not hetero: inference_kwargs['embedding_device'] = device for _ in range(args.warmup): @@ -332,4 +335,5 @@ def run(args: argparse.ArgumentParser): help='Write benchmark or PyTorch profile data to CSV') add('--export-chrome-trace', default=True, type=bool, help='Export chrome trace file. 
Works only with PyTorch profiler') + add('--cached-loader', action='http://github.com/store_true', help='Use CachedLoader') run(argparser.parse_args()) From a35ce9b7b66322445ba453ae2b539aaf432ba91e Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Mon, 28 Aug 2023 21:33:46 +0800 Subject: [PATCH 1435/2432] Add `recursive` argument to graph partition (#7945) Signed-off-by: Liu,Kaixuan Co-authored-by: Matthias Fey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- examples/distributed/pyg/partition_graph.py | 13 ++++++++++--- examples/distributed/pyg/partition_hetero_graph.py | 13 ++++++++++--- torch_geometric/distributed/partition.py | 6 ++++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/examples/distributed/pyg/partition_graph.py b/examples/distributed/pyg/partition_graph.py index c5069f299134..816e050aef5f 100644 --- a/examples/distributed/pyg/partition_graph.py +++ b/examples/distributed/pyg/partition_graph.py @@ -8,12 +8,17 @@ from torch_geometric.distributed import Partitioner -def partition_dataset(ogbn_dataset: str, root_dir: str, num_parts: int): +def partition_dataset( + ogbn_dataset: str, + root_dir: str, + num_parts: int, + recursive: bool = False, +): save_dir = osp.join(root_dir, f'{ogbn_dataset}-partitions') dataset = PygNodePropPredDataset(ogbn_dataset) data = dataset[0] - partitioner = Partitioner(data, num_parts, save_dir) + partitioner = Partitioner(data, num_parts, save_dir, recursive) partitioner.generate_partition() split_idx = dataset.get_idx_split() @@ -44,6 +49,8 @@ def partition_dataset(ogbn_dataset: str, root_dir: str, num_parts: int): parser.add_argument('--dataset', type=str, default='ogbn-products') parser.add_argument('--root_dir', type=str, default='./data/products') parser.add_argument('--num_partitions', type=int, default=2) + parser.add_argument('--recursive', action='http://github.com/store_true') args = parser.parse_args() - partition_dataset(args.dataset, args.root_dir, args.num_partitions) + partition_dataset(args.dataset, args.root_dir, args.num_partitions, + args.recursive) diff --git a/examples/distributed/pyg/partition_hetero_graph.py b/examples/distributed/pyg/partition_hetero_graph.py index ab8d7fd65db9..c64bab477f3b 100644 --- a/examples/distributed/pyg/partition_hetero_graph.py +++ b/examples/distributed/pyg/partition_hetero_graph.py @@ -8,12 +8,17 @@ from torch_geometric.distributed import Partitioner -def partition_dataset(ogbn_dataset: str, root_dir: str, num_parts: int): +def partition_dataset( + ogbn_dataset: str, + root_dir: str, + num_parts: int, + recursive: bool = False, +): save_dir = osp.join(root_dir, f'{ogbn_dataset}-partitions') dataset = OGB_MAG(root=ogbn_dataset, preprocess='metapath2vec') data = dataset[0] - partitioner = Partitioner(data, num_parts, save_dir) + partitioner = Partitioner(data, num_parts, save_dir, recursive) partitioner.generate_partition() print('-- Saving label ...') @@ -43,6 +48,8 @@ def partition_dataset(ogbn_dataset: str, root_dir: str, num_parts: int): parser.add_argument('--dataset', type=str, default='ogbn-mag') parser.add_argument('--root_dir', type=str, default='./data/mag') parser.add_argument('--num_partitions', type=int, default=2) + parser.add_argument('--recursive', type=bool, default=False) args = parser.parse_args() - partition_dataset(args.dataset, args.root_dir, args.num_partitions) + partition_dataset(args.dataset, args.root_dir, args.num_partitions, + args.recursive) diff --git 
a/torch_geometric/distributed/partition.py b/torch_geometric/distributed/partition.py index 13b72bc835f0..569686a62d71 100644 --- a/torch_geometric/distributed/partition.py +++ b/torch_geometric/distributed/partition.py @@ -58,6 +58,9 @@ class Partitioner: Args: data (Data or HeteroData): The data object. num_parts (int): The number of partitions. + recursive (bool, optional): If set to :obj:`True`, will use multilevel + recursive bisection instead of multilevel k-way partitioning. + (default: :obj:`False`) root (str): Root directory where the partitioned dataset should be saved. """ @@ -66,12 +69,14 @@ def __init__( data: Union[Data, HeteroData], num_parts: int, root: str, + recursive: bool = False, ): assert num_parts > 1 self.data = data self.num_parts = num_parts self.root = root + self.recursive = recursive @property def is_hetero(self) -> bool: @@ -103,6 +108,7 @@ def generate_partition(self): cluster_data = ClusterData( data, num_parts=self.num_parts, + recursive=self.recursive, log=True, keep_inter_cluster_edges=True, ) From ccf6706c9f63911667ba88647889ea0638f54019 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Mon, 28 Aug 2023 13:11:59 -0700 Subject: [PATCH 1436/2432] Added an `ogbn-papers100M` example (#7860) single gpu papers100m on 8xh100: 0.00027184496005873887 s/iter --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- examples/ogbn_papers_100m.py | 110 +++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 examples/ogbn_papers_100m.py diff --git a/examples/ogbn_papers_100m.py b/examples/ogbn_papers_100m.py new file mode 100644 index 000000000000..9242a2d53ffc --- /dev/null +++ b/examples/ogbn_papers_100m.py @@ -0,0 +1,110 @@ +import argparse +import os +import time + +import torch +import torch.nn.functional as F +from ogb.nodeproppred import PygNodePropPredDataset +from torchmetrics import Accuracy + +from torch_geometric.loader import NeighborLoader +from torch_geometric.nn import GCNConv + +parser = argparse.ArgumentParser() +parser.add_argument('--hidden_channels', type=int, default=64) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--epochs', type=int, default=3) +parser.add_argument('--batch_size', type=int, default=128) +parser.add_argument('--fan_out', type=int, default=50) + +args = parser.parse_args() + +if torch.cuda.is_available(): + device = torch.device('cuda') +else: + device = torch.device('cpu') + +dataset = PygNodePropPredDataset(name='ogbn-papers100M') +split_idx = dataset.get_idx_split() +data = dataset[0] +data.y = data.y.reshape(-1) +print("Data =", data) + + +def pyg_num_work(): + num_work = None + if hasattr(os, "sched_getaffinity"): + try: + num_work = len(os.sched_getaffinity(0)) / 2 + except Exception: + pass + if num_work is None: + num_work = os.cpu_count() / 2 + return int(num_work) + + +class GCN(torch.nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels): + super().__init__() + self.conv1 = GCNConv(in_channels, hidden_channels) + self.conv2 = GCNConv(hidden_channels, out_channels) + + def forward(self, x, edge_index, edge_weight=None): + x = F.dropout(x, p=0.5, training=self.training) + x = self.conv1(x, edge_index, edge_weight).relu() + x = F.dropout(x, p=0.5, training=self.training) + x = self.conv2(x, edge_index, edge_weight) + return x + + +model = GCN(dataset.num_features, args.hidden_channels, + dataset.num_classes).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.01, 
weight_decay=0.0005) +batch_size = args.batch_size +train_loader = NeighborLoader(data, num_neighbors=[args.fan_out, args.fan_out], + input_nodes=split_idx['train'], + batch_size=batch_size, + num_workers=pyg_num_work()) +eval_loader = NeighborLoader(data, num_neighbors=[args.fan_out, args.fan_out], + input_nodes=split_idx['valid'], + batch_size=batch_size, num_workers=pyg_num_work()) +test_loader = NeighborLoader(data, num_neighbors=[args.fan_out, args.fan_out], + input_nodes=split_idx['test'], + batch_size=batch_size, num_workers=pyg_num_work()) +eval_steps = 100 +acc = Accuracy(task="multiclass", num_classes=dataset.num_classes).to(device) +for epoch in range(args.epochs): + for i, batch in enumerate(train_loader): + if i >= 10: + start = time.time() + batch = batch.to(device) + batch.y = batch.y.to(torch.long) + optimizer.zero_grad() + out = model(batch.x, batch.edge_index) + loss = F.cross_entropy(out[:batch_size], batch.y[:batch_size]) + loss.backward() + optimizer.step() + if i % 10 == 0: + print("Epoch: " + str(epoch) + ", Iteration: " + str(i) + + ", Loss: " + str(loss)) + print("Average Iteration Time:", (time.time() - start) / (i - 10), + "s/iter") + acc_sum = 0.0 + with torch.no_grad(): + for i, batch in enumerate(eval_loader): + if i >= eval_steps: + break + batch = batch.to(device) + batch.y = batch.y.to(torch.long) + out = model(batch.x, batch.edge_index) + acc_sum += acc(out[:batch_size].softmax(dim=-1), + batch.y[:batch_size]) + print(f"Validation Accuracy: {acc_sum/(i) * 100.0:.4f}%", ) +acc_sum = 0.0 +with torch.no_grad(): + for i, batch in enumerate(test_loader): + batch = batch.to(device) + batch.y = batch.y.to(torch.long) + out = model(batch.x, batch.edge_index) + acc_sum += acc(out[:batch_size].softmax(dim=-1), batch.y[:batch_size]) + print(f"Test Accuracy: {acc_sum/(i) * 100.0:.4f}%", ) From e6ab2ce5e6faa2c9b8a04339bbdbf42a56d30100 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 29 Aug 2023 08:57:09 +0200 Subject: [PATCH 1437/2432] Update graph break tests (#7947) --- test/nn/models/test_basic_gnn.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 05bf9db1c336..ebb9b9cb3c8f 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -8,6 +8,7 @@ import torch.nn.functional as F import torch_geometric.typing +from torch_geometric.compile import to_jittable from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader from torch_geometric.nn import SAGEConv @@ -308,16 +309,14 @@ def test_trim_to_layer(): assert torch.allclose(out1, out2) -num_compile_calls = 0 - - @withCUDA @onlyLinux @disableExtensions @withPackage('torch>=2.0.0') @pytest.mark.parametrize('Model', [GCN, GraphSAGE, GIN, GAT, EdgeCNN, PNA]) -@pytest.mark.skip(reason="Does not work yet in the full test suite") def test_compile_graph_breaks(Model, device): + import torch._dynamo as dynamo + # TODO EdgeCNN and PNA currently lead to graph breaks on CUDA :( if Model in {EdgeCNN, PNA} and device.type == 'cuda': return @@ -336,18 +335,13 @@ def test_compile_graph_breaks(Model, device): kwargs['deg'] = torch.tensor([1, 2, 1]) model = Model(in_channels=8, hidden_channels=16, num_layers=2, **kwargs) - model = model.to(device) - - def my_custom_backend(gm, *args): - global num_compile_calls - num_compile_calls += 1 - return gm.forward - - model = torch_geometric.compile(model, backend=my_custom_backend) + model = 
to_jittable(model).to(device) - num_previous_compile_calls = num_compile_calls - model(x, edge_index) - assert num_compile_calls - num_previous_compile_calls == 1 + explanation = dynamo.explain(model, x, edge_index) + if hasattr(explanation, 'graph_break_count'): + assert explanation.graph_break_count == 0 + else: + assert 'with 0 graph break' in explanation[0] @withPackage('pyg_lib') From 44cdccd17f12bc62a770d18ecf47d0aaf30d5e5d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 29 Aug 2023 09:22:45 +0200 Subject: [PATCH 1438/2432] Add `group_argsort` implementation (#7948) --- CHANGELOG.md | 1 + test/utils/test_scatter.py | 22 ++++++++++- torch_geometric/utils/__init__.py | 3 +- torch_geometric/utils/scatter.py | 65 ++++++++++++++++++++++++++++++- 4 files changed, 88 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f210d30ece1..cf1b6dbcd1be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `group_argsort` implementation ([#7948](https://github.com/pyg-team/pytorch_geometric/pull/7948)) - Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896), [#7897](https://github.com/pyg-team/pytorch_geometric/pull/7897)) - Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925)) - Added `utils.ppr` for personalized PageRank computation ([#7917](https://github.com/pyg-team/pytorch_geometric/pull/7917)) diff --git a/test/utils/test_scatter.py b/test/utils/test_scatter.py index 44e132c1c11e..658a7ac80f9e 100644 --- a/test/utils/test_scatter.py +++ b/test/utils/test_scatter.py @@ -5,7 +5,7 @@ from torch_geometric.profile import benchmark from torch_geometric.testing import disableExtensions, withCUDA, withPackage -from torch_geometric.utils import scatter +from torch_geometric.utils import group_argsort, scatter from torch_geometric.utils.scatter import scatter_argmax @@ -74,6 +74,26 @@ def test_scatter_any(device): assert float(out[i, j]) in src[2 * i:2 * i + 2, j].tolist() +@withCUDA +@pytest.mark.parametrize('num_groups', [4]) +@pytest.mark.parametrize('descending', [False, True]) +def test_group_argsort(num_groups, descending, device): + src = torch.randn(20, device=device) + index = torch.randint(0, num_groups, (20, ), device=device) + + out = group_argsort(src, index, 0, num_groups, descending=descending) + + expected = torch.empty_like(index) + for i in range(num_groups): + mask = index == i + tmp = src[mask].argsort(descending=descending) + perm = torch.empty_like(tmp) + perm[tmp] = torch.arange(tmp.numel(), device=device) + expected[mask] = perm + + assert torch.equal(out, expected) + + @withCUDA @disableExtensions def test_scatter_argmax(device): diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 46cc541ae509..30c648a61f4d 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -1,6 +1,6 @@ import copy -from .scatter import scatter +from .scatter import scatter, group_argsort from .segment import segment from .sort import index_sort from .degree import degree @@ -56,6 +56,7 @@ __all__ = [ 'scatter', + 'group_argsort', 'segment', 'index_sort', 'degree', diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index 7a87aa7420fb..e1c314b15328 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -170,8 +170,9 @@ def 
scatter_argmax(src: Tensor, index: Tensor, dim: int = 0, return out[1] # Only implemented under certain conditions for now :( - assert dim == 0 assert src.dim() == 1 and index.dim() == 1 + assert dim == 0 or dim == -1 + assert src.numel() == index.numel() if dim_size is None: dim_size = index.max() + 1 if index.numel() > 0 else 0 @@ -191,3 +192,65 @@ def scatter_argmax(src: Tensor, index: Tensor, dim: int = 0, out[index[nonzero]] = nonzero return out + + +def group_argsort( + src: Tensor, + index: Tensor, + dim: int = 0, + num_groups: Optional[int] = None, + descending: bool = False, + return_consecutive: bool = False, + stable: bool = False, +) -> Tensor: + r"""Returns the indices that sort the tensor :obj:`src` along a given + dimension in ascending order by value. + In contrast to :meth:`torch.argsort`, sorting is performed in groups + according to the values in :obj:`index`. + + Args: + src (torch.Tensor): The source tensor. + index (torch.Tensor): The index tensor. + dim (int, optional): The dimension along which to index. + (default: :obj:`0`) + num_groups (int, optional): The number of groups. + (default: :obj:`None`) + descending (bool, optional): Controls the sorting order (ascending or + descending). (default: :obj:`False`) + return_consecutive (bool, optional): If set to :obj:`True`, will not + offset the output to start from :obj:`0` for each group. + (default: :obj:`False`) + stable (bool, optional): Controls the relative order of equivalent + elements. (default: :obj:`False`) + """ + # Only implemented under certain conditions for now :( + assert src.dim() == 1 and index.dim() == 1 + assert dim == 0 or dim == -1 + assert src.numel() == index.numel() and src.numel() > 0 + + # Normalize `src` to range [0, 1]: + src = src - src.min() + src = src / src.max() + + # Compute `grouped_argsort`: + src = src - 2 * index if descending else src + 2 * index + if torch_geometric.typing.WITH_PT113: + perm = src.argsort(descending=descending, stable=stable) + else: + perm = src.argsort(descending=descending) + if stable: + warnings.warn("Ignoring option `stable=True` in 'group_argsort' " + "since it requires PyTorch >= 1.13.0") + out = torch.empty_like(index) + out[perm] = torch.arange(index.numel(), device=index.device) + + if return_consecutive: + return out + + # Compute cumulative sum of number of entries with the same index: + count = scatter(torch.ones_like(index), index, dim=dim, + dim_size=num_groups, reduce='sum') + ptr = count.new_zeros(count.numel() + 1) + torch.cumsum(count, dim=0, out=ptr[1:]) + + return out - ptr[index] From 2febd3820ec63bd12834237b3be3453ee5b08c2e Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 29 Aug 2023 14:39:51 +0200 Subject: [PATCH 1439/2432] Catch any `numba` import error (#7950) --- torch_geometric/utils/ppr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/utils/ppr.py b/torch_geometric/utils/ppr.py index c3e4b34a2329..31fadf1a687a 100644 --- a/torch_geometric/utils/ppr.py +++ b/torch_geometric/utils/ppr.py @@ -11,7 +11,7 @@ try: import numba WITH_NUMBA = True -except ImportError: # pragma: no cover +except Exception: # pragma: no cover WITH_NUMBA = False From 64058527056c8cbca692b0aaf124f1d82c830cb2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 30 Aug 2023 12:33:40 +0200 Subject: [PATCH 1440/2432] Account for unsorted inputs when computing `e_id` in `NeighborSampler` (#7953) --- CHANGELOG.md | 1 + .../algorithm/test_graphmask_explainer.py | 6 ++--- test/loader/test_link_neighbor_loader.py | 23 
+++++++++++++++++++ test/loader/test_neighbor_loader.py | 22 ++++++++++++++++++ torch_geometric/loader/link_loader.py | 10 +++++--- .../loader/link_neighbor_loader.py | 8 +++++++ torch_geometric/loader/neighbor_loader.py | 10 ++++++++ torch_geometric/loader/node_loader.py | 10 +++++--- 8 files changed, 81 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf1b6dbcd1be..2f5052d017d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed a bug in which `batch.e_id` was not correctly computed on unsorted graph inputs ([#7953](https://github.com/pyg-team/pytorch_geometric/pull/7953)) - Fixed `from_networkx` conversion from `nx.stochastic_block_model` graphs ([#7941](https://github.com/pyg-team/pytorch_geometric/pull/7941)) - Fixed the usage of `bias_initializer` in `HeteroLinear` ([#7923](https://github.com/pyg-team/pytorch_geometric/pull/7923)) - Fixed broken links in `HGBDataset` ([#7907](https://github.com/pyg-team/pytorch_geometric/pull/7907)) diff --git a/test/explain/algorithm/test_graphmask_explainer.py b/test/explain/algorithm/test_graphmask_explainer.py index e9dec8e3765d..0bb7eacc0767 100644 --- a/test/explain/algorithm/test_graphmask_explainer.py +++ b/test/explain/algorithm/test_graphmask_explainer.py @@ -123,7 +123,7 @@ def test_graph_mask_explainer_binary_classification( explainer = Explainer( model=model, - algorithm=GraphMaskExplainer(2, epochs=5), + algorithm=GraphMaskExplainer(2, epochs=5, log=False), explanation_type=explanation_type, node_mask_type=node_mask_type, edge_mask_type=edge_mask_type, @@ -171,7 +171,7 @@ def test_graph_mask_explainer_multiclass_classification( explainer = Explainer( model=model, - algorithm=GraphMaskExplainer(2, epochs=5), + algorithm=GraphMaskExplainer(2, epochs=5, log=False), explanation_type=explanation_type, node_mask_type=node_mask_type, edge_mask_type=edge_mask_type, @@ -216,7 +216,7 @@ def test_graph_mask_explainer_regression( explainer = Explainer( model=model, - algorithm=GraphMaskExplainer(2, epochs=5), + algorithm=GraphMaskExplainer(2, epochs=5, log=False), explanation_type=explanation_type, node_mask_type=node_mask_type, edge_mask_type=edge_mask_type, diff --git a/test/loader/test_link_neighbor_loader.py b/test/loader/test_link_neighbor_loader.py index 9fde16f1d6fc..2955dd02a09b 100644 --- a/test/loader/test_link_neighbor_loader.py +++ b/test/loader/test_link_neighbor_loader.py @@ -570,3 +570,26 @@ def test_hetero_link_neighbor_loader_triplet(disjoint, temporal, amount): for i in range(batch_size): assert (node_store.time[node_store.batch == i].max() <= node_store.seed_time[i]) + + +@withPackage('pyg_lib') +def test_link_neighbor_loader_mapping(): + edge_index = torch.tensor([ + [0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 5], + [1, 2, 3, 4, 5, 8, 6, 7, 9, 10, 6, 11], + ]) + data = Data(edge_index=edge_index, num_nodes=12) + + loader = LinkNeighborLoader( + data, + edge_label_index=data.edge_index, + num_neighbors=[1], + batch_size=2, + shuffle=True, + ) + + for batch in loader: + assert torch.equal( + batch.n_id[batch.edge_index], + data.edge_index[:, batch.e_id], + ) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index b33209f35172..d4e2bbd48c55 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -692,3 +692,25 @@ def test_hetero_neighbor_loader_sampled_info(): for edge_type in batch.edge_types: assert 
(batch[edge_type].num_sampled_edges == expected_num_sampled_edges[edge_type]) + + +@withPackage('pyg_lib') +def test_neighbor_loader_mapping(): + edge_index = torch.tensor([ + [0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 5], + [1, 2, 3, 4, 5, 8, 6, 7, 9, 10, 6, 11], + ]) + data = Data(edge_index=edge_index, num_nodes=12) + + loader = NeighborLoader( + data, + num_neighbors=[1], + batch_size=2, + shuffle=True, + ) + + for batch in loader: + assert torch.equal( + batch.n_id[batch.edge_index], + data.edge_index[:, batch.e_id], + ) diff --git a/torch_geometric/loader/link_loader.py b/torch_geometric/loader/link_loader.py index 06f2a854702a..a9555db4383e 100644 --- a/torch_geometric/loader/link_loader.py +++ b/torch_geometric/loader/link_loader.py @@ -224,7 +224,9 @@ def filter_fn( if 'n_id' not in data: data.n_id = out.node if out.edge is not None and 'e_id' not in data: - data.e_id = out.edge + edge = out.edge.to(torch.long) + perm = self.link_sampler.edge_permutation + data.e_id = perm[out.edge] if perm is not None else out.edge data.batch = out.batch data.num_sampled_nodes = out.num_sampled_nodes @@ -260,8 +262,10 @@ def filter_fn( data[key].n_id = node for key, edge in (out.edge or {}).items(): - if 'e_id' not in data[key]: - data[key].e_id = edge + if edge is not None and 'e_id' not in data[key]: + edge = edge.to(torch.long) + perm = self.link_sampler.edge_permutation[key] + data[key].e_id = perm[edge] if perm is not None else edge data.set_value_dict('batch', out.batch) data.set_value_dict('num_sampled_nodes', out.num_sampled_nodes) diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 955f4b862f82..7d9e8c79397f 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -63,6 +63,14 @@ class LinkNeighborLoader(LinkLoader): The rest of the functionality mirrors that of :class:`~torch_geometric.loader.NeighborLoader`, including support for heterogeneous graphs. + In particular, the data loader will add the following attributes to the + returned mini-batch: + + * :obj:`n_id` The global node index for every sampled node + * :obj:`e_id` The global edge index for every sampled edge + * :obj:`input_id`: The global index of the :obj:`edge_label_index` + * :obj:`num_sampled_nodes`: The number of sampled nodes in each hop + * :obj:`num_sampled_edges`: The number of sampled edges in each hop .. note:: Negative sampling is currently implemented in an approximate diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index 634b7f387835..a5213b3b9882 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -101,6 +101,16 @@ class NeighborLoader(NodeLoader): sampled_data = next(iter(loader)) print(sampled_data.n_id) # Global node index of each node in batch. 
+ In particular, the data loader will add the following attributes to the + returned mini-batch: + + * :obj:`batch_size` The number of seed nodes (first nodes in the batch) + * :obj:`n_id` The global node index for every sampled node + * :obj:`e_id` The global edge index for every sampled edge + * :obj:`input_id`: The global index of the :obj:`input_nodes` + * :obj:`num_sampled_nodes`: The number of sampled nodes in each hop + * :obj:`num_sampled_edges`: The number of sampled edges in each hop + Args: data (Any): A :class:`~torch_geometric.data.Data`, :class:`~torch_geometric.data.HeteroData`, or diff --git a/torch_geometric/loader/node_loader.py b/torch_geometric/loader/node_loader.py index ca7715cad8f1..102173fad210 100644 --- a/torch_geometric/loader/node_loader.py +++ b/torch_geometric/loader/node_loader.py @@ -156,7 +156,9 @@ def filter_fn( if 'n_id' not in data: data.n_id = out.node if out.edge is not None and 'e_id' not in data: - data.e_id = out.edge + edge = out.edge.to(torch.long) + perm = self.node_sampler.edge_permutation + data.e_id = perm[edge] if perm is not None else edge data.batch = out.batch data.num_sampled_nodes = out.num_sampled_nodes @@ -180,8 +182,10 @@ def filter_fn( data[key].n_id = node for key, edge in (out.edge or {}).items(): - if 'e_id' not in data[key]: - data[key].e_id = edge + if edge is not None and 'e_id' not in data[key]: + edge = edge.to(torch.long) + perm = self.node_sampler.edge_permutation[key] + data[key].e_id = perm[edge] if perm is not None else edge data.set_value_dict('batch', out.batch) data.set_value_dict('num_sampled_nodes', out.num_sampled_nodes) From 6789e422e8d548f5cd50cda0104d658a8a4212c0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 30 Aug 2023 12:54:21 +0200 Subject: [PATCH 1441/2432] Clean-up `papers100M` example (#7954) --- examples/ogbn_papers_100m.py | 137 ++++++++++++++++------------------- 1 file changed, 63 insertions(+), 74 deletions(-) diff --git a/examples/ogbn_papers_100m.py b/examples/ogbn_papers_100m.py index 9242a2d53ffc..5bd7b590e6a4 100644 --- a/examples/ogbn_papers_100m.py +++ b/examples/ogbn_papers_100m.py @@ -1,46 +1,37 @@ -import argparse import os import time +from typing import Optional import torch import torch.nn.functional as F from ogb.nodeproppred import PygNodePropPredDataset -from torchmetrics import Accuracy from torch_geometric.loader import NeighborLoader from torch_geometric.nn import GCNConv -parser = argparse.ArgumentParser() -parser.add_argument('--hidden_channels', type=int, default=64) -parser.add_argument('--lr', type=float, default=0.01) -parser.add_argument('--epochs', type=int, default=3) -parser.add_argument('--batch_size', type=int, default=128) -parser.add_argument('--fan_out', type=int, default=50) - -args = parser.parse_args() - -if torch.cuda.is_available(): - device = torch.device('cuda') -else: - device = torch.device('cpu') +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') dataset = PygNodePropPredDataset(name='ogbn-papers100M') split_idx = dataset.get_idx_split() -data = dataset[0] -data.y = data.y.reshape(-1) -print("Data =", data) -def pyg_num_work(): - num_work = None - if hasattr(os, "sched_getaffinity"): - try: - num_work = len(os.sched_getaffinity(0)) / 2 - except Exception: - pass - if num_work is None: - num_work = os.cpu_count() / 2 - return int(num_work) +def get_num_workers() -> int: + try: + return len(os.sched_getaffinity(0)) // 2 + except Exception: + return os.cpu_count() // 2 + + +kwargs = dict( + data=dataset[0], + num_neighbors=[50, 50], 
+ batch_size=128, + num_workers=get_num_workers(), +) +train_loader = NeighborLoader(input_nodes=split_idx['train'], shuffle=True, + **kwargs) +val_loader = NeighborLoader(input_nodes=split_idx['valid'], **kwargs) +test_loader = NeighborLoader(input_nodes=split_idx['test'], **kwargs) class GCN(torch.nn.Module): @@ -49,62 +40,60 @@ def __init__(self, in_channels, hidden_channels, out_channels): self.conv1 = GCNConv(in_channels, hidden_channels) self.conv2 = GCNConv(hidden_channels, out_channels) - def forward(self, x, edge_index, edge_weight=None): + def forward(self, x, edge_index): x = F.dropout(x, p=0.5, training=self.training) - x = self.conv1(x, edge_index, edge_weight).relu() + x = self.conv1(x, edge_index).relu() x = F.dropout(x, p=0.5, training=self.training) - x = self.conv2(x, edge_index, edge_weight) + x = self.conv2(x, edge_index) return x -model = GCN(dataset.num_features, args.hidden_channels, - dataset.num_classes).to(device) +model = GCN(dataset.num_features, 64, dataset.num_classes).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0005) -batch_size = args.batch_size -train_loader = NeighborLoader(data, num_neighbors=[args.fan_out, args.fan_out], - input_nodes=split_idx['train'], - batch_size=batch_size, - num_workers=pyg_num_work()) -eval_loader = NeighborLoader(data, num_neighbors=[args.fan_out, args.fan_out], - input_nodes=split_idx['valid'], - batch_size=batch_size, num_workers=pyg_num_work()) -test_loader = NeighborLoader(data, num_neighbors=[args.fan_out, args.fan_out], - input_nodes=split_idx['test'], - batch_size=batch_size, num_workers=pyg_num_work()) -eval_steps = 100 -acc = Accuracy(task="multiclass", num_classes=dataset.num_classes).to(device) -for epoch in range(args.epochs): + + +def train(): + model.train() + for i, batch in enumerate(train_loader): - if i >= 10: - start = time.time() + start = time.perf_counter() batch = batch.to(device) - batch.y = batch.y.to(torch.long) optimizer.zero_grad() - out = model(batch.x, batch.edge_index) - loss = F.cross_entropy(out[:batch_size], batch.y[:batch_size]) + out = model(batch.x, batch.edge_index)[:batch.batch_size] + y = batch.y[:batch.batch_size].view(-1).to(torch.long) + loss = F.cross_entropy(out, y) loss.backward() optimizer.step() + if i % 10 == 0: - print("Epoch: " + str(epoch) + ", Iteration: " + str(i) + - ", Loss: " + str(loss)) - print("Average Iteration Time:", (time.time() - start) / (i - 10), - "s/iter") - acc_sum = 0.0 - with torch.no_grad(): - for i, batch in enumerate(eval_loader): - if i >= eval_steps: - break - batch = batch.to(device) - batch.y = batch.y.to(torch.long) - out = model(batch.x, batch.edge_index) - acc_sum += acc(out[:batch_size].softmax(dim=-1), - batch.y[:batch_size]) - print(f"Validation Accuracy: {acc_sum/(i) * 100.0:.4f}%", ) -acc_sum = 0.0 -with torch.no_grad(): - for i, batch in enumerate(test_loader): + print(f'Epoch: {epoch:02d}, Iteration: {i}, Loss: {loss:.4f}, ' + f's/iter: {time.perf_counter() - start:.6f}') + + +@torch.no_grad() +def test(loader: NeighborLoader, eval_steps: Optional[int] = None): + model.eval() + + total_correct = total_examples = 0 + for i, batch in enumerate(loader): + if eval_steps is not None and i >= eval_steps: + break + batch = batch.to(device) - batch.y = batch.y.to(torch.long) - out = model(batch.x, batch.edge_index) - acc_sum += acc(out[:batch_size].softmax(dim=-1), batch.y[:batch_size]) - print(f"Test Accuracy: {acc_sum/(i) * 100.0:.4f}%", ) + out = model(batch.x, batch.edge_index)[:batch.batch_size] + pred = 
out.argmax(dim=-1) + y = batch.y[:batch.batch_size].view(-1).to(torch.long) + + total_correct += int((pred == y).sum()) + total_examples += y.size(0) + + return total_correct / total_examples + + +for epoch in range(1, 4): + train() + val_acc = test(val_loader, eval_steps=100) + print(f'Val Acc: ~{val_acc:.4f}') + +test_acc = test(test_loader) +print(f'Test Acc: {test_acc:.4f}') From 3fc5ffaef18a6195612abc295664cdc54ea50262 Mon Sep 17 00:00:00 2001 From: Damian Szwichtenberg Date: Wed, 30 Aug 2023 14:27:36 +0200 Subject: [PATCH 1442/2432] Fix errors in benchmarks (#7956) CPU case was not covered in the device validation process. `cache` argument was not accepted by the `inference` method in heterogeneous models. --- CHANGELOG.md | 1 + benchmark/training/training_benchmark.py | 1 + benchmark/utils/hetero_gat.py | 2 +- benchmark/utils/hetero_sage.py | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f5052d017d1..7075ba59591f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Fixed bugs in benchmarks caused by a lack of the device conditions for CPU and unexpected `cache` argument in heterogeneous models ([#7956](https://github.com/pyg-team/pytorch_geometric/pull/7956) - Fixed a bug in which `batch.e_id` was not correctly computed on unsorted graph inputs ([#7953](https://github.com/pyg-team/pytorch_geometric/pull/7953)) - Fixed `from_networkx` conversion from `nx.stochastic_block_model` graphs ([#7941](https://github.com/pyg-team/pytorch_geometric/pull/7941)) - Fixed the usage of `bias_initializer` in `HeteroLinear` ([#7923](https://github.com/pyg-team/pytorch_geometric/pull/7923)) diff --git a/benchmark/training/training_benchmark.py b/benchmark/training/training_benchmark.py index 43ecb7ed6437..6c65a942856e 100644 --- a/benchmark/training/training_benchmark.py +++ b/benchmark/training/training_benchmark.py @@ -34,6 +34,7 @@ } device_conditions = { + 'cpu': (lambda: True), 'cuda': (lambda: torch.cuda.is_available()), 'mps': (lambda: diff --git a/benchmark/utils/hetero_gat.py b/benchmark/utils/hetero_gat.py index d3483e4ecd94..c67fe1c8ed88 100644 --- a/benchmark/utils/hetero_gat.py +++ b/benchmark/utils/hetero_gat.py @@ -16,7 +16,7 @@ def forward(self, x_dict, edge_index_dict): return self.model(x_dict, edge_index_dict) @torch.no_grad() - def inference(self, loader, device, progress_bar=False): + def inference(self, loader, device, progress_bar=False, **kwargs): self.model.eval() if progress_bar: loader = tqdm(loader, desc="Inference") diff --git a/benchmark/utils/hetero_sage.py b/benchmark/utils/hetero_sage.py index 1445ef6146bc..2f616b664531 100644 --- a/benchmark/utils/hetero_sage.py +++ b/benchmark/utils/hetero_sage.py @@ -15,7 +15,7 @@ def forward(self, x_dict, edge_index_dict): return self.model(x_dict, edge_index_dict) @torch.no_grad() - def inference(self, loader, device, progress_bar=False): + def inference(self, loader, device, progress_bar=False, **kwargs): self.model.eval() if progress_bar: loader = tqdm(loader, desc="Inference") From a52af694b8ce6a80811e20966fe6d08a3e7511fe Mon Sep 17 00:00:00 2001 From: Saurav Maheshkar Date: Wed, 30 Aug 2023 18:18:42 +0530 Subject: [PATCH 1443/2432] feat(benchmark/kernel): add sequential `BatchNorm` in `GIN` (#7955) This PR aims to update the GIN implementation in the kernel benchmark based on the discussions in [#2863](https://github.com/pyg-team/pytorch_geometric/issues/2863) and 
results as reported in [Does the position of BatchNorm matter in Graph Isomorphism Networks (GIN)](https://wandb.ai/graph-neural-networks/GIN/reports/Does-the-position-of-BatchNorm-matter-in-Graph-Isomorphism-Networks-GIN---Vmlldzo1MDkwMTM3). ![](https://user-images.githubusercontent.com/61241031/263945331-9c5de4c1-4d07-4db4-8ad6-eca5c13925e3.png) Request for Review: @rusty1s --- CHANGELOG.md | 1 + benchmark/kernel/gin.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7075ba59591f..b51a3ec39cdf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Updated `GIN` implementation in kernel benchmarks to have sequential batchnorms ([#7955](https://github.com/pyg-team/pytorch_geometric/pull/7955)) - Fixed bugs in benchmarks caused by a lack of the device conditions for CPU and unexpected `cache` argument in heterogeneous models ([#7956](https://github.com/pyg-team/pytorch_geometric/pull/7956) - Fixed a bug in which `batch.e_id` was not correctly computed on unsorted graph inputs ([#7953](https://github.com/pyg-team/pytorch_geometric/pull/7953)) - Fixed `from_networkx` conversion from `nx.stochastic_block_model` graphs ([#7941](https://github.com/pyg-team/pytorch_geometric/pull/7941)) diff --git a/benchmark/kernel/gin.py b/benchmark/kernel/gin.py index 0fea3094aad5..4ee3492ca878 100644 --- a/benchmark/kernel/gin.py +++ b/benchmark/kernel/gin.py @@ -13,6 +13,7 @@ def __init__(self, dataset, num_layers, hidden): Sequential( Linear(dataset.num_features, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -24,6 +25,7 @@ def __init__(self, dataset, num_layers, hidden): Sequential( Linear(hidden, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -60,6 +62,7 @@ def __init__(self, dataset, num_layers, hidden, mode='cat'): Sequential( Linear(dataset.num_features, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -71,6 +74,7 @@ def __init__(self, dataset, num_layers, hidden, mode='cat'): Sequential( Linear(hidden, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -115,6 +119,7 @@ def __init__(self, dataset, num_layers, hidden): Sequential( Linear(dataset.num_features, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -126,6 +131,7 @@ def __init__(self, dataset, num_layers, hidden): Sequential( Linear(hidden, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -162,6 +168,7 @@ def __init__(self, dataset, num_layers, hidden, mode='cat'): Sequential( Linear(dataset.num_features, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), @@ -173,6 +180,7 @@ def __init__(self, dataset, num_layers, hidden, mode='cat'): Sequential( Linear(hidden, hidden), ReLU(), + BN(hidden), Linear(hidden, hidden), ReLU(), BN(hidden), From 7a6052caa15616bb0bd1db386358db2678d86c0a Mon Sep 17 00:00:00 2001 From: Rajveer Rathod <64583161+rajveer43@users.noreply.github.com> Date: Fri, 1 Sep 2023 17:18:27 +0530 Subject: [PATCH 1444/2432] Enhancing GraphGym Documentation (#7885) Part of #5132. 
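As a quick pointer for anyone reading the updated docs, below is a minimal, hypothetical sketch (not part of this PR; `MyNodeEncoder` and the `'my_encoder'` key are made-up placeholders) of how the registration mechanism documented here is typically used to plug a custom node encoder into GraphGym:

```python
import torch

from torch_geometric.graphgym.register import register_node_encoder


@register_node_encoder('my_encoder')
class MyNodeEncoder(torch.nn.Module):
    # GraphGym instantiates node encoders as `NodeEncoder(cfg.gnn.dim_inner)`,
    # so the constructor receives the inner embedding dimension.
    def __init__(self, emb_dim: int):
        super().__init__()
        self.lin = torch.nn.Linear(1, emb_dim)

    def forward(self, batch):
        # Encoders receive the full mini-batch and update `batch.x` in place.
        batch.x = self.lin(batch.x.float().view(-1, 1))
        return batch
```

The encoder is then selected through the experiment config, e.g. `cfg.dataset.node_encoder: true` and `cfg.dataset.node_encoder_name: my_encoder`.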
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- .../pgm_explainer_graph_classification.py | 18 +- .../dist_train_sage_supervised.py | 3 +- examples/infomax_inductive.py | 9 +- examples/kuzu/papers_100M/train.py | 7 +- examples/mnist_nn_conv.py | 18 +- torch_geometric/graphgym/config.py | 4 +- .../graphgym/contrib/layer/generalconv.py | 24 +- torch_geometric/graphgym/init.py | 11 +- torch_geometric/graphgym/loader.py | 33 ++- torch_geometric/graphgym/loss.py | 8 +- torch_geometric/graphgym/models/act.py | 16 +- torch_geometric/graphgym/models/encoder.py | 46 +++- torch_geometric/graphgym/models/gnn.py | 127 +++++---- torch_geometric/graphgym/models/head.py | 94 ++++--- torch_geometric/graphgym/models/layer.py | 257 ++++++++++-------- torch_geometric/graphgym/train.py | 26 +- torch_geometric/graphgym/utils/comp_budget.py | 4 +- torch_geometric/graphgym/utils/plot.py | 5 +- torch_geometric/nn/norm/graph_size_norm.py | 3 +- 19 files changed, 419 insertions(+), 294 deletions(-) diff --git a/examples/contrib/pgm_explainer_graph_classification.py b/examples/contrib/pgm_explainer_graph_classification.py index a90b3d766bb8..8960f1090f7f 100644 --- a/examples/contrib/pgm_explainer_graph_classification.py +++ b/examples/contrib/pgm_explainer_graph_classification.py @@ -5,8 +5,8 @@ import os.path as osp import torch -import torch.nn as nn import torch.nn.functional as F +from torch.nn import Linear, ReLU, Sequential import torch_geometric.transforms as T from torch_geometric.contrib.explain import PGMExplainer @@ -37,15 +37,21 @@ def normalized_cut_2d(edge_index, pos): return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0)) -class Net(nn.Module): +class Net(torch.nn.Module): def __init__(self): super().__init__() - nn1 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), - nn.Linear(25, d.num_features * 32)) + nn1 = Sequential( + Linear(2, 25), + ReLU(), + Linear(25, d.num_features * 32), + ) self.conv1 = NNConv(d.num_features, 32, nn1, aggr='mean') - nn2 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), - nn.Linear(25, 32 * 64)) + nn2 = Sequential( + Linear(2, 25), + ReLU(), + Linear(25, 32 * 64), + ) self.conv2 = NNConv(32, 64, nn2, aggr='mean') self.fc1 = torch.nn.Linear(64, 128) diff --git a/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py b/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py index 6a44bcbf9604..ca5cdbfa02c8 100644 --- a/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py +++ b/examples/distributed/graphlearn_for_pytorch/dist_train_sage_supervised.py @@ -69,7 +69,8 @@ def run_training_proc( backend='nccl', # or choose 'gloo' if 'nccl' is not supported. rank=current_ctx.rank, world_size=current_ctx.world_size, - init_method='tcp://{}:{}'.format(master_addr, training_pg_master_port)) + init_method=f'tcp://{master_addr}:{training_pg_master_port}', + ) # Create distributed neighbor loader for training. # We replace PyG's NeighborLoader with GLT's DistNeighborLoader. 
diff --git a/examples/infomax_inductive.py b/examples/infomax_inductive.py index 73e96e13734f..2faf9949050c 100644 --- a/examples/infomax_inductive.py +++ b/examples/infomax_inductive.py @@ -1,7 +1,6 @@ import os.path as osp import torch -import torch.nn as nn from tqdm import tqdm from torch_geometric.datasets import Reddit @@ -19,7 +18,7 @@ num_workers=12) -class Encoder(nn.Module): +class Encoder(torch.nn.Module): def __init__(self, in_channels, hidden_channels): super().__init__() self.convs = torch.nn.ModuleList([ @@ -30,9 +29,9 @@ def __init__(self, in_channels, hidden_channels): self.activations = torch.nn.ModuleList() self.activations.extend([ - nn.PReLU(hidden_channels), - nn.PReLU(hidden_channels), - nn.PReLU(hidden_channels) + torch.nn.PReLU(hidden_channels), + torch.nn.PReLU(hidden_channels), + torch.nn.PReLU(hidden_channels) ]) def forward(self, x, edge_index, batch_size): diff --git a/examples/kuzu/papers_100M/train.py b/examples/kuzu/papers_100M/train.py index 5b3da061eb79..9599c1c0191e 100644 --- a/examples/kuzu/papers_100M/train.py +++ b/examples/kuzu/papers_100M/train.py @@ -4,7 +4,6 @@ import kuzu import pandas as pd import torch -import torch.nn as nn import torch.nn.functional as F from tqdm import tqdm @@ -60,13 +59,13 @@ ) -class GraphSAGE(nn.Module): +class GraphSAGE(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout=0.2): super().__init__() - self.convs = nn.ModuleList() - self.norms = nn.ModuleList() + self.convs = torch.nn.ModuleList() + self.norms = torch.nn.ModuleList() self.convs.append(SAGEConv(in_channels, hidden_channels)) self.bns.append(BatchNorm(hidden_channels)) diff --git a/examples/mnist_nn_conv.py b/examples/mnist_nn_conv.py index 9386a2781a47..17295241b07a 100644 --- a/examples/mnist_nn_conv.py +++ b/examples/mnist_nn_conv.py @@ -1,8 +1,8 @@ import os.path as osp import torch -import torch.nn as nn import torch.nn.functional as F +from torch.nn import Linear, ReLU, Sequential import torch_geometric.transforms as T from torch_geometric.datasets import MNISTSuperpixels @@ -35,15 +35,21 @@ def normalized_cut_2d(edge_index, pos): return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0)) -class Net(nn.Module): +class Net(torch.nn.Module): def __init__(self): super().__init__() - nn1 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), - nn.Linear(25, d.num_features * 32)) + nn1 = Sequential( + Linear(2, 25), + ReLU(), + Linear(25, d.num_features * 32), + ) self.conv1 = NNConv(d.num_features, 32, nn1, aggr='mean') - nn2 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), - nn.Linear(25, 32 * 64)) + nn2 = Sequential( + Linear(2, 25), + ReLU(), + Linear(25, 32 * 64), + ) self.conv2 = NNConv(32, 64, nn2, aggr='mean') self.fc1 = torch.nn.Linear(64, 128) diff --git a/torch_geometric/graphgym/config.py b/torch_geometric/graphgym/config.py index 131d01163fbc..043d18e180e7 100644 --- a/torch_geometric/graphgym/config.py +++ b/torch_geometric/graphgym/config.py @@ -452,8 +452,8 @@ def set_cfg(cfg): def assert_cfg(cfg): r"""Checks config values, do necessary post processing to the configs""" if cfg.dataset.task not in ['node', 'edge', 'graph', 'link_pred']: - raise ValueError('Task {} not supported, must be one of node, ' - 'edge, graph, link_pred'.format(cfg.dataset.task)) + raise ValueError(f"Task '{cfg.dataset.task}' not supported. 
Must be " + f"one of node, edge, graph, link_pred") if 'classification' in cfg.dataset.task_type and cfg.model.loss_fun == \ 'mse': cfg.model.loss_fun = 'cross_entropy' diff --git a/torch_geometric/graphgym/contrib/layer/generalconv.py b/torch_geometric/graphgym/contrib/layer/generalconv.py index c7f3263deb2c..5e1edb072de1 100644 --- a/torch_geometric/graphgym/contrib/layer/generalconv.py +++ b/torch_geometric/graphgym/contrib/layer/generalconv.py @@ -1,5 +1,4 @@ import torch -import torch.nn as nn from torch.nn import Parameter from torch_geometric.graphgym.config import cfg @@ -9,8 +8,7 @@ class GeneralConvLayer(MessagePassing): - r"""General GNN layer - """ + r"""A general GNN layer.""" def __init__(self, in_channels, out_channels, improved=False, cached=False, bias=True, **kwargs): super().__init__(aggr=cfg.gnn.agg, **kwargs) @@ -128,13 +126,23 @@ def __init__(self, in_channels, out_channels, edge_dim, improved=False, self.msg_direction = cfg.gnn.msg_direction if self.msg_direction == 'single': - self.linear_msg = nn.Linear(in_channels + edge_dim, out_channels, - bias=False) + self.linear_msg = torch.nn.Linear( + in_channels + edge_dim, + out_channels, + bias=False, + ) else: - self.linear_msg = nn.Linear(in_channels * 2 + edge_dim, - out_channels, bias=False) + self.linear_msg = torch.nn.Linear( + in_channels * 2 + edge_dim, + out_channels, + bias=False, + ) if cfg.gnn.self_msg == 'concat': - self.linear_self = nn.Linear(in_channels, out_channels, bias=False) + self.linear_self = torch.nn.Linear( + in_channels, + out_channels, + bias=False, + ) if bias: self.bias = Parameter(torch.empty(out_channels)) diff --git a/torch_geometric/graphgym/init.py b/torch_geometric/graphgym/init.py index 57821793e08a..f098c77467a0 100644 --- a/torch_geometric/graphgym/init.py +++ b/torch_geometric/graphgym/init.py @@ -1,4 +1,4 @@ -import torch.nn as nn +import torch def init_weights(m): @@ -9,11 +9,12 @@ def init_weights(m): m (nn.Module): PyTorch module """ - if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): + if (isinstance(m, torch.nn.BatchNorm2d) + or isinstance(m, torch.nn.BatchNorm1d)): m.weight.data.fill_(1.0) m.bias.data.zero_() - elif isinstance(m, nn.Linear): - m.weight.data = nn.init.xavier_uniform_( - m.weight.data, gain=nn.init.calculate_gain('relu')) + elif isinstance(m, torch.nn.Linear): + m.weight.data = torch.nn.init.xavier_uniform_( + m.weight.data, gain=torch.nn.init.calculate_gain('relu')) if m.bias is not None: m.bias.data.zero_() diff --git a/torch_geometric/graphgym/loader.py b/torch_geometric/graphgym/loader.py index 0dfeb55051da..93a83d05c295 100644 --- a/torch_geometric/graphgym/loader.py +++ b/torch_geometric/graphgym/loader.py @@ -1,3 +1,4 @@ +import os.path as osp from typing import Callable import torch @@ -58,7 +59,7 @@ def load_pyg(name, dataset_dir): Returns: PyG dataset object """ - dataset_dir = '{}/{}'.format(dataset_dir, name) + dataset_dir = osp.join(dataset_dir, name) if name in ['Cora', 'CiteSeer', 'PubMed']: dataset = Planetoid(dataset_dir, name) elif name[:3] == 'TU_': @@ -87,7 +88,7 @@ def load_pyg(name, dataset_dir): elif name == 'QM7b': dataset = QM7b(dataset_dir) else: - raise ValueError('{} not support'.format(name)) + raise ValueError(f"'{name}' not support") return dataset @@ -194,7 +195,7 @@ def load_dataset(): elif format == 'OGB': dataset = load_ogb(name.replace('_', '-'), dataset_dir) else: - raise ValueError('Unknown data format: {}'.format(format)) + raise ValueError(f"Unknown data format '{format}'") return dataset @@ -290,19 
+291,23 @@ def get_loader(dataset, sampler, batch_size, shuffle=True): pin_memory=True, persistent_workers=pw) elif sampler == "cluster": - loader_train = \ - ClusterLoader(dataset[0], - num_parts=cfg.train.train_parts, - save_dir="{}/{}".format(cfg.dataset.dir, - cfg.dataset.name.replace( - "-", "_")), - batch_size=batch_size, shuffle=shuffle, - num_workers=cfg.num_workers, - pin_memory=True, - persistent_workers=pw) + loader_train = ClusterLoader( + dataset[0], + num_parts=cfg.train.train_parts, + save_dir=osp.join( + cfg.dataset.dir, + cfg.dataset.name.replace("-", "_"), + ), + batch_size=batch_size, + shuffle=shuffle, + num_workers=cfg.num_workers, + pin_memory=True, + persistent_workers=pw, + ) else: - raise NotImplementedError("%s sampler is not implemented!" % sampler) + raise NotImplementedError(f"'{sampler}' is not implemented") + return loader_train diff --git a/torch_geometric/graphgym/loss.py b/torch_geometric/graphgym/loss.py index f2030ec08099..332196823ff6 100644 --- a/torch_geometric/graphgym/loss.py +++ b/torch_geometric/graphgym/loss.py @@ -1,5 +1,4 @@ import torch -import torch.nn as nn import torch.nn.functional as F import torch_geometric.graphgym.register as register @@ -17,8 +16,8 @@ def compute_loss(pred, true): Returns: Loss, normalized prediction score """ - bce_loss = nn.BCEWithLogitsLoss(reduction=cfg.model.size_average) - mse_loss = nn.MSELoss(reduction=cfg.model.size_average) + bce_loss = torch.nn.BCEWithLogitsLoss(reduction=cfg.model.size_average) + mse_loss = torch.nn.MSELoss(reduction=cfg.model.size_average) # default manipulation for pred and true # can be skipped if special loss computation is needed @@ -44,5 +43,4 @@ def compute_loss(pred, true): true = true.float() return mse_loss(pred, true), pred else: - raise ValueError('Loss func {} not supported'.format( - cfg.model.loss_fun)) + raise ValueError(f"Loss function '{cfg.model.loss_fun}' not supported") diff --git a/torch_geometric/graphgym/models/act.py b/torch_geometric/graphgym/models/act.py index 4f9bc4d6811c..1daa32054723 100644 --- a/torch_geometric/graphgym/models/act.py +++ b/torch_geometric/graphgym/models/act.py @@ -1,35 +1,35 @@ -import torch.nn as nn +import torch from torch_geometric.graphgym.config import cfg from torch_geometric.graphgym.register import register_act def relu(): - return nn.ReLU(inplace=cfg.mem.inplace) + return torch.nn.ReLU(inplace=cfg.mem.inplace) def selu(): - return nn.SELU(inplace=cfg.mem.inplace) + return torch.nn.SELU(inplace=cfg.mem.inplace) def prelu(): - return nn.PReLU() + return torch.nn.PReLU() def elu(): - return nn.ELU(inplace=cfg.mem.inplace) + return torch.nn.ELU(inplace=cfg.mem.inplace) def lrelu_01(): - return nn.LeakyReLU(0.1, inplace=cfg.mem.inplace) + return torch.nn.LeakyReLU(0.1, inplace=cfg.mem.inplace) def lrelu_025(): - return nn.LeakyReLU(0.25, inplace=cfg.mem.inplace) + return torch.nn.LeakyReLU(0.25, inplace=cfg.mem.inplace) def lrelu_05(): - return nn.LeakyReLU(0.5, inplace=cfg.mem.inplace) + return torch.nn.LeakyReLU(0.5, inplace=cfg.mem.inplace) if cfg is not None: diff --git a/torch_geometric/graphgym/models/encoder.py b/torch_geometric/graphgym/models/encoder.py index 8cb606e4306a..43b3b64fad8e 100644 --- a/torch_geometric/graphgym/models/encoder.py +++ b/torch_geometric/graphgym/models/encoder.py @@ -8,15 +8,20 @@ @register_node_encoder('Integer') class IntegerFeatureEncoder(torch.nn.Module): - """ - Provides an encoder for integer node features. + r"""Provides an encoder for integer node features. 
Args: - emb_dim (int): Output embedding dimension - num_classes (int): the number of classes for the - embedding mapping to learn from + emb_dim (int): The output embedding dimension. + num_classes (int): The number of classes/integers. + + Example: + + >>> encoder = IntegerFeatureEncoder(emb_dim=16, num_classes=10) + >>> batch = torch.randint(0, 10, (10, 2)) + >>> encoder(batch).size() + torch.Size([10, 16]) """ - def __init__(self, emb_dim, num_classes=None): + def __init__(self, emb_dim: int, num_classes: int): super().__init__() self.encoder = torch.nn.Embedding(num_classes, emb_dim) @@ -31,14 +36,19 @@ def forward(self, batch): @register_node_encoder('Atom') class AtomEncoder(torch.nn.Module): - """ - The atom Encoder used in OGB molecule dataset. + r"""The atom encoder used in OGB molecule dataset. Args: - emb_dim (int): Output embedding dimension - num_classes: None + emb_dim (int): The output embedding dimension. + + Example: + + >>> encoder = AtomEncoder(emb_dim=16) + >>> batch = torch.randint(0, 10, (10, 3)) + >>> encoder(batch).size() + torch.Size([10, 16]) """ - def __init__(self, emb_dim, num_classes=None): + def __init__(self, emb_dim, *args, **kwargs): super().__init__() from ogb.utils.features import get_atom_feature_dims @@ -61,13 +71,19 @@ def forward(self, batch): @register_edge_encoder('Bond') class BondEncoder(torch.nn.Module): - """ - The bond Encoder used in OGB molecule dataset. + r"""The bond encoder used in OGB molecule dataset. Args: - emb_dim (int): Output edge embedding dimension + emb_dim (int): The output embedding dimension. + + Example: + + >>> encoder = BondEncoder(emb_dim=16) + >>> batch = torch.randint(0, 10, (10, 3)) + >>> encoder(batch).size() + torch.Size([10, 16]) """ - def __init__(self, emb_dim): + def __init__(self, emb_dim: int): super().__init__() from ogb.utils.features import get_bond_feature_dims diff --git a/torch_geometric/graphgym/models/gnn.py b/torch_geometric/graphgym/models/gnn.py index af521c46a0da..a011b149523d 100644 --- a/torch_geometric/graphgym/models/gnn.py +++ b/torch_geometric/graphgym/models/gnn.py @@ -1,5 +1,4 @@ import torch -import torch.nn as nn import torch.nn.functional as F import torch_geometric.graphgym.register as register @@ -14,49 +13,61 @@ from torch_geometric.graphgym.register import register_stage -def GNNLayer(dim_in, dim_out, has_act=True): - """ - Wrapper for a GNN layer +def GNNLayer(dim_in: int, dim_out: int, has_act: bool = True) -> GeneralLayer: + r"""Creates a GNN layer, given the specified input and output dimensions + and the underlying configuration in :obj:`cfg`. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - has_act (bool): Whether has activation function after the layer - + dim_in (int): The input dimension + dim_out (int): The output dimension. + has_act (bool, optional): Whether to apply an activation function + after the layer. (default: :obj:`True`) """ return GeneralLayer( cfg.gnn.layer_type, - layer_config=new_layer_config(dim_in, dim_out, 1, has_act=has_act, - has_bias=False, cfg=cfg)) + layer_config=new_layer_config( + dim_in, + dim_out, + 1, + has_act=has_act, + has_bias=False, + cfg=cfg, + ), + ) -def GNNPreMP(dim_in, dim_out, num_layers): - """ - Wrapper for NN layer before GNN message passing +def GNNPreMP(dim_in: int, dim_out: int, num_layers: int) -> GeneralMultiLayer: + r"""Creates a NN layer used before message passing, given the specified + input and output dimensions and the underlying configuration in :obj:`cfg`. 
Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - num_layers (int): Number of layers - + dim_in (int): The input dimension + dim_out (int): The output dimension. + num_layers (int): The number of layers. """ return GeneralMultiLayer( 'linear', - layer_config=new_layer_config(dim_in, dim_out, num_layers, - has_act=False, has_bias=False, cfg=cfg)) + layer_config=new_layer_config( + dim_in, + dim_out, + num_layers, + has_act=False, + has_bias=False, + cfg=cfg, + ), + ) @register_stage('stack') @register_stage('skipsum') @register_stage('skipconcat') -class GNNStackStage(nn.Module): - """ - Simple Stage that stack GNN layers +class GNNStackStage(torch.nn.Module): + r"""Stacks a number of GNN layers. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - num_layers (int): Number of GNN layers + dim_in (int): The input dimension + dim_out (int): The output dimension. + num_layers (int): The number of layers. """ def __init__(self, dim_in, dim_out, num_layers): super().__init__() @@ -67,7 +78,7 @@ def __init__(self, dim_in, dim_out, num_layers): else: d_in = dim_in if i == 0 else dim_out layer = GNNLayer(d_in, dim_out) - self.add_module('layer{}'.format(i), layer) + self.add_module(f'layer{i}', layer) def forward(self, batch): for i, layer in enumerate(self.children()): @@ -75,44 +86,56 @@ def forward(self, batch): batch = layer(batch) if cfg.gnn.stage_type == 'skipsum': batch.x = x + batch.x - elif cfg.gnn.stage_type == 'skipconcat' and \ - i < self.num_layers - 1: + elif (cfg.gnn.stage_type == 'skipconcat' + and i < self.num_layers - 1): batch.x = torch.cat([x, batch.x], dim=1) if cfg.gnn.l2norm: batch.x = F.normalize(batch.x, p=2, dim=-1) return batch -class FeatureEncoder(nn.Module): - """ - Encoding node and edge features +class FeatureEncoder(torch.nn.Module): + r"""Encodes node and edge features, given the specified input dimension and + the underlying configuration in :obj:`cfg`. Args: - dim_in (int): Input feature dimension + dim_in (int): The input feature dimension. 
""" - def __init__(self, dim_in): + def __init__(self, dim_in: int): super().__init__() self.dim_in = dim_in if cfg.dataset.node_encoder: - # Encode integer node features via nn.Embeddings + # Encode integer node features via `torch.nn.Embedding`: NodeEncoder = register.node_encoder_dict[ cfg.dataset.node_encoder_name] self.node_encoder = NodeEncoder(cfg.gnn.dim_inner) if cfg.dataset.node_encoder_bn: self.node_encoder_bn = BatchNorm1dNode( - new_layer_config(cfg.gnn.dim_inner, -1, -1, has_act=False, - has_bias=False, cfg=cfg)) - # Update dim_in to reflect the new dimension fo the node features + new_layer_config( + cfg.gnn.dim_inner, + -1, + -1, + has_act=False, + has_bias=False, + cfg=cfg, + )) + # Update `dim_in` to reflect the new dimension fo the node features self.dim_in = cfg.gnn.dim_inner if cfg.dataset.edge_encoder: - # Encode integer edge features via nn.Embeddings + # Encode integer edge features via `torch.nn.Embedding`: EdgeEncoder = register.edge_encoder_dict[ cfg.dataset.edge_encoder_name] self.edge_encoder = EdgeEncoder(cfg.gnn.dim_inner) if cfg.dataset.edge_encoder_bn: self.edge_encoder_bn = BatchNorm1dNode( - new_layer_config(cfg.gnn.dim_inner, -1, -1, has_act=False, - has_bias=False, cfg=cfg)) + new_layer_config( + cfg.gnn.dim_inner, + -1, + -1, + has_act=False, + has_bias=False, + cfg=cfg, + )) def forward(self, batch): for module in self.children(): @@ -120,16 +143,26 @@ def forward(self, batch): return batch -class GNN(nn.Module): - """ - General GNN model: encoder + stage + head +class GNN(torch.nn.Module): + r"""A general Graph Neural Network (GNN) model. + + The GNN model consists of three main components: + + 1. An encoder to transform input features into a fixed-size embedding + space. + 2. A processing or message passing stage for information exchange between + nodes. + 3. A head to produce the final output features/predictions. + + The configuration of each component is determined by the underlying + configuration in :obj:`cfg`. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - **kwargs (optional): Optional additional args + dim_in (int): The input feature dimension. + dim_out (int): The output feature dimension. + **kwargs (optional): Additional keyword arguments. """ - def __init__(self, dim_in, dim_out, **kwargs): + def __init__(self, dim_in: int, dim_out: int, **kwargs): super().__init__() GNNStage = register.stage_dict[cfg.gnn.stage_type] GNNHead = register.head_dict[cfg.gnn.head] diff --git a/torch_geometric/graphgym/models/head.py b/torch_geometric/graphgym/models/head.py index 923ac32d6699..84738ea63369 100644 --- a/torch_geometric/graphgym/models/head.py +++ b/torch_geometric/graphgym/models/head.py @@ -1,9 +1,4 @@ -""" GNN heads are the last layer of a GNN right before loss computation. -They are constructed in the init function of the gnn.GNN. -""" - import torch -import torch.nn as nn import torch_geometric.graphgym.register as register from torch_geometric.graphgym.config import cfg @@ -12,19 +7,24 @@ @register_head('node') -class GNNNodeHead(nn.Module): - """ - GNN prediction head for node prediction tasks. +class GNNNodeHead(torch.nn.Module): + r"""A GNN prediction head for node-level prediction tasks. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension. For binary prediction, dim_out=1. + dim_in (int): The input feature dimension. + dim_out (int): The output feature dimension. 
""" - def __init__(self, dim_in, dim_out): + def __init__(self, dim_in: int, dim_out: int): super().__init__() self.layer_post_mp = MLP( - new_layer_config(dim_in, dim_out, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) + new_layer_config( + dim_in, + dim_out, + cfg.gnn.layers_post_mp, + has_act=False, + has_bias=True, + cfg=cfg, + )) def _apply_index(self, batch): x = batch.x @@ -44,45 +44,54 @@ def forward(self, batch): @register_head('edge') @register_head('link_pred') -class GNNEdgeHead(nn.Module): - """ - GNN prediction head for edge/link prediction tasks. +class GNNEdgeHead(torch.nn.Module): + r"""A GNN prediction head for edge-level/link-level prediction tasks. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension. For binary prediction, dim_out=1. + dim_in (int): The input feature dimension. + dim_out (int): The output feature dimension. """ - def __init__(self, dim_in, dim_out): + def __init__(self, dim_in: int, dim_out: int): super().__init__() - # module to decode edges from node embeddings + # Module to decode edges from node embeddings: if cfg.model.edge_decoding == 'concat': self.layer_post_mp = MLP( - new_layer_config(dim_in * 2, dim_out, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) - # requires parameter + new_layer_config( + dim_in * 2, + dim_out, + cfg.gnn.layers_post_mp, + has_act=False, + has_bias=True, + cfg=cfg, + )) self.decode_module = lambda v1, v2: \ self.layer_post_mp(torch.cat((v1, v2), dim=-1)) else: if dim_out > 1: - raise ValueError( - 'Binary edge decoding ({})is used for multi-class ' - 'edge/link prediction.'.format(cfg.model.edge_decoding)) + raise ValueError(f"Binary edge decoding " + f"'{cfg.model.edge_decoding}' is used for " + f"multi-class classification") self.layer_post_mp = MLP( - new_layer_config(dim_in, dim_in, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) + new_layer_config( + dim_in, + dim_in, + cfg.gnn.layers_post_mp, + has_act=False, + has_bias=True, + cfg=cfg, + )) if cfg.model.edge_decoding == 'dot': self.decode_module = lambda v1, v2: torch.sum(v1 * v2, dim=-1) elif cfg.model.edge_decoding == 'cosine_similarity': - self.decode_module = nn.CosineSimilarity(dim=-1) + self.decode_module = torch.nn.CosineSimilarity(dim=-1) else: - raise ValueError('Unknown edge decoding {}.'.format( - cfg.model.edge_decoding)) + raise ValueError(f"Unknown edge decoding " + f"'{cfg.model.edge_decoding}'") def _apply_index(self, batch): - index = '{}_edge_index'.format(batch.split) - label = '{}_edge_label'.format(batch.split) - return batch.x[batch[index]], \ - batch[label] + index = f'{batch.split}_edge_index' + label = f'{batch.split}_edge_label' + return batch.x[batch[index]], batch[label] def forward(self, batch): if cfg.model.edge_decoding != 'concat': @@ -95,17 +104,16 @@ def forward(self, batch): @register_head('graph') -class GNNGraphHead(nn.Module): - """ - GNN prediction head for graph prediction tasks. - The optional post_mp layer (specified by cfg.gnn.post_mp) is used - to transform the pooled embedding using an MLP. +class GNNGraphHead(torch.nn.Module): + r"""A GNN prediction head for graph-level prediction tasks. + A post message passing layer (as specified by :obj:`cfg.gnn.post_mp`) is + used to transform the pooled graph-level embeddings using an MLP. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension. For binary prediction, dim_out=1. + dim_in (int): The input feature dimension. + dim_out (int): The output feature dimension. 
""" - def __init__(self, dim_in, dim_out): + def __init__(self, dim_in: int, dim_out: int): super().__init__() self.layer_post_mp = MLP( new_layer_config(dim_in, dim_out, cfg.gnn.layers_post_mp, diff --git a/torch_geometric/graphgym/models/layer.py b/torch_geometric/graphgym/models/layer.py index aa5e047713a6..4629ffa4ab62 100644 --- a/torch_geometric/graphgym/models/layer.py +++ b/torch_geometric/graphgym/models/layer.py @@ -2,7 +2,6 @@ from dataclasses import dataclass, replace import torch -import torch.nn as nn import torch.nn.functional as F import torch_geometric as pyg @@ -45,7 +44,25 @@ class LayerConfig: keep_edge: float = 0.5 -def new_layer_config(dim_in, dim_out, num_layers, has_act, has_bias, cfg): +def new_layer_config( + dim_in: int, + dim_out: int, + num_layers: int, + has_act: bool, + has_bias: bool, + cfg, +) -> LayerConfig: + r"""Createa a layer configuration for a GNN layer. + + Args: + dim_in (int): The input feature dimension. + dim_out (int): The output feature dimension. + num_layers (int): The number of hidden layers + has_act (bool): Whether to apply an activation function after the + layer. + has_bias (bool): Whether to apply a bias term in the layer. + cfg (ConfigNode): The underlying configuration. + """ return LayerConfig( has_batchnorm=cfg.gnn.batchnorm, bn_eps=cfg.bn.eps, @@ -66,19 +83,13 @@ def new_layer_config(dim_in, dim_out, num_layers, has_act, has_bias, cfg): ) -# General classes -class GeneralLayer(nn.Module): - """ - General wrapper for layers +class GeneralLayer(torch.nn.Module): + r"""A general wrapper for layers. Args: - name (str): Name of the layer in registered :obj:`layer_dict` - dim_in (int): Input dimension - dim_out (int): Output dimension - has_act (bool): Whether has activation after the layer - has_bn (bool): Whether has BatchNorm in the layer - has_l2norm (bool): Wheter has L2 normalization after the layer - **kwargs (optional): Additional args + name (str): The registered name of the layer. + layer_config (LayerConfig): The configuration of the layer. + **kwargs (optional): Additional keyword arguments. """ def __init__(self, name, layer_config: LayerConfig, **kwargs): super().__init__() @@ -89,15 +100,20 @@ def __init__(self, name, layer_config: LayerConfig, **kwargs): layer_wrapper = [] if has_bn: layer_wrapper.append( - nn.BatchNorm1d(layer_config.dim_out, eps=layer_config.bn_eps, - momentum=layer_config.bn_mom)) + torch.nn.BatchNorm1d( + layer_config.dim_out, + eps=layer_config.bn_eps, + momentum=layer_config.bn_mom, + )) if layer_config.dropout > 0: layer_wrapper.append( - nn.Dropout(p=layer_config.dropout, - inplace=layer_config.mem_inplace)) + torch.nn.Dropout( + p=layer_config.dropout, + inplace=layer_config.mem_inplace, + )) if layer_config.has_act: layer_wrapper.append(register.act_dict[layer_config.act]()) - self.post_layer = nn.Sequential(*layer_wrapper) + self.post_layer = torch.nn.Sequential(*layer_wrapper) def forward(self, batch): batch = self.layer(batch) @@ -112,24 +128,21 @@ def forward(self, batch): return batch -class GeneralMultiLayer(nn.Module): - """ - General wrapper for a stack of multiple layers +class GeneralMultiLayer(torch.nn.Module): + r"""A general wrapper class for a stacking multiple NN layers. 
Args: - name (str): Name of the layer in registered :obj:`layer_dict` - num_layers (int): Number of layers in the stack - dim_in (int): Input dimension - dim_out (int): Output dimension - dim_inner (int): The dimension for the inner layers - final_act (bool): Whether has activation after the layer stack - **kwargs (optional): Additional args + name (str): The registered name of the layer. + layer_config (LayerConfig): The configuration of the layer. + **kwargs (optional): Additional keyword arguments. """ def __init__(self, name, layer_config: LayerConfig, **kwargs): super().__init__() - dim_inner = layer_config.dim_out \ - if layer_config.dim_inner is None \ - else layer_config.dim_inner + if layer_config.dim_inner: + dim_inner = layer_config.dim_out + else: + dim_inner = layer_config.dim_inner + for i in range(layer_config.num_layers): d_in = layer_config.dim_in if i == 0 else dim_inner d_out = layer_config.dim_out \ @@ -141,7 +154,7 @@ def __init__(self, name, layer_config: LayerConfig, **kwargs): inter_layer_config.dim_out = d_out inter_layer_config.has_act = has_act layer = GeneralLayer(name, inter_layer_config, **kwargs) - self.add_module('Layer_{}'.format(i), layer) + self.add_module(f'Layer_{i}', layer) def forward(self, batch): for layer in self.children(): @@ -153,20 +166,20 @@ def forward(self, batch): @register_layer('linear') -class Linear(nn.Module): - """ - Basic Linear layer. +class Linear(torch.nn.Module): + r"""A basic Linear layer. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - bias (bool): Whether has bias term - **kwargs (optional): Additional args + layer_config (LayerConfig): The configuration of the layer. + **kwargs (optional): Additional keyword arguments. """ def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = Linear_pyg(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) + self.model = Linear_pyg( + layer_config.dim_in, + layer_config.dim_out, + bias=layer_config.has_bias, + ) def forward(self, batch): if isinstance(batch, torch.Tensor): @@ -176,34 +189,38 @@ def forward(self, batch): return batch -class BatchNorm1dNode(nn.Module): - """ - BatchNorm for node feature. +class BatchNorm1dNode(torch.nn.Module): + r"""A batch normalization layer for node-level features. Args: - dim_in (int): Input dimension + layer_config (LayerConfig): The configuration of the layer. """ def __init__(self, layer_config: LayerConfig): super().__init__() - self.bn = nn.BatchNorm1d(layer_config.dim_in, eps=layer_config.bn_eps, - momentum=layer_config.bn_mom) + self.bn = torch.nn.BatchNorm1d( + layer_config.dim_in, + eps=layer_config.bn_eps, + momentum=layer_config.bn_mom, + ) def forward(self, batch): batch.x = self.bn(batch.x) return batch -class BatchNorm1dEdge(nn.Module): - """ - BatchNorm for edge feature. +class BatchNorm1dEdge(torch.nn.Module): + r"""A batch normalization layer for edge-level features. Args: - dim_in (int): Input dimension + layer_config (LayerConfig): The configuration of the layer. """ def __init__(self, layer_config: LayerConfig): super().__init__() - self.bn = nn.BatchNorm1d(layer_config.dim_in, eps=layer_config.bn_eps, - momentum=layer_config.bn_mom) + self.bn = torch.nn.BatchNorm1d( + layer_config.dim_in, + eps=layer_config.bn_eps, + momentum=layer_config.bn_mom, + ) def forward(self, batch): batch.edge_attr = self.bn(batch.edge_attr) @@ -211,24 +228,20 @@ def forward(self, batch): @register_layer('mlp') -class MLP(nn.Module): - """ - Basic MLP model. 
- Here 1-layer MLP is equivalent to a Liner layer. +class MLP(torch.nn.Module): + """A basic MLP model. Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - bias (bool): Whether has bias term - dim_inner (int): The dimension for the inner layers - num_layers (int): Number of layers in the stack - **kwargs (optional): Additional args + layer_config (LayerConfig): The configuration of the layer. + **kwargs (optional): Additional keyword arguments. """ def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - dim_inner = layer_config.dim_in \ - if layer_config.dim_inner is None \ - else layer_config.dim_inner + if layer_config.dim_inner: + dim_inner = layer_config.dim_in + else: + dim_inner = layer_config.dim_inner + layer_config.has_bias = True layers = [] if layer_config.num_layers > 1: @@ -241,7 +254,7 @@ def __init__(self, layer_config: LayerConfig, **kwargs): layers.append(Linear(layer_config)) else: layers.append(Linear(layer_config)) - self.model = nn.Sequential(*layers) + self.model = torch.nn.Sequential(*layers) def forward(self, batch): if isinstance(batch, torch.Tensor): @@ -252,14 +265,15 @@ def forward(self, batch): @register_layer('gcnconv') -class GCNConv(nn.Module): - """ - Graph Convolutional Network (GCN) layer - """ +class GCNConv(torch.nn.Module): + r"""A Graph Convolutional Network (GCN) layer.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = pyg.nn.GCNConv(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) + self.model = pyg.nn.GCNConv( + layer_config.dim_in, + layer_config.dim_out, + bias=layer_config.has_bias, + ) def forward(self, batch): batch.x = self.model(batch.x, batch.edge_index) @@ -267,14 +281,15 @@ def forward(self, batch): @register_layer('sageconv') -class SAGEConv(nn.Module): - """ - GraphSAGE Conv layer - """ +class SAGEConv(torch.nn.Module): + r"""A GraphSAGE layer.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = pyg.nn.SAGEConv(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) + self.model = pyg.nn.SAGEConv( + layer_config.dim_in, + layer_config.dim_out, + bias=layer_config.has_bias, + ) def forward(self, batch): batch.x = self.model(batch.x, batch.edge_index) @@ -282,14 +297,15 @@ def forward(self, batch): @register_layer('gatconv') -class GATConv(nn.Module): - """ - Graph Attention Network (GAT) layer - """ +class GATConv(torch.nn.Module): + r"""A Graph Attention Network (GAT) layer.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = pyg.nn.GATConv(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) + self.model = pyg.nn.GATConv( + layer_config.dim_in, + layer_config.dim_out, + bias=layer_config.has_bias, + ) def forward(self, batch): batch.x = self.model(batch.x, batch.edge_index) @@ -297,15 +313,15 @@ def forward(self, batch): @register_layer('ginconv') -class GINConv(nn.Module): - """ - Graph Isomorphism Network (GIN) layer - """ +class GINConv(torch.nn.Module): + r"""A Graph Isomorphism Network (GIN) layer.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - gin_nn = nn.Sequential( - Linear_pyg(layer_config.dim_in, layer_config.dim_out), nn.ReLU(), - Linear_pyg(layer_config.dim_out, layer_config.dim_out)) + gin_nn = torch.nn.Sequential( + Linear_pyg(layer_config.dim_in, layer_config.dim_out), + torch.nn.ReLU(), + Linear_pyg(layer_config.dim_out, layer_config.dim_out), + ) self.model 
= pyg.nn.GINConv(gin_nn) def forward(self, batch): @@ -314,16 +330,17 @@ def forward(self, batch): @register_layer('splineconv') -class SplineConv(nn.Module): - """ - SplineCNN layer - """ +class SplineConv(torch.nn.Module): + r"""A SplineCNN layer.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = pyg.nn.SplineConv(layer_config.dim_in, - layer_config.dim_out, dim=1, - kernel_size=2, - bias=layer_config.has_bias) + self.model = pyg.nn.SplineConv( + layer_config.dim_in, + layer_config.dim_out, + dim=1, + kernel_size=2, + bias=layer_config.has_bias, + ) def forward(self, batch): batch.x = self.model(batch.x, batch.edge_index, batch.edge_attr) @@ -331,13 +348,15 @@ def forward(self, batch): @register_layer('generalconv') -class GeneralConv(nn.Module): - """A general GNN layer""" +class GeneralConv(torch.nn.Module): + r"""A general GNN layer.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = GeneralConvLayer(layer_config.dim_in, - layer_config.dim_out, - bias=layer_config.has_bias) + self.model = GeneralConvLayer( + layer_config.dim_in, + layer_config.dim_out, + bias=layer_config.has_bias, + ) def forward(self, batch): batch.x = self.model(batch.x, batch.edge_index) @@ -345,14 +364,16 @@ def forward(self, batch): @register_layer('generaledgeconv') -class GeneralEdgeConv(nn.Module): - """A general GNN layer that supports edge features as well""" +class GeneralEdgeConv(torch.nn.Module): + r"""A general GNN layer with edge feature support.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = GeneralEdgeConvLayer(layer_config.dim_in, - layer_config.dim_out, - layer_config.edge_dim, - bias=layer_config.has_bias) + self.model = GeneralEdgeConvLayer( + layer_config.dim_in, + layer_config.dim_out, + layer_config.edge_dim, + bias=layer_config.has_bias, + ) def forward(self, batch): batch.x = self.model(batch.x, batch.edge_index, @@ -361,14 +382,16 @@ def forward(self, batch): @register_layer('generalsampleedgeconv') -class GeneralSampleEdgeConv(nn.Module): - """A general GNN layer that supports edge features and edge sampling""" +class GeneralSampleEdgeConv(torch.nn.Module): + r"""A general GNN layer that supports edge features and edge sampling.""" def __init__(self, layer_config: LayerConfig, **kwargs): super().__init__() - self.model = GeneralEdgeConvLayer(layer_config.dim_in, - layer_config.dim_out, - layer_config.edge_dim, - bias=layer_config.has_bias) + self.model = GeneralEdgeConvLayer( + layer_config.dim_in, + layer_config.dim_out, + layer_config.edge_dim, + bias=layer_config.has_bias, + ) self.keep_edge = layer_config.keep_edge def forward(self, batch): diff --git a/torch_geometric/graphgym/train.py b/torch_geometric/graphgym/train.py index f04a7a07a699..d391e9de21a8 100644 --- a/torch_geometric/graphgym/train.py +++ b/torch_geometric/graphgym/train.py @@ -1,5 +1,5 @@ import warnings -from typing import Optional +from typing import Any, Dict, Optional import torch from torch.utils.data import DataLoader @@ -14,6 +14,13 @@ class GraphGymDataModule(LightningDataModule): + r"""A :class:`pytorch_lightning.LightningDataModule` for handling data + loading routines in GraphGym. + + This class provides data loaders for training, validation, and testing, and + can be accessed through the :meth:`train_dataloader`, + :meth:`val_dataloader`, and :meth:`test_dataloader` methods, respectively. 
+ """ def __init__(self): self.loaders = create_loader() super().__init__(has_val=True, has_test=True) @@ -30,8 +37,21 @@ def test_dataloader(self) -> DataLoader: return self.loaders[2] -def train(model: GraphGymModule, datamodule, logger: bool = True, - trainer_config: Optional[dict] = None): +def train( + model: GraphGymModule, + datamodule: GraphGymDataModule, + logger: bool = True, + trainer_config: Optional[Dict[str, Any]] = None, +): + r"""Trains a GraphGym model using PyTorch Lightning. + + Args: + model (GraphGymModule): The GraphGym model. + datamodule (GraphGymDataModule): The GraphGym data module. + logger (bool, optional): Whether to enable logging during training. + (default: :obj:`True`) + trainer_config (dict, optional): Additional trainer configuration. + """ warnings.filterwarnings('ignore', '.*use `CSVLogger` as the default.*') callbacks = [] diff --git a/torch_geometric/graphgym/utils/comp_budget.py b/torch_geometric/graphgym/utils/comp_budget.py index 0925bf6a0bee..bd303f96a6ce 100644 --- a/torch_geometric/graphgym/utils/comp_budget.py +++ b/torch_geometric/graphgym/utils/comp_budget.py @@ -91,6 +91,6 @@ def match_baseline_cfg(cfg_dict, cfg_dict_baseline, verbose=True): cfg_dict['gnn'] = {'dim_inner', cfg.gnn.dim_inner} set_cfg(cfg) if verbose: - print('Computational budget has matched: Baseline params {}, ' - 'Current params {}'.format(stats_baseline, stats)) + print(f"Computational budget has matched - Baseline params: " + f"{stats_baseline}, Current params: {stats}") return cfg_dict diff --git a/torch_geometric/graphgym/utils/plot.py b/torch_geometric/graphgym/utils/plot.py index f54b60434642..d591cdd7b27c 100644 --- a/torch_geometric/graphgym/utils/plot.py +++ b/torch_geometric/graphgym/utils/plot.py @@ -1,3 +1,6 @@ +import os.path as osp + + def view_emb(emb, dir): ''' Visualize a embedding matrix. 
@@ -19,4 +22,4 @@ def view_emb(emb, dir): emb = pca.fit_transform(emb) plt.figure(figsize=(10, 10)) plt.scatter(emb[:, 0], emb[:, 1]) - plt.savefig('{}/emb_pca.png'.format(dir), dpi=100) + plt.savefig(osp.join(dir, 'emb_pca.png'), dpi=100) diff --git a/torch_geometric/nn/norm/graph_size_norm.py b/torch_geometric/nn/norm/graph_size_norm.py index 243147d86912..567e85a661e1 100644 --- a/torch_geometric/nn/norm/graph_size_norm.py +++ b/torch_geometric/nn/norm/graph_size_norm.py @@ -1,14 +1,13 @@ from typing import Optional import torch -import torch.nn as nn from torch import Tensor from torch_geometric.typing import OptTensor from torch_geometric.utils import degree -class GraphSizeNorm(nn.Module): +class GraphSizeNorm(torch.nn.Module): r"""Applies Graph Size Normalization over each individual graph in a batch of node features as described in the `"Benchmarking Graph Neural Networks" `_ From 7e492d9ee8a867c37d0e02f0caaaefd2d6ee8163 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Fri, 1 Sep 2023 11:05:56 -0700 Subject: [PATCH 1445/2432] Added `papers100m` multi-GPU example (#7921) will use this in my tutorial: https://github.com/pyg-team/pytorch_geometric/pull/7894/ Average Training Iteration Time: 0.01475715453494085 s/iter --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + examples/multi_gpu/papers100m_multigpu.py | 137 ++++++++++++++++++++++ 2 files changed, 138 insertions(+) create mode 100644 examples/multi_gpu/papers100m_multigpu.py diff --git a/CHANGELOG.md b/CHANGELOG.md index b51a3ec39cdf..4911018fd55a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added multi-gpu papers100m GCN example ([#7921](https://github.com/pyg-team/pytorch_geometric/pull/7921)) - Added `group_argsort` implementation ([#7948](https://github.com/pyg-team/pytorch_geometric/pull/7948)) - Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896), [#7897](https://github.com/pyg-team/pytorch_geometric/pull/7897)) - Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925)) diff --git a/examples/multi_gpu/papers100m_multigpu.py b/examples/multi_gpu/papers100m_multigpu.py new file mode 100644 index 000000000000..7a368357d4aa --- /dev/null +++ b/examples/multi_gpu/papers100m_multigpu.py @@ -0,0 +1,137 @@ +import argparse +import os +import time + +import torch +import torch.distributed as dist +import torch.multiprocessing as mp +import torch.nn.functional as F +from ogb.nodeproppred import PygNodePropPredDataset +from torch.nn.parallel import DistributedDataParallel +from torchmetrics import Accuracy + +from torch_geometric.loader import NeighborLoader +from torch_geometric.nn import GCNConv + + +def pyg_num_work(): + num_work = None + if hasattr(os, "sched_getaffinity"): + try: + num_work = len(os.sched_getaffinity(0)) / 2 + except Exception: + pass + if num_work is None: + num_work = os.cpu_count() / 2 + return int(num_work) + + +class GCN(torch.nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels): + super().__init__() + self.conv1 = GCNConv(in_channels, hidden_channels) + self.conv2 = GCNConv(hidden_channels, out_channels) + + def forward(self, x, edge_index, edge_weight=None): + x = F.dropout(x, p=0.5, training=self.training) + x = self.conv1(x, edge_index, edge_weight).relu() + x = F.dropout(x, p=0.5, 
training=self.training) + x = self.conv2(x, edge_index, edge_weight) + return x + + +def run_train(rank, data, world_size, model, epochs, batch_size, fan_out, + split_idx, num_classes): + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '12355' + dist.init_process_group('nccl', rank=rank, world_size=world_size) + split_idx['train'] = split_idx['train'].split( + split_idx['train'].size(0) // world_size, dim=0)[rank].clone() + model = model.to(rank) + model = DistributedDataParallel(model, device_ids=[rank]) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01, + weight_decay=0.0005) + train_loader = NeighborLoader(data, num_neighbors=[fan_out, fan_out], + input_nodes=split_idx['train'], + batch_size=batch_size, + num_workers=pyg_num_work()) + if rank == 0: + eval_loader = NeighborLoader(data, num_neighbors=[fan_out, fan_out], + input_nodes=split_idx['valid'], + batch_size=batch_size, + num_workers=pyg_num_work()) + test_loader = NeighborLoader(data, num_neighbors=[fan_out, fan_out], + input_nodes=split_idx['test'], + batch_size=batch_size, + num_workers=pyg_num_work()) + eval_steps = 100 + acc = Accuracy(task="multiclass", num_classes=num_classes).to(rank) + if rank == 0: + print("Beginning training...") + for epoch in range(epochs): + for i, batch in enumerate(train_loader): + if i >= 10: + start = time.time() + batch = batch.to(rank) + batch.y = batch.y.to(torch.long) + optimizer.zero_grad() + out = model(batch.x, batch.edge_index) + loss = F.cross_entropy(out[:batch_size], batch.y[:batch_size]) + loss.backward() + optimizer.step() + if rank == 0 and i % 10 == 0: + print("Epoch: " + str(epoch) + ", Iteration: " + str(i) + + ", Loss: " + str(loss)) + if rank == 0: + print("Average Training Iteration Time:", + (time.time() - start) / (i - 10), "s/iter") + acc_sum = 0.0 + with torch.no_grad(): + for i, batch in enumerate(eval_loader): + if i >= eval_steps: + break + if i >= 10: + start = time.time() + batch = batch.to(rank) + batch.y = batch.y.to(torch.long) + out = model(batch.x, batch.edge_index) + acc_sum += acc(out[:batch_size].softmax(dim=-1), + batch.y[:batch_size]) + print(f"Validation Accuracy: {acc_sum/(i) * 100.0:.4f}%", ) + print("Average Inference Iteration Time:", + (time.time() - start) / (i - 10), "s/iter") + if rank == 0: + acc_sum = 0.0 + with torch.no_grad(): + for i, batch in enumerate(test_loader): + batch = batch.to(rank) + batch.y = batch.y.to(torch.long) + out = model(batch.x, batch.edge_index) + acc_sum += acc(out[:batch_size].softmax(dim=-1), + batch.y[:batch_size]) + print(f"Test Accuracy: {acc_sum/(i) * 100.0:.4f}%", ) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--hidden_channels', type=int, default=64) + parser.add_argument('--lr', type=float, default=0.01) + parser.add_argument('--epochs', type=int, default=3) + parser.add_argument('--batch_size', type=int, default=128) + parser.add_argument('--fan_out', type=int, default=50) + + args = parser.parse_args() + + dataset = PygNodePropPredDataset(name='ogbn-papers100M') + split_idx = dataset.get_idx_split() + data = dataset[0] + data.y = data.y.reshape(-1) + model = GCN(dataset.num_features, args.hidden_channels, + dataset.num_classes) + print("Data =", data) + world_size = torch.cuda.device_count() + print('Let\'s use', world_size, 'GPUs!') + mp.spawn( + run_train, args=(data, world_size, model, args.epochs, args.batch_size, + args.fan_out, split_idx, dataset.num_classes), + nprocs=world_size, join=True) From 
1d1fd85583a5c0dd1c27cc159e72577521d4694d Mon Sep 17 00:00:00 2001 From: Erfan Loghmani Date: Fri, 1 Sep 2023 15:49:00 -0700 Subject: [PATCH 1446/2432] Add `MyketDataset` (#7959) This dataset is a temporal graph of Android application install interactions. The dataset is introduced in "[Effect of Choosing Loss Function when Using T-batching for Representation Learning on Dynamic Networks](https://arxiv.org/abs/2308.06862)". Each application also has features like the number of installs, rating, rating_count, and dummy variables for its category. You can find more information about the dataset and the raw files [here](https://github.com/erfanloghmani/myket-android-application-market-dataset/). --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 3 +- torch_geometric/data/temporal.py | 6 ++ torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/myket.py | 87 ++++++++++++++++++++++++++++ 4 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 torch_geometric/datasets/myket.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 4911018fd55a..31062652c262 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -- Added multi-gpu papers100m GCN example ([#7921](https://github.com/pyg-team/pytorch_geometric/pull/7921)) +- Added the `MyketDataset` ([#7959](https://github.com/pyg-team/pytorch_geometric/pull/7959)) +- Added a multi-GPU `ogbn-papers100M` example ([#7921](https://github.com/pyg-team/pytorch_geometric/pull/7921)) - Added `group_argsort` implementation ([#7948](https://github.com/pyg-team/pytorch_geometric/pull/7948)) - Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896), [#7897](https://github.com/pyg-team/pytorch_geometric/pull/7897)) - Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925)) diff --git a/torch_geometric/data/temporal.py b/torch_geometric/data/temporal.py index c2e99af7888c..d922ec1b6283 100644 --- a/torch_geometric/data/temporal.py +++ b/torch_geometric/data/temporal.py @@ -105,6 +105,12 @@ def __init__( for key, value in kwargs.items(): setattr(self, key, value) + @classmethod + def from_dict(cls, mapping: Dict[str, Any]) -> 'TemporalData': + r"""Creates a :class:`~torch_geometric.data.TemporalData` object from + a Python dictionary.""" + return cls(**mapping) + def index_select(self, idx: Any) -> 'TemporalData': idx = prepare_idx(idx) data = copy.copy(self) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index e7287d5633e3..3f38899558b8 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -72,6 +72,7 @@ from .airfrans import AirfRANS from .jodie import JODIEDataset from .wikidata import Wikidata5M +from .myket import MyketDataset from .dbp15k import DBP15K from .aminer import AMiner @@ -176,6 +177,7 @@ 'AirfRANS', 'JODIEDataset', 'Wikidata5M', + 'MyketDataset', ] hetero_datasets = [ diff --git a/torch_geometric/datasets/myket.py b/torch_geometric/datasets/myket.py new file mode 100644 index 000000000000..6b74dfba7ef4 --- /dev/null +++ b/torch_geometric/datasets/myket.py @@ -0,0 +1,87 @@ +from typing import Callable, List, Optional + +import numpy as np +import torch + +from torch_geometric.data import InMemoryDataset, TemporalData, download_url + + +class 
MyketDataset(InMemoryDataset): + r"""The Myket Android Application Install dataset from the + `"Effect of Choosing Loss Function when Using T-Batching for Representation + Learning on Dynamic Networks" `_ paper. + The dataset contains a temporal graph of application install interactions + in an Android application market. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + + **STATS:** + + .. list-table:: + :widths: 10 10 10 10 10 + :header-rows: 1 + + * - Name + - #nodes + - #edges + - #features + - #classes + * - Myket + - 17,988 + - 694,121 + - 33 + - 1 + """ + url = ('https://raw.githubusercontent.com/erfanloghmani/' + 'myket-android-application-market-dataset/main/data_int_index') + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): + super().__init__(root, transform, pre_transform) + self.load(self.processed_paths[0], data_cls=TemporalData) + + @property + def raw_file_names(self) -> List[str]: + return ['myket.csv', 'app_info_sample.npy'] + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + for file_name in self.raw_file_names: + download_url(http://github.com/f'%7Bself.url%7D/%7Bfile_name%7D',%20self.raw_dir) + + def process(self): + import pandas as pd + + df = pd.read_csv(self.raw_paths[0], skiprows=1, header=None) + + src = torch.from_numpy(df[0].values) + dst = torch.from_numpy(df[1].values) + t = torch.from_numpy(df[2].values) + + x = torch.from_numpy(np.load(self.raw_paths[1])).to(torch.float) + msg = x[dst] + + dst = dst + (int(src.max()) + 1) + + data = TemporalData(src=src, dst=dst, t=t, msg=msg) + + if self.pre_transform is not None: + data = self.pre_transform(data) + + self.save([data], self.processed_paths[0]) From 684784963f408f6e9bfd8c580341fe04a31ae876 Mon Sep 17 00:00:00 2001 From: xnuohz Date: Sat, 2 Sep 2023 07:08:07 +0800 Subject: [PATCH 1447/2432] [Code Coverage] `nn/conv/rgat_conv.py` and `transforms/compose.py` (#7937) Just a try to improve coverage, feel free to review, thanks:) --------- Co-authored-by: rusty1s --- test/nn/conv/test_rgat_conv.py | 77 +++++++++++++++++++++++++++++---- test/transforms/test_compose.py | 15 +++++++ 2 files changed, 84 insertions(+), 8 deletions(-) diff --git a/test/nn/conv/test_rgat_conv.py b/test/nn/conv/test_rgat_conv.py index 630c9f27787c..e62505e203d7 100644 --- a/test/nn/conv/test_rgat_conv.py +++ b/test/nn/conv/test_rgat_conv.py @@ -22,19 +22,80 @@ 'additive-self-attention', 'multiplicative-self-attention', ]) -def test_rgat_conv(mod, attention_mechanism, attention_mode): +@pytest.mark.parametrize('concat', [True, False]) +@pytest.mark.parametrize('edge_dim', [8, None]) +def test_rgat_conv(mod, attention_mechanism, attention_mode, concat, edge_dim): x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) edge_type = torch.tensor([0, 2, 1, 2]) - edge_attr = torch.randn((4, 8)) + edge_attr = torch.randn((4, edge_dim)) if edge_dim else None - conv = RGATConv(8, 20, 
num_relations=4, num_bases=4, mod=mod, - attention_mechanism=attention_mechanism, - attention_mode=attention_mode, heads=2, dim=1, edge_dim=8) - assert str(conv) == 'RGATConv(8, 20, heads=2)' + conv1 = RGATConv( # `num_bases` is not None: + in_channels=8, + out_channels=16, + num_relations=4, + num_bases=4, + mod=mod, + attention_mechanism=attention_mechanism, + attention_mode=attention_mode, + heads=2, + dim=1, + concat=concat, + edge_dim=edge_dim, + ) - out = conv(x, edge_index, edge_type, edge_attr) - assert out.size() == (4, 40) + conv2 = RGATConv( # `num_blocks` is not `None` + in_channels=8, + out_channels=16, + num_relations=4, + num_blocks=4, + mod=mod, + attention_mechanism=attention_mechanism, + attention_mode=attention_mode, + heads=2, + dim=1, + concat=concat, + edge_dim=edge_dim, + ) + + conv3 = RGATConv( # Both `num_bases` and `num_blocks` are `None`: + in_channels=8, + out_channels=16, + num_relations=4, + mod=mod, + attention_mechanism=attention_mechanism, + attention_mode=attention_mode, + heads=2, + dim=1, + concat=concat, + edge_dim=edge_dim, + ) + + conv4 = RGATConv( # `dropout > 0` and `mod` is `None`: + in_channels=8, + out_channels=16, + num_relations=4, + mod=None, + attention_mechanism=attention_mechanism, + attention_mode=attention_mode, + heads=2, + dim=1, + concat=concat, + edge_dim=edge_dim, + dropout=0.5, + ) + + for conv in [conv1, conv2, conv3, conv4]: + assert str(conv) == 'RGATConv(8, 16, heads=2)' + + out = conv(x, edge_index, edge_type, edge_attr) + assert out.size() == (4, 16 * (2 if concat else 1)) + + out, (adj, alpha) = conv(x, edge_index, edge_type, edge_attr, + return_attention_weights=True) + assert out.size() == (4, 16 * (2 if concat else 1)) + assert adj.size() == edge_index.size() + assert alpha.size() == (4, 2) def test_rgat_conv_jittable(): diff --git a/test/transforms/test_compose.py b/test/transforms/test_compose.py index 541d4bf640b2..91f9d64ba1df 100644 --- a/test/transforms/test_compose.py +++ b/test/transforms/test_compose.py @@ -21,6 +21,21 @@ def test_compose(): assert data.edge_index.size() == (2, 7) +def test_compose_data_list(): + transform = T.Compose([T.Center(), T.AddSelfLoops()]) + + pos = torch.tensor([[0.0, 0.0], [2.0, 0.0], [4.0, 0.0]]) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + + data_list = [Data(edge_index=edge_index, pos=pos) for _ in range(3)] + data_list = transform(data_list) + assert len(data_list) == 3 + for data in data_list: + assert len(data) == 2 + assert data.pos.tolist() == [[-2.0, 0.0], [0.0, 0.0], [2.0, 0.0]] + assert data.edge_index.size() == (2, 7) + + def test_compose_filters(): filter_fn = T.ComposeFilters([ lambda d: d.num_nodes > 2, From 9f3361549a893797f7154764c190e361d170040d Mon Sep 17 00:00:00 2001 From: Favour James <63251266+Favourj-bit@users.noreply.github.com> Date: Sat, 2 Sep 2023 12:52:09 +0100 Subject: [PATCH 1448/2432] Add `BreastInvasiveCarcinoma` dataset (#7905) This is a dataset that was generated by integrating the breast cancer (BRCA TCGA) dataset from the cBioPortal (cbioportal.org) and a biological network for node connections from Pathway Commons (www.pathwaycommons.org). The dataset contains the gene features of each patient and the overall survival time (in months) of each patient, which are the labels. 
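For reference, a minimal usage sketch of the new dataset class (the `root` path below is just a placeholder; the raw files are downloaded and processed on first access):

```python
from torch_geometric.datasets import BrcaTcga

dataset = BrcaTcga(root='data/brca_tcga')  # placeholder path

data = dataset[0]  # one graph per patient: 9,288 gene nodes, 271,771 edges
print(data.y)      # overall survival time (in months), used as the label
```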
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Akihiro Nitta Co-authored-by: Matthias Fey --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/brca_tgca.py | 104 ++++++++++++++++++++++++++ 3 files changed, 107 insertions(+) create mode 100644 torch_geometric/datasets/brca_tgca.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 31062652c262..d069581959b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) - Added the `MyketDataset` ([#7959](https://github.com/pyg-team/pytorch_geometric/pull/7959)) - Added a multi-GPU `ogbn-papers100M` example ([#7921](https://github.com/pyg-team/pytorch_geometric/pull/7921)) - Added `group_argsort` implementation ([#7948](https://github.com/pyg-team/pytorch_geometric/pull/7948)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 3f38899558b8..f7857feed43a 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -73,6 +73,7 @@ from .jodie import JODIEDataset from .wikidata import Wikidata5M from .myket import MyketDataset +from .brca_tgca import BrcaTcga from .dbp15k import DBP15K from .aminer import AMiner @@ -178,6 +179,7 @@ 'JODIEDataset', 'Wikidata5M', 'MyketDataset', + 'BrcaTcga', ] hetero_datasets = [ diff --git a/torch_geometric/datasets/brca_tgca.py b/torch_geometric/datasets/brca_tgca.py new file mode 100644 index 000000000000..e699c141316a --- /dev/null +++ b/torch_geometric/datasets/brca_tgca.py @@ -0,0 +1,104 @@ +import os +import os.path as osp +import shutil +from typing import Callable, List, Optional + +import numpy as np +import torch + +from torch_geometric.data import ( + Data, + InMemoryDataset, + download_url, + extract_zip, +) + + +class BrcaTcga(InMemoryDataset): + r"""The breast cancer (BRCA TCGA) dataset from the `cBioPortal + `_ and the biological network for node + connections from `Pathway Commons `_. + The dataset contains the gene features of 1,082 patients, and the overall + survival time (in months) of each patient as label. + + Pre-processing and example model codes on how to use this dataset can be + found `here `_. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. (default: :obj:`None`) + pre_filter (callable, optional): A function that takes in an + :obj:`torch_geometric.data.Data` object and returns a boolean + value, indicating whether the data object should be included in the + final dataset. (default: :obj:`None`) + + **STATS:** + + .. 
list-table:: + :widths: 10 10 10 10 + :header-rows: 1 + + * - #graphs + - #nodes + - #edges + - #features + * - 1,082 + - 9,288 + - 271,771 + - 1,082 + """ + url = 'https://zenodo.org/record/8251328/files/brca_tcga.zip?download=1' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + pre_filter: Optional[Callable] = None, + ): + super().__init__(root, transform, pre_transform, pre_filter) + self.load(self.processed_paths[0]) + + @property + def raw_file_names(self) -> List[str]: + return ['graph_idx.csv', 'graph_labels.csv', 'edge_index.pt'] + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + path = download_url(http://github.com/self.url,%20self.root) + extract_zip(path, self.root) + os.unlink(path) + shutil.rmtree(self.raw_dir) + os.rename(osp.join(self.root, 'brca_tcga'), self.raw_dir) + + def process(self): + import pandas as pd + + graph_feat = pd.read_csv(self.raw_paths[0], index_col=0).values + graph_feat = torch.from_numpy(graph_feat).to(torch.float) + graph_label = np.loadtxt(self.raw_paths[1], delimiter=',') + graph_label = torch.from_numpy(graph_label).to(torch.float) + edge_index = torch.load(self.raw_paths[2]) + + data_list = [] + for x, y in zip(graph_feat, graph_label): + data = Data(x=x, edge_index=edge_index, y=y) + + if self.pre_filter is not None and not self.pre_filter(data): + continue + if self.pre_transform is not None: + data = self.pre_transform(data) + + data_list.append(data) + + self.save(data_list, self.processed_paths[0]) From 6db145342a58eec41c5e8eb0bbfcc1d33468b701 Mon Sep 17 00:00:00 2001 From: xnuohz Date: Mon, 4 Sep 2023 14:26:11 +0800 Subject: [PATCH 1449/2432] [Code Coverage] `loader/temporal_dataloader.py` and `nn/conv/transformer_conv.py` (#7968) Part of #6528. 
--------- Co-authored-by: rusty1s --- test/loader/test_temporal_dataloader.py | 14 ++++++++++---- test/nn/conv/test_transformer_conv.py | 25 ++++++++++++++++--------- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/test/loader/test_temporal_dataloader.py b/test/loader/test_temporal_dataloader.py index 82a1da009506..de5b0f960b10 100644 --- a/test/loader/test_temporal_dataloader.py +++ b/test/loader/test_temporal_dataloader.py @@ -1,20 +1,26 @@ +import pytest import torch from torch_geometric.data import TemporalData from torch_geometric.loader import TemporalDataLoader -def test_temporal_dataloader(): +@pytest.mark.parametrize('batch_size,drop_last', [(4, True), (2, False)]) +def test_temporal_dataloader(batch_size, drop_last): src = dst = t = torch.arange(10) msg = torch.randn(10, 16) data = TemporalData(src=src, dst=dst, t=t, msg=msg) - loader = TemporalDataLoader(data, batch_size=2) - assert len(loader) == 5 + loader = TemporalDataLoader( + data, + batch_size=batch_size, + drop_last=drop_last, + ) + assert len(loader) == 10 // batch_size for i, batch in enumerate(loader): - assert len(batch) == 2 + assert len(batch) == batch_size arange = range(len(batch) * i, len(batch) * i + len(batch)) assert batch.src.tolist() == data.src[arange].tolist() assert batch.dst.tolist() == data.dst[arange].tolist() diff --git a/test/nn/conv/test_transformer_conv.py b/test/nn/conv/test_transformer_conv.py index 41efdbfc3788..7435a358760c 100644 --- a/test/nn/conv/test_transformer_conv.py +++ b/test/nn/conv/test_transformer_conv.py @@ -1,3 +1,4 @@ +import pytest import torch import torch_geometric.typing @@ -7,22 +8,28 @@ from torch_geometric.utils import to_torch_csc_tensor -def test_transformer_conv(): +@pytest.mark.parametrize('edge_dim', [None, 8]) +@pytest.mark.parametrize('concat', [True, False]) +def test_transformer_conv(edge_dim, concat): x1 = torch.randn(4, 8) x2 = torch.randn(2, 16) + out_channels = 32 + heads = 2 edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + edge_attr = torch.randn(edge_index.size(1), edge_dim) if edge_dim else None adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) - conv = TransformerConv(8, 32, heads=2, beta=True) - assert str(conv) == 'TransformerConv(8, 32, heads=2)' + conv = TransformerConv(8, out_channels, heads, beta=True, + edge_dim=edge_dim, concat=concat) + assert str(conv) == f'TransformerConv(8, {out_channels}, heads={heads})' - out = conv(x1, edge_index) - assert out.size() == (4, 64) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) + out = conv(x1, edge_index, edge_attr) + assert out.size() == (4, out_channels * (heads if concat else 1)) + assert torch.allclose(conv(x1, adj1.t(), edge_attr), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv(x1, adj2.t(), edge_attr), out, atol=1e-6) if is_full_test(): t = '(Tensor, Tensor, NoneType, NoneType) -> Tensor' @@ -35,7 +42,7 @@ def test_transformer_conv(): assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) # Test `return_attention_weights`. 
- result = conv(x1, edge_index, return_attention_weights=True) + result = conv(x1, edge_index, edge_attr, return_attention_weights=True) assert torch.allclose(result[0], out) assert result[1][0].size() == (2, 4) assert result[1][1].size() == (4, 2) @@ -43,7 +50,7 @@ def test_transformer_conv(): assert conv._alpha is None if torch_geometric.typing.WITH_TORCH_SPARSE: - result = conv(x1, adj2.t(), return_attention_weights=True) + result = conv(x1, adj2.t(), edge_attr, return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 assert conv._alpha is None From cf60b5fdf2ef38dd594946acbf392ed7678d1226 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 4 Sep 2023 08:27:25 +0200 Subject: [PATCH 1450/2432] Correctly reshape node features in `BrcaTcga` dataset (#7971) --- torch_geometric/datasets/brca_tgca.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/datasets/brca_tgca.py b/torch_geometric/datasets/brca_tgca.py index e699c141316a..e5778bc2ce25 100644 --- a/torch_geometric/datasets/brca_tgca.py +++ b/torch_geometric/datasets/brca_tgca.py @@ -92,7 +92,7 @@ def process(self): data_list = [] for x, y in zip(graph_feat, graph_label): - data = Data(x=x, edge_index=edge_index, y=y) + data = Data(x=x.view(-1, 1), edge_index=edge_index, y=y) if self.pre_filter is not None and not self.pre_filter(data): continue From 9e687b412e87aa0f07ce7152e31d6f40271ed86d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 4 Sep 2023 16:03:26 +0200 Subject: [PATCH 1451/2432] Upgrade GitHub Action `checkout` version (#7973) `actions/checkout@v3` is broken, see https://github.com/actions/checkout/issues/1448 --- .github/workflows/building_pyg_conda.yml | 2 +- .github/workflows/building_rusty1s_conda.yml | 2 +- .github/workflows/examples.yml | 2 +- .github/workflows/full_gpu_testing.yml | 2 +- .github/workflows/full_testing.yml | 2 +- .github/workflows/latest_testing.yml | 2 +- .github/workflows/linting.yml | 6 +++--- .github/workflows/minimal_testing.yml | 2 +- .github/workflows/nightly.yml | 2 +- .github/workflows/prev_testing.yml | 2 +- .github/workflows/testing.yml | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml index e92f6300c812..572aa2654b59 100644 --- a/.github/workflows/building_pyg_conda.yml +++ b/.github/workflows/building_pyg_conda.yml @@ -50,7 +50,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Conda for Python ${{ matrix.python-version }} uses: conda-incubator/setup-miniconda@v2 diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml index 973b316e460c..6bb65dc3535f 100644 --- a/.github/workflows/building_rusty1s_conda.yml +++ b/.github/workflows/building_rusty1s_conda.yml @@ -50,7 +50,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Conda for Python ${{ matrix.python-version }} uses: conda-incubator/setup-miniconda@v2 diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index c438dbcb3966..3447a6695f5d 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup packages uses: ./.github/actions/setup diff --git 
a/.github/workflows/full_gpu_testing.yml b/.github/workflows/full_gpu_testing.yml index f650cc75ef30..83996c800b68 100644 --- a/.github/workflows/full_gpu_testing.yml +++ b/.github/workflows/full_gpu_testing.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup packages uses: ./.github/actions/setup diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 0f069024849f..4299f8b5f013 100644 --- a/.github/workflows/full_testing.yml +++ b/.github/workflows/full_testing.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup packages uses: ./.github/actions/setup diff --git a/.github/workflows/latest_testing.yml b/.github/workflows/latest_testing.yml index df301bd13874..4a5783f7d35d 100644 --- a/.github/workflows/latest_testing.yml +++ b/.github/workflows/latest_testing.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 40 diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index eae028b9dd18..77da597721c8 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup packages uses: ./.github/actions/setup @@ -34,7 +34,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4.3.0 @@ -53,7 +53,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4.3.0 diff --git a/.github/workflows/minimal_testing.yml b/.github/workflows/minimal_testing.yml index 7cb65921a154..f4aba732c8f4 100644 --- a/.github/workflows/minimal_testing.yml +++ b/.github/workflows/minimal_testing.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 40 diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 9566ab7e08e1..d21454461836 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4.3.0 diff --git a/.github/workflows/prev_testing.yml b/.github/workflows/prev_testing.yml index 499113982654..6b8b7ee998e4 100644 --- a/.github/workflows/prev_testing.yml +++ b/.github/workflows/prev_testing.yml @@ -25,7 +25,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 40 diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index e0784feb172a..0d0d3b1aecdf 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 40 From b437d8ffe180f61ed266a56f316bef9bbfaf11b5 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 5 Sep 2023 08:47:33 +0200 Subject: [PATCH 1452/2432] Support latest `pyg_lib.sampler.neighbor_sample` (#7978) The interface change, so we need to account for this :( --- test/loader/test_neighbor_loader.py | 2 +- torch_geometric/sampler/neighbor_sampler.py | 15 
+++++++++++---- torch_geometric/typing.py | 3 +++ 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index d4e2bbd48c55..1b89e32ddbd2 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -524,7 +524,7 @@ def test_pyg_lib_and_torch_sparse_homo_equality(): seed = torch.arange(10) sample = torch.ops.pyg.neighbor_sample - out1 = sample(colptr, row, seed, [-1, -1], None, None, True) + out1 = sample(colptr, row, seed, [-1, -1], None, None, None, True) sample = torch.ops.torch_sparse.neighbor_sample out2 = sample(colptr, row, seed, [-1, -1], False, True) diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index 24c1e2834c19..fff32e5ad6c1 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -296,22 +296,29 @@ def _sample( # TODO Support induced subgraph sampling in `pyg-lib`. if (torch_geometric.typing.WITH_PYG_LIB and self.subgraph_type != SubgraphType.induced): - # TODO (matthias) `return_edge_id` if edge features present - # TODO (matthias) Ideally, `seed` inherits dtype from `colptr` - out = torch.ops.pyg.neighbor_sample( + + args = ( self.colptr, self.row, - seed.to(self.colptr.dtype), # seed + # TODO (matthias) `seed` should inherit dtype from `colptr` + seed.to(self.colptr.dtype), self.num_neighbors.get_mapped_values(), self.node_time, seed_time, + ) + if torch_geometric.typing.WITH_WEIGHTED_NEIGHBOR_SAMPLE: + args += (None, ) + args += ( True, # csc self.replace, self.subgraph_type != SubgraphType.induced, self.disjoint, self.temporal_strategy, + # TODO (matthias) `return_edge_id` if edge features present True, # return_edge_id ) + + out = torch.ops.pyg.neighbor_sample(*args) row, col, node, edge, batch = out[:4] + (None, ) # `pyg-lib>0.1.0` returns sampled number of nodes/edges: diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index bdf3db12d635..e13242579f55 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -1,3 +1,4 @@ +import inspect import sys import warnings from typing import Dict, List, Optional, Tuple, Union @@ -36,6 +37,8 @@ WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add') WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort') WITH_METIS = hasattr(pyg_lib, 'partition') + WITH_WEIGHTED_NEIGHBOR_SAMPLE = ('edge_weight' in inspect.signature( + pyg_lib.sampler.neighbor_sample).parameters) except (ImportError, OSError) as e: if isinstance(e, OSError): warnings.warn(f"An issue occurred while importing 'pyg-lib'. " From 60a2948ba1745e35ee5209d5e3d0b861aca54495 Mon Sep 17 00:00:00 2001 From: Akihiro Nitta Date: Tue, 5 Sep 2023 08:18:04 +0100 Subject: [PATCH 1453/2432] No graph breaks in `nn.models.{EdgeCNN,PNA}` on CUDA devices (#7967) This PR is a follow-up to #7947 and #7944. 
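As a rough way to check the claim (this is not the unit test added in this PR), compiling with `fullgraph=True` makes `torch.compile` raise as soon as tracing hits a graph break, e.g. one caused by a plain Python `warnings.warn` call:

```python
import torch
from torch_geometric.nn import EdgeCNN

model = EdgeCNN(in_channels=8, hidden_channels=16, num_layers=2).cuda()
compiled = torch.compile(model, fullgraph=True)  # errors out on any graph break

x = torch.randn(3, 8, device='cuda')
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device='cuda')
out = compiled(x, edge_index)  # compilation is triggered by the first call
```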
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- .github/workflows/documentation.yml | 2 +- test/nn/models/test_basic_gnn.py | 4 ---- test/test_warnings.py | 18 ++++++++++++++++++ torch_geometric/utils/scatter.py | 2 +- torch_geometric/warnings.py | 24 ++++++++++++++++++++++++ 5 files changed, 44 insertions(+), 6 deletions(-) create mode 100644 test/test_warnings.py create mode 100644 torch_geometric/warnings.py diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index e9eb5129bcb8..b61f33aa5f21 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 40 diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index ebb9b9cb3c8f..958420828afa 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -317,10 +317,6 @@ def test_trim_to_layer(): def test_compile_graph_breaks(Model, device): import torch._dynamo as dynamo - # TODO EdgeCNN and PNA currently lead to graph breaks on CUDA :( - if Model in {EdgeCNN, PNA} and device.type == 'cuda': - return - x = torch.randn(3, 8, device=device) edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device=device) diff --git a/test/test_warnings.py b/test/test_warnings.py new file mode 100644 index 000000000000..a880d957d46b --- /dev/null +++ b/test/test_warnings.py @@ -0,0 +1,18 @@ +import warnings +from unittest.mock import patch + +import pytest + +from torch_geometric.warnings import warn + + +def test_warn(): + with pytest.warns(UserWarning, match='test'): + warn('test') + + +@patch('torch_geometric.warnings._is_compiling', return_value=True) +def test_no_warn_if_compiling(_): + """No warning should be raised to avoid graph breaks when compiling.""" + with warnings.catch_warnings(): + warn('test') diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index e1c314b15328..b86f96d4c445 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -1,10 +1,10 @@ -import warnings from typing import Optional import torch from torch import Tensor import torch_geometric.typing +from torch_geometric import warnings from torch_geometric.typing import torch_scatter if torch_geometric.typing.WITH_PT112: # pragma: no cover diff --git a/torch_geometric/warnings.py b/torch_geometric/warnings.py new file mode 100644 index 000000000000..ead428d3bd05 --- /dev/null +++ b/torch_geometric/warnings.py @@ -0,0 +1,24 @@ +import warnings + +import torch_geometric + +if torch_geometric.typing.WITH_PT20: # pragma: no cover + from torch._dynamo import is_compiling as _is_compiling +else: + + def _is_compiling() -> bool: # pragma: no cover + return False + + +def warn(message: str): + if _is_compiling(): + return + + warnings.warn(message) + + +def filterwarnings(action: str, message: str): + if _is_compiling(): + return + + warnings.filterwarnings(action, message) From c37899806dbcfdc1a8979bc4042128c66bfd6bd4 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 6 Sep 2023 13:41:10 +0200 Subject: [PATCH 1454/2432] Fix `TransformerConv` tests (#7984) --- test/nn/conv/test_transformer_conv.py | 40 +++++++++++++++------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/test/nn/conv/test_transformer_conv.py b/test/nn/conv/test_transformer_conv.py index 
7435a358760c..742ef6830125 100644 --- a/test/nn/conv/test_transformer_conv.py +++ b/test/nn/conv/test_transformer_conv.py @@ -28,16 +28,17 @@ def test_transformer_conv(edge_dim, concat): assert torch.allclose(conv(x1, adj1.t(), edge_attr), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x1, adj2.t(), edge_attr), out, atol=1e-6) + adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, + sparse_sizes=(4, 4)) + assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): - t = '(Tensor, Tensor, NoneType, NoneType) -> Tensor' + t = '(Tensor, Tensor, Optional[Tensor], NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out) + assert torch.allclose(jit(x1, edge_index, edge_attr), out) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = '(Tensor, SparseTensor, NoneType, NoneType) -> Tensor' + t = '(Tensor, SparseTensor, Optional[Tensor], NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) @@ -50,16 +51,16 @@ def test_transformer_conv(edge_dim, concat): assert conv._alpha is None if torch_geometric.typing.WITH_TORCH_SPARSE: - result = conv(x1, adj2.t(), edge_attr, return_attention_weights=True) + result = conv(x1, adj2.t(), return_attention_weights=True) assert torch.allclose(result[0], out, atol=1e-6) assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 assert conv._alpha is None if is_full_test(): - t = ('(Tensor, Tensor, NoneType, bool) -> ' + t = ('(Tensor, Tensor, Optional[Tensor], bool) -> ' 'Tuple[Tensor, Tuple[Tensor, Tensor]]') jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, edge_index, return_attention_weights=True) + result = jit(x1, edge_index, edge_attr, return_attention_weights=True) assert torch.allclose(result[0], out) assert result[1][0].size() == (2, 4) assert result[1][1].size() == (4, 2) @@ -67,7 +68,7 @@ def test_transformer_conv(edge_dim, concat): assert conv._alpha is None if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = ('(Tensor, SparseTensor, NoneType, bool) -> ' + t = ('(Tensor, SparseTensor, Optional[Tensor], bool) -> ' 'Tuple[Tensor, SparseTensor]') jit = torch.jit.script(conv.jittable(t)) result = jit(x1, adj2.t(), return_attention_weights=True) @@ -78,23 +79,26 @@ def test_transformer_conv(edge_dim, concat): # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - conv = TransformerConv((8, 16), 32, heads=2, beta=True) - assert str(conv) == 'TransformerConv((8, 16), 32, heads=2)' + conv = TransformerConv((8, 16), out_channels, heads=heads, beta=True, + edge_dim=edge_dim, concat=concat) + assert str(conv) == (f'TransformerConv((8, 16), {out_channels}, ' + f'heads={heads})') - out = conv((x1, x2), edge_index) - assert out.size() == (2, 64) - assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) + out = conv((x1, x2), edge_index, edge_attr) + assert out.size() == (2, out_channels * (heads if concat else 1)) + assert torch.allclose(conv((x1, x2), adj1.t(), edge_attr), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, + sparse_sizes=(4, 2)) assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): - t = '(PairTensor, Tensor, NoneType, NoneType) -> Tensor' + 
t = '(PairTensor, Tensor, Optional[Tensor], NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out) + assert torch.allclose(jit((x1, x2), edge_index, edge_attr), out) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = '(PairTensor, SparseTensor, NoneType, NoneType) -> Tensor' + t = '(PairTensor, SparseTensor, Optional[Tensor], NoneType) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) From 40d4718713142bd60a9f65e4d66a80789290ae84 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 6 Sep 2023 16:12:32 +0200 Subject: [PATCH 1455/2432] Use `kwargs` when calling `Aggregation` (#7990) Fixes #7979 --- test/loader/test_neighbor_loader.py | 4 ++-- test/nn/test_model_summary.py | 4 ++-- torch_geometric/nn/aggr/base.py | 15 +++++++++++---- torch_geometric/sampler/neighbor_sampler.py | 10 ++++++++-- torch_geometric/transforms/random_link_split.py | 8 ++++---- 5 files changed, 27 insertions(+), 14 deletions(-) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 1b89e32ddbd2..17e2dd326544 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -564,8 +564,8 @@ def test_pyg_lib_and_torch_sparse_hetero_equality(): sample = torch.ops.pyg.hetero_neighbor_sample out1 = sample(node_types, edge_types, colptr_dict, row_dict, seed_dict, - num_neighbors_dict, None, None, True, False, True, False, - "uniform", True) + num_neighbors_dict, None, None, None, True, False, True, + False, "uniform", True) sample = torch.ops.torch_sparse.hetero_neighbor_sample out2 = sample(node_types, edge_types, colptr_dict, row_dict, seed_dict, num_neighbors_dict, 2, False, True) diff --git a/test/nn/test_model_summary.py b/test/nn/test_model_summary.py index 4efe8c3b599f..d4ab049ea315 100644 --- a/test/nn/test_model_summary.py +++ b/test/nn/test_model_summary.py @@ -148,10 +148,10 @@ def test_summary_with_leaf_module(gcn): | ├─(act)ReLU | [100, 16] | [100, 16] | -- | | ├─(convs)ModuleList | -- | -- | 1,072 | | │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | 528 | -| │ │ └─(aggr_module)SumAggregation | [120, 16], [120] | [100, 16] | -- | +| │ │ └─(aggr_module)SumAggregation | [120, 16] | [100, 16] | -- | | │ │ └─(lin)Linear | [100, 32] | [100, 16] | 512 | | │ └─(1)GCNConv | [100, 16], [2, 20] | [100, 32] | 544 | -| │ │ └─(aggr_module)SumAggregation | [120, 32], [120] | [100, 32] | -- | +| │ │ └─(aggr_module)SumAggregation | [120, 32] | [100, 32] | -- | | │ │ └─(lin)Linear | [100, 16] | [100, 32] | 512 | | ├─(norms)ModuleList | -- | -- | -- | | │ └─(0)Identity | [100, 16] | [100, 16] | -- | diff --git a/torch_geometric/nn/aggr/base.py b/torch_geometric/nn/aggr/base.py index 58be08b3dbb9..91d20d751fc1 100644 --- a/torch_geometric/nn/aggr/base.py +++ b/torch_geometric/nn/aggr/base.py @@ -93,9 +93,15 @@ def reset_parameters(self): pass @disable_dynamic_shapes(required_args=['dim_size']) - def __call__(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2, **kwargs) -> Tensor: + def __call__( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + **kwargs, + ) -> Tensor: if dim >= x.dim() or dim < -x.dim(): raise ValueError(f"Encountered invalid dimension '{dim}' of " @@ -116,7 +122,8 @@ def __call__(self, x: Tensor, index: Optional[Tensor] = None, 
dim_size = int(index.max()) + 1 if index.numel() > 0 else 0 try: - return super().__call__(x, index, ptr, dim_size, dim, **kwargs) + return super().__call__(x, index=index, ptr=ptr, dim_size=dim_size, + dim=dim, **kwargs) except (IndexError, RuntimeError) as e: if index is not None: if index.numel() > 0 and dim_size <= int(index.max()): diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index fff32e5ad6c1..f80ce9002476 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -217,13 +217,12 @@ def _sample( # TODO Support induced subgraph sampling in `pyg-lib`. if (torch_geometric.typing.WITH_PYG_LIB and self.subgraph_type != SubgraphType.induced): - # TODO (matthias) `return_edge_id` if edge features present # TODO (matthias) Ideally, `seed` inherits dtype from `colptr` colptrs = list(self.colptr_dict.values()) dtype = colptrs[0].dtype if len(colptrs) > 0 else torch.int64 seed = {k: v.to(dtype) for k, v in seed.items()} - out = torch.ops.pyg.hetero_neighbor_sample( + args = ( self.node_types, self.edge_types, self.colptr_dict, @@ -232,13 +231,20 @@ def _sample( self.num_neighbors.get_mapped_values(self.edge_types), self.node_time, seed_time, + ) + if torch_geometric.typing.WITH_WEIGHTED_NEIGHBOR_SAMPLE: + args += (None, ) + args += ( True, # csc self.replace, self.subgraph_type != SubgraphType.induced, self.disjoint, self.temporal_strategy, + # TODO (matthias) `return_edge_id` if edge features present True, # return_edge_id ) + + out = torch.ops.pyg.hetero_neighbor_sample(*args) row, col, node, edge, batch = out[:4] + (None, ) # `pyg-lib>0.1.0` returns sampled number of nodes/edges: diff --git a/torch_geometric/transforms/random_link_split.py b/torch_geometric/transforms/random_link_split.py index 0a81ce2ae33b..747bfc2a2c11 100644 --- a/torch_geometric/transforms/random_link_split.py +++ b/torch_geometric/transforms/random_link_split.py @@ -41,10 +41,10 @@ class RandomLinkSplit(BaseTransform): (default: :obj:`0.2`) is_undirected (bool): If set to :obj:`True`, the graph is assumed to be undirected, and positive and negative samples will not leak - (reverse) edge connectivity across different splits. Note that this - only affects the graph split, label data will not be returned - undirected. - (default: :obj:`False`) + (reverse) edge connectivity across different splits. This only + affects the graph split, label data will not be returned + undirected. This option is ignored for bipartite edge types or + whenever :obj:`edge_type != rev_edge_type`. (default: :obj:`False`) key (str, optional): The name of the attribute holding ground-truth labels. If :obj:`data[key]` does not exist, it will be automatically From be6ce568d2f175bdb6c0842cb38d7c7d343d88b2 Mon Sep 17 00:00:00 2001 From: Francesco Landolfi Date: Wed, 6 Sep 2023 16:37:32 +0200 Subject: [PATCH 1456/2432] Simplify `TopKPooling` (#7737) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hi, In this PR I optimized the computation of top-k: it leverages stable sorting instead of padding the score vector to the largest graph in the batch. 
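In short: sort the scores once globally (descending), then stably sort by graph id, so that inside every graph the nodes end up ordered by decreasing score and the first `k` entries per graph can be picked with a single mask. A self-contained sketch of the idea (simplified, not the exact library code):

```python
import torch

def topk_per_graph(score, batch, k):
    # score: [N] node scores, batch: [N] graph id per node, k: [B] nodes to keep.
    num_nodes = torch.bincount(batch)

    # Sort scores globally (descending), then stably sort by graph id so that
    # nodes of the same graph keep their decreasing-score order:
    score, score_perm = torch.sort(score, descending=True)
    batch = batch[score_perm]
    batch, batch_perm = torch.sort(batch, stable=True)

    # Rank of every node inside its own graph after sorting:
    ptr = torch.cat([num_nodes.new_zeros(1), num_nodes.cumsum(0)])
    local_rank = torch.arange(batch.numel()) - ptr[batch]

    mask = local_rank < k[batch]
    return score_perm[batch_perm[mask]]  # original indices of the kept nodes

score = torch.tensor([0.1, 0.9, 0.5, 0.3, 0.7])
batch = torch.tensor([0, 0, 0, 1, 1])
print(topk_per_graph(score, batch, k=torch.tensor([2, 1])))  # tensor([1, 2, 4])
```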
Here is the output of the benchmark scripts, before and after: ``` flandolfi@johann /o/W/pytorch_geometric (master)> python test/nn/pool/select/test_select_topk.py (pyg) 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 16/16 [00:33<00:00, 2.09s/it] +----------------+-----------+ | Name | Forward | |----------------+-----------| | B=64, N=256 | 1.1625s | | B=64, N=512 | 1.1630s | | B=64, N=1024 | 1.1744s | | B=64, N=2048 | 1.1733s | | B=64, N=4096 | 1.1826s | | B=64, N=8192 | 1.2270s | | B=64, N=16384 | 1.3334s | | B=64, N=32768 | 1.5825s | | B=128, N=256 | 2.0394s | | B=128, N=512 | 2.1531s | | B=128, N=1024 | 2.1189s | | B=128, N=2048 | 2.1391s | | B=128, N=4096 | 2.2102s | | B=128, N=8192 | 2.2339s | | B=128, N=16384 | 2.3075s | | B=128, N=32768 | 2.4393s | +----------------+-----------+ flandolfi@johann /o/W/pytorch_geometric (master)> git checkout sparse_topk (pyg) Switched to branch 'sparse_topk' Your branch is up to date with 'origin/sparse_topk'. flandolfi@johann /o/W/pytorch_geometric (sparse_topk)> python test/nn/pool/select/test_select_topk.py (pyg) 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 16/16 [00:06<00:00, 2.32it/s] +----------------+-----------+ | Name | Forward | |----------------+-----------| | B=64, N=256 | 0.2419s | | B=64, N=512 | 0.2417s | | B=64, N=1024 | 0.2383s | | B=64, N=2048 | 0.2402s | | B=64, N=4096 | 0.2358s | | B=64, N=8192 | 0.3325s | | B=64, N=16384 | 0.5190s | | B=64, N=32768 | 0.8741s | | B=128, N=256 | 0.2359s | | B=128, N=512 | 0.2347s | | B=128, N=1024 | 0.2347s | | B=128, N=2048 | 0.2365s | | B=128, N=4096 | 0.2350s | | B=128, N=8192 | 0.3113s | | B=128, N=16384 | 0.4465s | | B=128, N=32768 | 0.7103s | +----------------+-----------+ ``` Bests, Francesco --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/pool/select/topk.py | 44 +++++++------------------- 2 files changed, 12 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d069581959b9..f2170194a649 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -94,6 +94,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Accelerated and simplified `top_k` computation in `TopKPooling` ([#7737](https://github.com/pyg-team/pytorch_geometric/pull/7737)) - Updated `GIN` implementation in kernel benchmarks to have sequential batchnorms ([#7955](https://github.com/pyg-team/pytorch_geometric/pull/7955)) - Fixed bugs in benchmarks caused by a lack of the device conditions for CPU and unexpected `cache` argument in heterogeneous models ([#7956](https://github.com/pyg-team/pytorch_geometric/pull/7956) - Fixed a bug in which `batch.e_id` was not correctly computed on unsorted graph inputs ([#7953](https://github.com/pyg-team/pytorch_geometric/pull/7953)) diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py index 670c223f980c..76e15126e175 100644 --- a/torch_geometric/nn/pool/select/topk.py +++ b/torch_geometric/nn/pool/select/topk.py @@ -9,7 +9,7 @@ from torch_geometric.utils import scatter, softmax -# TODO (matthias) Benchmark and document this method. +# TODO (matthias) Document this method. 
def topk( x: Tensor, ratio: Optional[Union[float, int]], @@ -27,45 +27,23 @@ def topk( if ratio is not None: num_nodes = scatter(batch.new_ones(x.size(0)), batch, reduce='sum') - batch_size, max_num_nodes = num_nodes.size(0), int(num_nodes.max()) - - cum_num_nodes = torch.cat( - [num_nodes.new_zeros(1), - num_nodes.cumsum(dim=0)[:-1]], dim=0) - - index = torch.arange(batch.size(0), dtype=torch.long, device=x.device) - index = (index - cum_num_nodes[batch]) + (batch * max_num_nodes) - - dense_x = x.new_full((batch_size * max_num_nodes, ), -60000.0) - dense_x[index] = x - dense_x = dense_x.view(batch_size, max_num_nodes) - - _, perm = dense_x.sort(dim=-1, descending=True) - - perm = perm + cum_num_nodes.view(-1, 1) - perm = perm.view(-1) if ratio >= 1: k = num_nodes.new_full((num_nodes.size(0), ), int(ratio)) - k = torch.min(k, num_nodes) else: k = (float(ratio) * num_nodes.to(x.dtype)).ceil().to(torch.long) - if isinstance(ratio, int) and (k == ratio).all(): - # If all graphs have exactly `ratio` or more than `ratio` entries, - # we can just pick the first entries in `perm` batch-wise: - index = torch.arange(batch_size, device=x.device) * max_num_nodes - index = index.view(-1, 1).repeat(1, ratio).view(-1) - index += torch.arange(ratio, device=x.device).repeat(batch_size) - else: - # Otherwise, compute indices per graph: - index = torch.cat([ - torch.arange(k[i], device=x.device) + i * max_num_nodes - for i in range(batch_size) - ], dim=0) + x, x_perm = torch.sort(x.view(-1), descending=True) + batch = batch[x_perm] + batch, batch_perm = torch.sort(batch, descending=False, stable=True) - perm = perm[index] - return perm + arange = torch.arange(x.size(0), dtype=torch.long, device=x.device) + ptr = num_nodes.new_zeros(num_nodes.numel() + 1) + torch.cumsum(num_nodes, 0, out=ptr[1:]) + batched_arange = arange - ptr[batch] + mask = batched_arange < k[batch] + + return x_perm[batch_perm[mask]] raise ValueError("At least one of the 'ratio' and 'min_score' parameters " "must be specified") From 9b2bb613efded0cce0f747abf0157b85ea9699d0 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 7 Sep 2023 08:43:23 +0200 Subject: [PATCH 1457/2432] Added internal `cumsum` implementation (#7994) --- CHANGELOG.md | 1 + test/nn/dense/test_linear.py | 3 ++- test/utils/test_functions.py | 11 +++++++++ torch_geometric/data/collate.py | 18 ++++----------- torch_geometric/datasets/upfd.py | 8 +++---- torch_geometric/io/tu.py | 8 +++---- torch_geometric/nn/aggr/quantile.py | 7 +++--- torch_geometric/nn/data_parallel.py | 9 ++++---- torch_geometric/nn/pool/decimation.py | 4 +++- torch_geometric/nn/pool/select/topk.py | 5 ++-- torch_geometric/nn/to_hetero_module.py | 8 +++---- torch_geometric/transforms/line_graph.py | 9 +++----- torch_geometric/utils/__init__.py | 12 ++++++---- torch_geometric/utils/augmentation.py | 6 ++--- torch_geometric/utils/dropout.py | 11 +++------ torch_geometric/utils/functions.py | 27 ++++++++++++++++++++++ torch_geometric/utils/negative_sampling.py | 14 +++++------ torch_geometric/utils/scatter.py | 4 ++-- torch_geometric/utils/to_dense_adj.py | 4 ++-- torch_geometric/utils/to_dense_batch.py | 4 ++-- torch_geometric/utils/unbatch.py | 4 ++-- 21 files changed, 100 insertions(+), 77 deletions(-) create mode 100644 test/utils/test_functions.py create mode 100644 torch_geometric/utils/functions.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f2170194a649..5ddf7508093f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a 
Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) - Added the `MyketDataset` ([#7959](https://github.com/pyg-team/pytorch_geometric/pull/7959)) - Added a multi-GPU `ogbn-papers100M` example ([#7921](https://github.com/pyg-team/pytorch_geometric/pull/7921)) diff --git a/test/nn/dense/test_linear.py b/test/nn/dense/test_linear.py index 9c004b7b070f..f1189286e4c7 100644 --- a/test/nn/dense/test_linear.py +++ b/test/nn/dense/test_linear.py @@ -12,6 +12,7 @@ from torch_geometric.profile import benchmark from torch_geometric.testing import withCUDA, withPackage from torch_geometric.typing import pyg_lib +from torch_geometric.utils import cumsum weight_inits = ['glorot', 'kaiming_uniform', None] bias_inits = ['zeros', None] @@ -300,7 +301,7 @@ def dgl_mm(x: Tensor, count: Tensor, weight: Tensor) -> Tensor: xs = get_xs(mean, std, num_types, channels) count = torch.tensor([x.size(0) for x in xs]) - ptr = torch.tensor([0] + [x.size(0) for x in xs]).cumsum(0) + ptr = cumsum(torch.tensor([x.size(0) for x in xs])) x = torch.cat(xs, dim=0) padded_x = torch.nested.nested_tensor(xs).to_padded_tensor(padding=0.0) weight = torch.randn(num_types, channels, channels, device=args.device) diff --git a/test/utils/test_functions.py b/test/utils/test_functions.py new file mode 100644 index 000000000000..1293872706f8 --- /dev/null +++ b/test/utils/test_functions.py @@ -0,0 +1,11 @@ +import torch + +from torch_geometric.utils import cumsum + + +def test_cumsum(): + x = torch.tensor([2, 4, 1]) + assert cumsum(x).tolist() == [0, 2, 6, 7] + + x = torch.tensor([[2, 4], [3, 6]]) + assert cumsum(x, dim=1).tolist() == [[0, 2, 6], [0, 3, 9]] diff --git a/torch_geometric/data/collate.py b/torch_geometric/data/collate.py index f217479b67a5..bef83df9843d 100644 --- a/torch_geometric/data/collate.py +++ b/torch_geometric/data/collate.py @@ -1,6 +1,6 @@ from collections import defaultdict from collections.abc import Mapping, Sequence -from typing import Any, List, Optional, Tuple, Union +from typing import Any, List, Optional, Tuple import torch from torch import Tensor @@ -9,7 +9,7 @@ from torch_geometric.data.data import BaseData from torch_geometric.data.storage import BaseStorage, NodeStorage from torch_geometric.typing import SparseTensor, torch_sparse -from torch_geometric.utils import is_sparse, is_torch_sparse_tensor +from torch_geometric.utils import cumsum, is_sparse, is_torch_sparse_tensor from torch_geometric.utils.sparse import cat @@ -131,7 +131,8 @@ def _collate( cat_dim = data_list[0].__cat_dim__(key, elem, stores[0]) if cat_dim is None or elem.dim() == 0: values = [value.unsqueeze(0) for value in values] - slices = cumsum([value.size(cat_dim or 0) for value in values]) + sizes = torch.tensor([value.size(cat_dim or 0) for value in values]) + slices = cumsum(sizes) if increment: incs = get_incs(key, values, data_list, stores) if incs.dim() > 1 or int(incs[-1]) != 0: @@ -179,7 +180,7 @@ def _collate( cat_dim = data_list[0].__cat_dim__(key, elem, stores[0]) cat_dims = (cat_dim, ) if isinstance(cat_dim, int) else cat_dim repeats = [[value.size(dim) for dim in cat_dims] for value in values] - slices = cumsum(repeats) + slices = cumsum(torch.tensor(repeats)) if is_torch_sparse_tensor(elem): value = cat(values, dim=cat_dim) else: @@ -268,15 +269,6 @@ def repeat_interleave( return torch.cat(outs, dim=0) -def 
cumsum(value: Union[Tensor, List[int]]) -> Tensor: - if not isinstance(value, Tensor): - value = torch.tensor(value) - out = value.new_empty((value.size(0) + 1, ) + value.size()[1:]) - out[0] = 0 - torch.cumsum(value, 0, out=out[1:]) - return out - - def get_incs(key, values: List[Any], data_list: List[BaseData], stores: List[BaseStorage]) -> Tensor: repeats = [ diff --git a/torch_geometric/datasets/upfd.py b/torch_geometric/datasets/upfd.py index 7cd84a182fd1..fcaf39fe7b0f 100644 --- a/torch_geometric/datasets/upfd.py +++ b/torch_geometric/datasets/upfd.py @@ -13,7 +13,7 @@ extract_zip, ) from torch_geometric.io import read_txt_array -from torch_geometric.utils import coalesce +from torch_geometric.utils import coalesce, cumsum class UPFD(InMemoryDataset): @@ -139,10 +139,8 @@ def process(self): batch = np.load(osp.join(self.raw_dir, 'node_graph_id.npy')) batch = torch.from_numpy(batch).to(torch.long) - node_slice = torch.cumsum(batch.bincount(), 0) - node_slice = torch.cat([torch.tensor([0]), node_slice]) - edge_slice = torch.cumsum(batch[edge_index[0]].bincount(), 0) - edge_slice = torch.cat([torch.tensor([0]), edge_slice]) + node_slice = cumsum(batch.bincount()) + edge_slice = cumsum(batch[edge_index[0]].bincount()) graph_slice = torch.arange(y.size(0) + 1) self.slices = { 'x': node_slice, diff --git a/torch_geometric/io/tu.py b/torch_geometric/io/tu.py index 0083a380620e..60940924e07e 100644 --- a/torch_geometric/io/tu.py +++ b/torch_geometric/io/tu.py @@ -7,7 +7,7 @@ from torch_geometric.data import Data from torch_geometric.io import read_txt_array -from torch_geometric.utils import coalesce, one_hot, remove_self_loops +from torch_geometric.utils import coalesce, cumsum, one_hot, remove_self_loops names = [ 'A', 'graph_indicator', 'node_labels', 'node_attributes' @@ -100,12 +100,10 @@ def cat(seq): def split(data, batch): - node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0) - node_slice = torch.cat([torch.tensor([0]), node_slice]) + node_slice = cumsum(torch.from_numpy(np.bincount(batch))) row, _ = data.edge_index - edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0) - edge_slice = torch.cat([torch.tensor([0]), edge_slice]) + edge_slice = cumsum(torch.from_numpy(np.bincount(batch[row]))) # Edge indices should start at zero for every graph. data.edge_index -= node_slice[batch[row]].unsqueeze(0) diff --git a/torch_geometric/nn/aggr/quantile.py b/torch_geometric/nn/aggr/quantile.py index 6c20b2a815c9..859edbb5a758 100644 --- a/torch_geometric/nn/aggr/quantile.py +++ b/torch_geometric/nn/aggr/quantile.py @@ -4,6 +4,7 @@ from torch import Tensor from torch_geometric.nn.aggr import Aggregation +from torch_geometric.utils import cumsum class QuantileAggregation(Aggregation): @@ -76,14 +77,14 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, assert index is not None # Required for TorchScript.
count = torch.bincount(index, minlength=dim_size or 0) - cumsum = torch.cumsum(count, dim=0) - count + ptr = cumsum(count)[:-1] # In case there exists dangling indices (`dim_size > index.max()`), we # need to clamp them to prevent out-of-bound issues: if dim_size is not None: - cumsum = cumsum.clamp(max=x.size(dim) - 1) + ptr = ptr.clamp(max=x.size(dim) - 1) - q_point = self.q * (count - 1) + cumsum + q_point = self.q * (count - 1) + ptr q_point = q_point.t().reshape(-1) shape = [1] * x.dim() diff --git a/torch_geometric/nn/data_parallel.py b/torch_geometric/nn/data_parallel.py index b3f55b3da499..4956085bead3 100644 --- a/torch_geometric/nn/data_parallel.py +++ b/torch_geometric/nn/data_parallel.py @@ -4,6 +4,7 @@ import torch from torch_geometric.data import Batch +from torch_geometric.utils import cumsum class DataParallel(torch.nn.DataParallel): @@ -74,13 +75,11 @@ def scatter(self, data_list, device_ids): num_devices = min(len(device_ids), len(data_list)) count = torch.tensor([data.num_nodes for data in data_list]) - cumsum = count.cumsum(0) - cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0) - device_id = num_devices * cumsum.to(torch.float) / cumsum[-1].item() + ptr = cumsum(count) + device_id = num_devices * ptr.to(torch.float) / ptr[-1].item() device_id = (device_id[:-1] + device_id[1:]) / 2.0 device_id = device_id.to(torch.long) # round. - split = device_id.bincount().cumsum(0) - split = torch.cat([split.new_zeros(1), split], dim=0) + split = cumsum(device_id.bincount()) split = torch.unique(split, sorted=True) split = split.tolist() diff --git a/torch_geometric/nn/pool/decimation.py b/torch_geometric/nn/pool/decimation.py index fbc1801b9546..460b0031bb06 100644 --- a/torch_geometric/nn/pool/decimation.py +++ b/torch_geometric/nn/pool/decimation.py @@ -3,6 +3,8 @@ import torch from torch import LongTensor, Tensor +from torch_geometric.utils import cumsum + def decimation_indices( ptr: LongTensor, @@ -40,6 +42,6 @@ def decimation_indices( decim_indices = torch.cat(decim_indices, dim=0) # Get updated ptr (e.g., for future decimations): - decim_ptr = torch.cat([decim_count.new_zeros(1), decim_count.cumsum(0)]) + decim_ptr = cumsum(decim_count) return decim_indices, decim_ptr diff --git a/torch_geometric/nn/pool/select/topk.py b/torch_geometric/nn/pool/select/topk.py index 76e15126e175..244b64577283 100644 --- a/torch_geometric/nn/pool/select/topk.py +++ b/torch_geometric/nn/pool/select/topk.py @@ -6,7 +6,7 @@ from torch_geometric.nn.inits import uniform from torch_geometric.nn.pool.select import Select, SelectOutput from torch_geometric.nn.resolver import activation_resolver -from torch_geometric.utils import scatter, softmax +from torch_geometric.utils import cumsum, scatter, softmax # TODO (matthias) Document this method. 
@@ -38,8 +38,7 @@ def topk( batch, batch_perm = torch.sort(batch, descending=False, stable=True) arange = torch.arange(x.size(0), dtype=torch.long, device=x.device) - ptr = num_nodes.new_zeros(num_nodes.numel() + 1) - torch.cumsum(num_nodes, 0, out=ptr[1:]) + ptr = cumsum(num_nodes) batched_arange = arange - ptr[batch] mask = batched_arange < k[batch] diff --git a/torch_geometric/nn/to_hetero_module.py b/torch_geometric/nn/to_hetero_module.py index b8c40a52f42f..0aa4b29c01b9 100644 --- a/torch_geometric/nn/to_hetero_module.py +++ b/torch_geometric/nn/to_hetero_module.py @@ -8,7 +8,7 @@ import torch_geometric from torch_geometric.typing import EdgeType, NodeType, OptTensor -from torch_geometric.utils import scatter +from torch_geometric.utils import cumsum, scatter class ToHeteroLinear(torch.nn.Module): @@ -126,7 +126,7 @@ def fused_forward(self, x: Tensor, edge_index: Tensor, node_type: Tensor, edge_sizes = scatter(torch.ones_like(edge_type), edge_type, dim=0, dim_size=len(self.edge_types), reduce='sum') - cumsum = torch.cat([node_type.new_zeros(1), node_sizes.cumsum(0)[:1]]) + ptr = cumsum(node_sizes) xs = x.split(node_sizes.tolist()) x_dict = {node_type: x for node_type, x in zip(self.node_types, xs)} @@ -134,8 +134,8 @@ def fused_forward(self, x: Tensor, edge_index: Tensor, node_type: Tensor, # TODO Consider out-sourcing to its own function. edge_indices = edge_index.clone().split(edge_sizes.tolist(), dim=1) for (src, _, dst), index in zip(self.edge_types, edge_indices): - index[0] -= cumsum[self.node_type_to_index[src]] - index[1] -= cumsum[self.node_type_to_index[dst]] + index[0] -= ptr[self.node_type_to_index[src]] + index[1] -= ptr[self.node_type_to_index[dst]] edge_index_dict = { edge_type: edge_index diff --git a/torch_geometric/transforms/line_graph.py b/torch_geometric/transforms/line_graph.py index 83c492b26abb..ae2c5c1bfd46 100644 --- a/torch_geometric/transforms/line_graph.py +++ b/torch_geometric/transforms/line_graph.py @@ -3,7 +3,7 @@ from torch_geometric.data import Data from torch_geometric.data.datapipes import functional_transform from torch_geometric.transforms import BaseTransform -from torch_geometric.utils import coalesce, remove_self_loops, scatter +from torch_geometric.utils import coalesce, cumsum, remove_self_loops, scatter @functional_transform('line_graph') @@ -45,12 +45,9 @@ def forward(self, data: Data) -> Data: count = scatter(torch.ones_like(row), row, dim=0, dim_size=data.num_nodes, reduce='sum') - cumsum = torch.cat([count.new_zeros(1), count.cumsum(0)], dim=0) + ptr = cumsum(count) - cols = [ - i[cumsum[col[j]]:cumsum[col[j] + 1]] - for j in range(col.size(0)) - ] + cols = [i[ptr[col[j]]:ptr[col[j] + 1]] for j in range(col.size(0))] rows = [row.new_full((c.numel(), ), j) for j, c in enumerate(cols)] row, col = torch.cat(rows, dim=0), torch.cat(cols, dim=0) diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index 30c648a61f4d..faa52fea58c3 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -3,9 +3,9 @@ from .scatter import scatter, group_argsort from .segment import segment from .sort import index_sort +from .functions import cumsum from .degree import degree from .softmax import softmax -from .dropout import dropout_adj, dropout_node, dropout_edge, dropout_path from .sort_edge_index import sort_edge_index from .lexsort import lexsort from .coalesce import coalesce @@ -16,6 +16,7 @@ from .isolated import contains_isolated_nodes, remove_isolated_nodes from .subgraph import 
(get_num_hops, subgraph, k_hop_subgraph, bipartite_subgraph) +from .dropout import dropout_adj, dropout_node, dropout_edge, dropout_path from .homophily import homophily from .assortativity import assortativity from .get_laplacian import get_laplacian @@ -59,12 +60,9 @@ 'group_argsort', 'segment', 'index_sort', + 'cumsum', 'degree', 'softmax', - 'dropout_node', - 'dropout_edge', - 'dropout_path', - 'dropout_adj', 'sort_edge_index', 'lexsort', 'coalesce', @@ -82,6 +80,10 @@ 'subgraph', 'bipartite_subgraph', 'k_hop_subgraph', + 'dropout_node', + 'dropout_edge', + 'dropout_path', + 'dropout_adj', 'homophily', 'assortativity', 'get_laplacian', diff --git a/torch_geometric/utils/augmentation.py b/torch_geometric/utils/augmentation.py index 830145319dce..59e66824a1e2 100644 --- a/torch_geometric/utils/augmentation.py +++ b/torch_geometric/utils/augmentation.py @@ -3,7 +3,7 @@ import torch from torch import Tensor -from torch_geometric.utils import negative_sampling, scatter +from torch_geometric.utils import cumsum, negative_sampling, scatter def shuffle_node( @@ -61,10 +61,10 @@ def shuffle_node( perm = torch.randperm(x.size(0), device=x.device) return x[perm], perm num_nodes = scatter(batch.new_ones(x.size(0)), batch, dim=0, reduce='sum') - cumsum = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)]) + ptr = cumsum(num_nodes) perm = torch.cat([ torch.randperm(n, device=x.device) + offset - for offset, n in zip(cumsum[:-1], num_nodes) + for offset, n in zip(ptr[:-1], num_nodes) ]) return x[perm], perm diff --git a/torch_geometric/utils/dropout.py b/torch_geometric/utils/dropout.py index 9a90953c1a7d..c85f8bf946d7 100644 --- a/torch_geometric/utils/dropout.py +++ b/torch_geometric/utils/dropout.py @@ -6,10 +6,8 @@ import torch_geometric.typing from torch_geometric.deprecation import deprecated from torch_geometric.typing import OptTensor -from torch_geometric.utils.degree import degree +from torch_geometric.utils import cumsum, degree, sort_edge_index, subgraph from torch_geometric.utils.num_nodes import maybe_num_nodes -from torch_geometric.utils.sort_edge_index import sort_edge_index -from torch_geometric.utils.subgraph import subgraph def filter_adj(row: Tensor, col: Tensor, edge_attr: OptTensor, @@ -277,15 +275,12 @@ def dropout_path(edge_index: Tensor, p: float = 0.2, walks_per_node: int = 1, sample_mask = torch.rand(row.size(0), device=edge_index.device) <= p start = row[sample_mask].repeat(walks_per_node) - deg = degree(row, num_nodes=num_nodes) - rowptr = row.new_zeros(num_nodes + 1) - torch.cumsum(deg, 0, out=rowptr[1:]) + rowptr = cumsum(degree(row, num_nodes=num_nodes, dtype=torch.long)) n_id, e_id = torch.ops.torch_cluster.random_walk(rowptr, col, start, walk_length, 1.0, 1.0) e_id = e_id[e_id != -1].view(-1) # filter illegal edges - if edge_orders is not None: - # permute edge ids + if edge_orders is not None: # Permute edge indices: e_id = edge_orders[e_id] edge_mask[e_id] = False edge_index = ori_edge_index[:, edge_mask] diff --git a/torch_geometric/utils/functions.py b/torch_geometric/utils/functions.py new file mode 100644 index 000000000000..6708de7fe67f --- /dev/null +++ b/torch_geometric/utils/functions.py @@ -0,0 +1,27 @@ +import torch +from torch import Tensor + + +def cumsum(x: Tensor, dim: int = 0) -> Tensor: + r"""Returns the cumulative sum of elements of :obj:`inputs`. + In contrast to :meth:`torch.cumsum`, preprends the output with a zero. + + Args: + x (torch.Tensor): The input tensor. + dim (int, optional): The dimension to do the operation over. 
+ (default: :obj:`0`) + + Example: + + >>> x = torch.tensor([2, 4, 1]) + >>> cumsum(x) + tensor([0, 2, 6, 7]) + + """ + size = x.size()[:dim] + (x.size(dim) + 1, ) + x.size()[dim + 1:] + out = x.new_empty(size) + + out.narrow(dim, 0, 1).zero_() + torch.cumsum(x, dim=dim, out=out.narrow(dim, 1, x.size(dim))) + + return out diff --git a/torch_geometric/utils/negative_sampling.py b/torch_geometric/utils/negative_sampling.py index b7e0d0f0981d..134feced4ccd 100644 --- a/torch_geometric/utils/negative_sampling.py +++ b/torch_geometric/utils/negative_sampling.py @@ -5,7 +5,7 @@ import torch from torch import Tensor -from torch_geometric.utils import coalesce, degree, remove_self_loops +from torch_geometric.utils import coalesce, cumsum, degree, remove_self_loops from torch_geometric.utils.num_nodes import maybe_num_nodes @@ -178,25 +178,25 @@ def batched_negative_sampling( edge_indices = torch.split(edge_index, split, dim=1) num_src = degree(src_batch, dtype=torch.long) - cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]]) + cum_src = cumsum(num_src)[:-1] if isinstance(batch, Tensor): num_nodes = num_src.tolist() - cumsum = cum_src + ptr = cum_src else: num_dst = degree(dst_batch, dtype=torch.long) - cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]]) + cum_dst = cumsum(num_dst)[:-1] num_nodes = torch.stack([num_src, num_dst], dim=1).tolist() - cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1) + ptr = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1) neg_edge_indices = [] for i, edge_index in enumerate(edge_indices): - edge_index = edge_index - cumsum[i] + edge_index = edge_index - ptr[i] neg_edge_index = negative_sampling(edge_index, num_nodes[i], num_neg_samples, method, force_undirected) - neg_edge_index += cumsum[i] + neg_edge_index += ptr[i] neg_edge_indices.append(neg_edge_index) return torch.cat(neg_edge_indices, dim=1) diff --git a/torch_geometric/utils/scatter.py b/torch_geometric/utils/scatter.py index b86f96d4c445..d9207c90a568 100644 --- a/torch_geometric/utils/scatter.py +++ b/torch_geometric/utils/scatter.py @@ -6,6 +6,7 @@ import torch_geometric.typing from torch_geometric import warnings from torch_geometric.typing import torch_scatter +from torch_geometric.utils.functions import cumsum if torch_geometric.typing.WITH_PT112: # pragma: no cover @@ -250,7 +251,6 @@ def group_argsort( # Compute cumulative sum of number of entries with the same index: count = scatter(torch.ones_like(index), index, dim=dim, dim_size=num_groups, reduce='sum') - ptr = count.new_zeros(count.numel() + 1) - torch.cumsum(count, dim=0, out=ptr[1:]) + ptr = cumsum(count) return out - ptr[index] diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py index e2423735a5ff..1a501b40b6ed 100644 --- a/torch_geometric/utils/to_dense_adj.py +++ b/torch_geometric/utils/to_dense_adj.py @@ -4,7 +4,7 @@ from torch import Tensor from torch_geometric.typing import OptTensor -from torch_geometric.utils import scatter +from torch_geometric.utils import cumsum, scatter def to_dense_adj( @@ -67,7 +67,7 @@ def to_dense_adj( one = batch.new_ones(batch.size(0)) num_nodes = scatter(one, batch, dim=0, dim_size=batch_size, reduce='sum') - cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)]) + cum_nodes = cumsum(num_nodes) idx0 = batch[edge_index[0]] idx1 = edge_index[0] - cum_nodes[batch][edge_index[0]] diff --git a/torch_geometric/utils/to_dense_batch.py b/torch_geometric/utils/to_dense_batch.py index cad023e7b93a..836f3b884b7f 
100644 --- a/torch_geometric/utils/to_dense_batch.py +++ b/torch_geometric/utils/to_dense_batch.py @@ -7,7 +7,7 @@ disable_dynamic_shapes, is_experimental_mode_enabled, ) -from torch_geometric.utils import scatter +from torch_geometric.utils import cumsum, scatter @disable_dynamic_shapes(required_args=['batch_size', 'max_num_nodes']) @@ -106,7 +106,7 @@ def to_dense_batch( num_nodes = scatter(batch.new_ones(x.size(0)), batch, dim=0, dim_size=batch_size, reduce='sum') - cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)]) + cum_nodes = cumsum(num_nodes) filter_nodes = False dynamic_shapes_disabled = is_experimental_mode_enabled( diff --git a/torch_geometric/utils/unbatch.py b/torch_geometric/utils/unbatch.py index 0ba85c7e1135..304f60d2bb27 100644 --- a/torch_geometric/utils/unbatch.py +++ b/torch_geometric/utils/unbatch.py @@ -3,7 +3,7 @@ import torch from torch import Tensor -from torch_geometric.utils import degree +from torch_geometric.utils import cumsum, degree def unbatch( @@ -65,7 +65,7 @@ def unbatch_edge_index( [1, 0, 2, 1]])) """ deg = degree(batch, batch_size, dtype=torch.long) - ptr = torch.cat([deg.new_zeros(1), deg.cumsum(dim=0)[:-1]], dim=0) + ptr = cumsum(deg) edge_batch = batch[edge_index[0]] edge_index = edge_index - ptr[edge_batch] From c05a17e7ac8f9bf239366cdd60bf4866ea76624b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 7 Sep 2023 08:45:55 +0200 Subject: [PATCH 1458/2432] Warn on isolated/non-existing node types in `HeteroData.validate()` (#7995) --- CHANGELOG.md | 1 + test/data/test_hetero_data.py | 15 ++++++++++++++- torch_geometric/data/hetero_data.py | 17 +++++++++++++++++ torch_geometric/utils/functions.py | 4 ++-- 4 files changed, 34 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ddf7508093f..18f974b42bc9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) - Added the `MyketDataset` ([#7959](https://github.com/pyg-team/pytorch_geometric/pull/7959)) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index bea01b6ad99f..b820d0815c49 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -33,7 +33,8 @@ def test_init_hetero_data(): data['paper', 'paper'].edge_index = edge_index_paper_paper data['paper', 'author'].edge_index = edge_index_paper_author data['author', 'paper'].edge_index = edge_index_author_paper - data.validate(raise_on_error=True) + with pytest.warns(UserWarning, match="{'v1'} are isolated"): + data.validate(raise_on_error=True) assert len(data) == 2 assert data.node_types == ['v1', 'paper', 'author'] @@ -193,6 +194,18 @@ def test_hetero_data_rename(): assert edge_index.tolist() == edge_index_paper_paper.tolist() +def test_dangling_types(): + data = HeteroData() + data['src', 'to', 'dst'].edge_index = torch.randint(0, 10, (2, 20)) + with pytest.raises(ValueError, match="do not exist as node types"): + data.validate() + + data = HeteroData() + data['node'].num_nodes = 10 + with pytest.warns(UserWarning, match="{'node'} are isolated"): + data.validate() + + def test_hetero_data_subgraph(): data = HeteroData() data.num_node_types = 3 diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index a3e6cd37d206..4df5ce69ab50 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -387,6 +387,23 @@ def validate(self, raise_on_error: bool = True) -> bool: cls_name = self.__class__.__name__ status = True + node_types = set(self.node_types) + num_src_node_types = {src for src, _, _ in self.edge_types} + num_dst_node_types = {dst for _, _, dst in self.edge_types} + + dangling_types = (num_src_node_types | num_dst_node_types) - node_types + if len(dangling_types) > 0: + status = False + warn_or_raise( + f"The node types {dangling_types} are referenced in edge " + f"types but do not exist as node types", raise_on_error) + + dangling_types = node_types - (num_src_node_types | num_dst_node_types) + if len(dangling_types) > 0: + warn_or_raise( # May be intended. + f"The node types {dangling_types} are isolated and are not " + f"referenced by any edge type ", raise_on_error=False) + for edge_type, store in self._edge_store_dict.items(): src, _, dst = edge_type diff --git a/torch_geometric/utils/functions.py b/torch_geometric/utils/functions.py index 6708de7fe67f..c6931ec7c31e 100644 --- a/torch_geometric/utils/functions.py +++ b/torch_geometric/utils/functions.py @@ -3,8 +3,8 @@ def cumsum(x: Tensor, dim: int = 0) -> Tensor: - r"""Returns the cumulative sum of elements of :obj:`inputs`. - In contrast to :meth:`torch.cumsum`, preprends the output with a zero. + r"""Returns the cumulative sum of elements of :obj:`x`. + In contrast to :meth:`torch.cumsum`, prepends the output with zero. Args: x (torch.Tensor): The input tensor. 
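To make the effect of the new `HeteroData.validate()` checks concrete, here is a minimal usage sketch (illustrative only, not part of the patch itself; it simply mirrors the `test_dangling_types` test added above):

    import torch

    from torch_geometric.data import HeteroData

    # An edge type that references node types which were never defined:
    data = HeteroData()
    data['src', 'to', 'dst'].edge_index = torch.randint(0, 10, (2, 20))
    data.validate()  # Raises ValueError: "... do not exist as node types".

    # A node type that is not referenced by any edge type only triggers a
    # warning, since isolated node types may be intended:
    data = HeteroData()
    data['node'].num_nodes = 10
    data.validate()  # Warns: "The node types {'node'} are isolated ...".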
From ed76d3cc18ed52718f292e92375276c038635414 Mon Sep 17 00:00:00 2001 From: apfelsinecode Date: Thu, 7 Sep 2023 13:03:20 +0200 Subject: [PATCH 1459/2432] Replace typ annotation List[str] with str for *args (#7996) Following [PEP 484: _Type Hints/Arbitrary argument lists and default argument values_](https://peps.python.org/pep-0484/#arbitrary-argument-lists-and-default-argument-values) the type annotation for argument list `*args` is annotated with the type of each element (`str`) instead of the type `List` itself. This affects the class `Data` and its attributes `Storage` and `View`. Previously, IDEs like PyCharm would mark `data.to('cuda', 'x', 'edge_index')` as incorrect, because it expects every element of the list `args` to be another list. --- torch_geometric/data/data.py | 28 ++++++++++++++-------------- torch_geometric/data/storage.py | 32 ++++++++++++++++---------------- torch_geometric/data/view.py | 4 ++-- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index d8fdadacc2b1..d618fb4ded97 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -265,43 +265,43 @@ def is_directed(self) -> bool: r"""Returns :obj:`True` if graph edges are directed.""" return not self.is_undirected() - def apply_(self, func: Callable, *args: List[str]): + def apply_(self, func: Callable, *args: str): r"""Applies the in-place function :obj:`func`, either to all attributes or only the ones given in :obj:`*args`.""" for store in self.stores: store.apply_(func, *args) return self - def apply(self, func: Callable, *args: List[str]): + def apply(self, func: Callable, *args: str): r"""Applies the function :obj:`func`, either to all attributes or only the ones given in :obj:`*args`.""" for store in self.stores: store.apply(func, *args) return self - def clone(self, *args: List[str]): + def clone(self, *args: str): r"""Performs cloning of tensors, either for all attributes or only the ones given in :obj:`*args`.""" return copy.copy(self).apply(lambda x: x.clone(), *args) - def contiguous(self, *args: List[str]): + def contiguous(self, *args: str): r"""Ensures a contiguous memory layout, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.contiguous(), *args) - def to(self, device: Union[int, str], *args: List[str], + def to(self, device: Union[int, str], *args: str, non_blocking: bool = False): r"""Performs tensor device conversion, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply( lambda x: x.to(device=device, non_blocking=non_blocking), *args) - def cpu(self, *args: List[str]): + def cpu(self, *args: str): r"""Copies attributes to CPU memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.cpu(), *args) - def cuda(self, device: Optional[Union[int, str]] = None, *args: List[str], + def cuda(self, device: Optional[Union[int, str]] = None, *args: str, non_blocking: bool = False): r"""Copies attributes to CUDA memory, either for all attributes or only the ones given in :obj:`*args`.""" @@ -310,34 +310,34 @@ def cuda(self, device: Optional[Union[int, str]] = None, *args: List[str], return self.apply(lambda x: x.cuda(device, non_blocking=non_blocking), *args) - def pin_memory(self, *args: List[str]): + def pin_memory(self, *args: str): r"""Copies attributes to pinned memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: 
x.pin_memory(), *args) - def share_memory_(self, *args: List[str]): + def share_memory_(self, *args: str): r"""Moves attributes to shared memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply_(lambda x: x.share_memory_(), *args) - def detach_(self, *args: List[str]): + def detach_(self, *args: str): r"""Detaches attributes from the computation graph, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply_(lambda x: x.detach_(), *args) - def detach(self, *args: List[str]): + def detach(self, *args: str): r"""Detaches attributes from the computation graph by creating a new tensor, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.detach(), *args) - def requires_grad_(self, *args: List[str], requires_grad: bool = True): + def requires_grad_(self, *args: str, requires_grad: bool = True): r"""Tracks gradient computation, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply_( lambda x: x.requires_grad_(requires_grad=requires_grad), *args) - def record_stream(self, stream: torch.cuda.Stream, *args: List[str]): + def record_stream(self, stream: torch.cuda.Stream, *args: str): r"""Ensures that the tensor memory is not reused for another tensor until all current work queued on :obj:`stream` has been completed, either for all attributes or only the ones given in :obj:`*args`.""" @@ -822,7 +822,7 @@ def __iter__(self) -> Iterable: for key, value in self._store.items(): yield key, value - def __call__(self, *args: List[str]) -> Iterable: + def __call__(self, *args: str) -> Iterable: r"""Iterates over all attributes :obj:`*args` in the data, yielding their attribute names and values. If :obj:`*args` is not given, will iterate over all attributes.""" diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index 3de611967fc6..cc6dda14288e 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -163,23 +163,23 @@ def __repr__(self) -> str: # storage object, e.g., in case we only want to transfer a subset of keys # to the GPU (i.e. the ones that are relevant to the deep learning model). 
- def keys(self, *args: List[str]) -> KeysView: + def keys(self, *args: str) -> KeysView: return KeysView(self._mapping, *args) - def values(self, *args: List[str]) -> ValuesView: + def values(self, *args: str) -> ValuesView: return ValuesView(self._mapping, *args) - def items(self, *args: List[str]) -> ItemsView: + def items(self, *args: str) -> ItemsView: return ItemsView(self._mapping, *args) - def apply_(self, func: Callable, *args: List[str]): + def apply_(self, func: Callable, *args: str): r"""Applies the in-place function :obj:`func`, either to all attributes or only the ones given in :obj:`*args`.""" for value in self.values(*args): recursive_apply_(value, func) return self - def apply(self, func: Callable, *args: List[str]): + def apply(self, func: Callable, *args: str): r"""Applies the function :obj:`func`, either to all attributes or only the ones given in :obj:`*args`.""" for key, value in self.items(*args): @@ -208,62 +208,62 @@ def to_namedtuple(self) -> NamedTuple: StorageTuple = namedtuple(typename, field_names) return StorageTuple(*[self[key] for key in field_names]) - def clone(self, *args: List[str]): + def clone(self, *args: str): r"""Performs a deep-copy of the object.""" return copy.deepcopy(self) - def contiguous(self, *args: List[str]): + def contiguous(self, *args: str): r"""Ensures a contiguous memory layout, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.contiguous(), *args) - def to(self, device: Union[int, str], *args: List[str], + def to(self, device: Union[int, str], *args: str, non_blocking: bool = False): r"""Performs tensor dtype and/or device conversion, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply( lambda x: x.to(device=device, non_blocking=non_blocking), *args) - def cpu(self, *args: List[str]): + def cpu(self, *args: str): r"""Copies attributes to CPU memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.cpu(), *args) - def cuda(self, device: Optional[Union[int, str]] = None, *args: List[str], + def cuda(self, device: Optional[Union[int, str]] = None, *args: str, non_blocking: bool = False): # pragma: no cover r"""Copies attributes to CUDA memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.cuda(device, non_blocking=non_blocking), *args) - def pin_memory(self, *args: List[str]): # pragma: no cover + def pin_memory(self, *args: str): # pragma: no cover r"""Copies attributes to pinned memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.pin_memory(), *args) - def share_memory_(self, *args: List[str]): + def share_memory_(self, *args: str): r"""Moves attributes to shared memory, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.share_memory_(), *args) - def detach_(self, *args: List[str]): + def detach_(self, *args: str): r"""Detaches attributes from the computation graph, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.detach_(), *args) - def detach(self, *args: List[str]): + def detach(self, *args: str): r"""Detaches attributes from the computation graph by creating a new tensor, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply(lambda x: x.detach(), *args) - def requires_grad_(self, *args: List[str], requires_grad: bool = True): + def requires_grad_(self, *args: str, 
requires_grad: bool = True): r"""Tracks gradient computation, either for all attributes or only the ones given in :obj:`*args`.""" return self.apply( lambda x: x.requires_grad_(requires_grad=requires_grad), *args) - def record_stream(self, stream: torch.cuda.Stream, *args: List[str]): + def record_stream(self, stream: torch.cuda.Stream, *args: str): r"""Ensures that the tensor memory is not reused for another tensor until all current work queued on :obj:`stream` has been completed, either for all attributes or only the ones given in :obj:`*args`.""" diff --git a/torch_geometric/data/view.py b/torch_geometric/data/view.py index d03b96723451..9fe7fd95d0eb 100644 --- a/torch_geometric/data/view.py +++ b/torch_geometric/data/view.py @@ -1,9 +1,9 @@ from collections.abc import Mapping -from typing import Iterable, List +from typing import Iterable class MappingView: - def __init__(self, mapping: Mapping, *args: List[str]): + def __init__(self, mapping: Mapping, *args: str): self._mapping = mapping self._args = args From c2cf68d3a1fbc49b64204ef5482e893a6bf1cfa7 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 10 Sep 2023 18:53:48 +0200 Subject: [PATCH 1460/2432] Fix GPU tests by requiring latest `pyg-lib` (#8014) --- .github/actions/setup/action.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 41d7a449d269..5e2bcaf9ea43 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -51,6 +51,7 @@ runs: - name: Install pyg-lib # pyg-lib is currently only available on Linux. if: ${{ inputs.torch-version != 'nightly' && runner.os == 'Linux' }} run: | + pip uninstall -y pyg-lib pip install --no-index pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash @@ -71,5 +72,5 @@ runs: run: | pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} pip install scipy - pip install --no-index torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html + pip install --no-index --upgrade torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html shell: bash From dea1577dfdacdb0347e381dd1288a3bb4f04b544 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 11 Sep 2023 08:16:50 +0200 Subject: [PATCH 1461/2432] Clarify how `to_dense_adj` deals with duplicated edges (#8016) Fixes #7998 --- torch_geometric/utils/to_dense_adj.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/torch_geometric/utils/to_dense_adj.py b/torch_geometric/utils/to_dense_adj.py index 1a501b40b6ed..15a61d6eb366 100644 --- a/torch_geometric/utils/to_dense_adj.py +++ b/torch_geometric/utils/to_dense_adj.py @@ -23,7 +23,10 @@ def to_dense_adj( :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) edge_attr (Tensor, optional): Edge weights or multi-dimensional edge - features. (default: :obj:`None`) + features. + If :obj:`edge_index` contains duplicated edges, the dense adjacency + matrix output holds the summed up entries of :obj:`edge_attr` for + duplicated edges. (default: :obj:`None`) max_num_nodes (int, optional): The size of the output node dimension. 
(default: :obj:`None`) batch_size (int, optional) The batch size. (default: :obj:`None`) From 0438d3a5bfae67967c323c47ee6b21f3824aa49d Mon Sep 17 00:00:00 2001 From: kaixuanliu Date: Mon, 11 Sep 2023 14:40:46 +0800 Subject: [PATCH 1462/2432] Filter out empty tensors inside `trim_to_layer` (#7942) filter out empty tensor for `x`, `edge_index`, `edge_attr` after calling `trim_to_layer` function. This can avoid unnecessary computation when some node/edge types get empty output. For example: when I train `igbh-tiny` dataset with 3 hops sampler and use `trim_to_layer` function, I get a lot of empty edge_index tensor for edge type '('author', 'affiliated_to', 'institute')', but the feature tensor for 'author' node type is still sent to compute in `HeteroConv` implementation. --------- Signed-off-by: Liu,Kaixuan Co-authored-by: Matthias Fey Co-authored-by: Jintang Li --- CHANGELOG.md | 1 + test/utils/test_trim_to_layer.py | 36 ++++++++++++++++++++++++++ torch_geometric/utils/trim_to_layer.py | 20 +++++++++++++- 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 18f974b42bc9..6b0caa744a64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Changed the `trim_to_layer` function to filter out non-reachable node and edge types when operating on heterogeneous graphs ([#7942](https://github.com/pyg-team/pytorch_geometric/pull/7942)) - Accelerated and simplified `top_k` computation in `TopKPooling` ([#7737](https://github.com/pyg-team/pytorch_geometric/pull/7737)) - Updated `GIN` implementation in kernel benchmarks to have sequential batchnorms ([#7955](https://github.com/pyg-team/pytorch_geometric/pull/7955)) - Fixed bugs in benchmarks caused by a lack of the device conditions for CPU and unexpected `cache` argument in heterogeneous models ([#7956](https://github.com/pyg-team/pytorch_geometric/pull/7956) diff --git a/test/utils/test_trim_to_layer.py b/test/utils/test_trim_to_layer.py index 5ee789feb7c8..c8296008451c 100644 --- a/test/utils/test_trim_to_layer.py +++ b/test/utils/test_trim_to_layer.py @@ -197,3 +197,39 @@ def test_trim_to_layer_with_neighbor_loader(): assert out2.size() == (2, 16) assert torch.allclose(out1, out2) + + +def test_trim_to_layer_filtering(): + x_dict = { + 'paper': torch.rand((13, 128)), + 'author': torch.rand((5, 128)), + 'field_of_study': torch.rand((6, 128)) + } + edge_index_dict = { + ('author', 'writes', 'paper'): + torch.tensor([[0, 1, 2, 3, 4], [0, 0, 1, 2, 2]]), + ('paper', 'has_topic', 'field_of_study'): + torch.tensor([[6, 7, 8, 9], [0, 0, 1, 1]]) + } + num_sampled_nodes_dict = { + 'paper': [1, 2, 10], + 'author': [0, 2, 3], + 'field_of_study': [0, 2, 4] + } + num_sampled_edges_dict = { + ('author', 'writes', 'paper'): [2, 3], + ('paper', 'has_topic', 'field_of_study'): [0, 4] + } + x_dict, edge_index_dict, _ = trim_to_layer( + layer=1, + num_sampled_nodes_per_hop=num_sampled_nodes_dict, + num_sampled_edges_per_hop=num_sampled_edges_dict, + x=x_dict, + edge_index=edge_index_dict, + ) + assert list(edge_index_dict.keys()) == [('author', 'writes', 'paper')] + assert torch.equal(edge_index_dict[('author', 'writes', 'paper')], + torch.tensor([[0, 1], [0, 0]])) + assert x_dict['paper'].size() == (3, 128) + assert x_dict['author'].size() == (2, 128) + assert x_dict['field_of_study'].size() == (2, 128) diff --git a/torch_geometric/utils/trim_to_layer.py b/torch_geometric/utils/trim_to_layer.py index 
bd6a2452f53e..1efdd96d171d 100644 --- a/torch_geometric/utils/trim_to_layer.py +++ b/torch_geometric/utils/trim_to_layer.py @@ -1,4 +1,5 @@ -from typing import Dict, List, Optional, Tuple, Union +import copy +from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import Tensor @@ -14,6 +15,17 @@ ) +def filter_empty_entries( + input_dict: Dict[Union[Any], Tensor]) -> Dict[Any, Tensor]: + r"""Removes empty tensors from a dictionary. This avoids unnecessary + computation when some node/edge types are non-reachable after trimming.""" + out_dict = copy.copy(input_dict) + for key, value in input_dict.items(): + if value.numel() == 0: + del out_dict[key] + return out_dict + + def trim_to_layer( layer: int, num_sampled_nodes_per_hop: Union[List[int], Dict[NodeType, List[int]]], @@ -53,6 +65,8 @@ def trim_to_layer( k: trim_feat(v, layer, num_sampled_nodes_per_hop[k]) for k, v in x.items() } + x = filter_empty_entries(x) + edge_index = { k: trim_adj( @@ -64,11 +78,15 @@ def trim_to_layer( ) for k, v in edge_index.items() } + edge_index = filter_empty_entries(edge_index) + if edge_attr is not None: edge_attr = { k: trim_feat(v, layer, num_sampled_edges_per_hop[k]) for k, v in edge_attr.items() } + edge_attr = filter_empty_entries(edge_attr) + return x, edge_index, edge_attr x = trim_feat(x, layer, num_sampled_nodes_per_hop) From 3b97746e64e2336fb4be6ba403b52b9cc00e383f Mon Sep 17 00:00:00 2001 From: ArchieGertsman Date: Mon, 11 Sep 2023 02:56:45 -0500 Subject: [PATCH 1463/2432] Implemented the Learnable Communitive Monoid Aggregation (#7976) Initial version of the LCM aggregation, requested in issue #7574. A possible feature to incorporate is for `forward` to additionally compute and return an associativity loss, as described in the paper. --------- Co-authored-by: Rishi Puri Co-authored-by: Jintang Li Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/aggr/test_lcm.py | 31 +++++++++ torch_geometric/nn/aggr/__init__.py | 2 + torch_geometric/nn/aggr/lcm.py | 101 ++++++++++++++++++++++++++++ 4 files changed, 135 insertions(+) create mode 100644 test/nn/aggr/test_lcm.py create mode 100644 torch_geometric/nn/aggr/lcm.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b0caa744a64..9d6cf049029e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) diff --git a/test/nn/aggr/test_lcm.py b/test/nn/aggr/test_lcm.py new file mode 100644 index 000000000000..60cb2335d527 --- /dev/null +++ b/test/nn/aggr/test_lcm.py @@ -0,0 +1,31 @@ +import pytest +import torch + +from torch_geometric.nn import LCMAggregation + + +def test_lcm_aggregation_with_project(): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + + aggr = LCMAggregation(16, 32) + assert str(aggr) == 'LCMAggregation(16, 32, project=True)' + + out = aggr(x, index) + assert out.size() == (3, 32) + + +def test_lcm_aggregation_without_project(): + x = torch.randn(6, 16) + index = torch.tensor([0, 0, 1, 1, 1, 2]) + + aggr = LCMAggregation(16, 16, project=False) + assert str(aggr) == 'LCMAggregation(16, 16, project=False)' + + out = aggr(x, index) + assert out.size() == (3, 16) + + +def test_lcm_aggregation_error_handling(): + with pytest.raises(ValueError, match="must be projected"): + LCMAggregation(16, 32, project=False) diff --git a/torch_geometric/nn/aggr/__init__.py b/torch_geometric/nn/aggr/__init__.py index bbb67089f62e..c41d038b8ba2 100644 --- a/torch_geometric/nn/aggr/__init__.py +++ b/torch_geometric/nn/aggr/__init__.py @@ -23,6 +23,7 @@ from .mlp import MLPAggregation from .deep_sets import DeepSetsAggregation from .set_transformer import SetTransformerAggregation +from .lcm import LCMAggregation __all__ = classes = [ 'Aggregation', @@ -49,4 +50,5 @@ 'MLPAggregation', 'DeepSetsAggregation', 'SetTransformerAggregation', + 'LCMAggregation', ] diff --git a/torch_geometric/nn/aggr/lcm.py b/torch_geometric/nn/aggr/lcm.py new file mode 100644 index 000000000000..ac96a6d14d2d --- /dev/null +++ b/torch_geometric/nn/aggr/lcm.py @@ -0,0 +1,101 @@ +from math import ceil, log2 +from typing import Optional + +from torch import Tensor +from torch.nn import GRUCell, Linear + +from torch_geometric.experimental import disable_dynamic_shapes +from torch_geometric.nn.aggr import Aggregation + + +class LCMAggregation(Aggregation): + r"""The Learnable Commutative Monoid aggregation from the + `"Learnable Commutative Monoids for Graph Neural Networks" + `_ paper, in which the elements are + aggregated using a binary tree reduction with + :math:`\mathcal{O}(\log |\mathcal{V}|)` depth. + + .. note:: + + :class:`LCMAggregation` requires sorted indices :obj:`index` as input. + Specifically, if you use this aggregation as part of + :class:`~torch_geometric.nn.conv.MessagePassing`, ensure that + :obj:`edge_index` is sorted by destination nodes, either by manually + sorting edge indices via :meth:`~torch_geometric.utils.sort_edge_index` + or by calling :meth:`torch_geometric.data.Data.sort`. + + .. warning:: + + :class:`LCMAggregation` is not a permutation-invariant operator. + + Args: + in_channels (int): Size of each input sample. + out_channels (int): Size of each output sample. + project (bool, optional): If set to :obj:`True`, the layer will apply a + linear transformation followed by an activation function before + aggregation. 
(default: :obj:`True`) + """ + def __init__( + self, + in_channels: int, + out_channels: int, + project: bool = True, + ): + super().__init__() + + if in_channels != out_channels and not project: + raise ValueError(f"Inputs of '{self.__class__.__name__}' must be " + f"projected if `in_channels != out_channels`") + + self.in_channels = in_channels + self.out_channels = out_channels + self.project = project + + if self.project: + self.lin = Linear(in_channels, out_channels) + else: + self.lin = None + + self.gru_cell = GRUCell(out_channels, out_channels) + + def reset_parameters(self): + if self.project: + self.lin.reset_parameters() + self.gru_cell.reset_parameters() + + def binary_op(self, left: Tensor, right: Tensor) -> Tensor: + return (self.gru_cell(left, right) + self.gru_cell(right, left)) / 2.0 + + @disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) + def forward( + self, + x: Tensor, + index: Optional[Tensor] = None, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + dim: int = -2, + max_num_elements: Optional[int] = None, + ) -> Tensor: + + if self.project: + x = self.lin(x).relu() + + x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, + max_num_elements=max_num_elements) + + x = x.permute(1, 0, 2) # [num_neighbors, num_nodes, num_features] + + depth = ceil(log2(x.size(0))) + for _ in range(depth): + x = [ + self.binary_op(x[2 * i], x[2 * i + 1]) if + (2 * i + 1) < len(x) else x[2 * i] + for i in range(ceil(len(x) / 2)) + ] + + assert len(x) == 1 + return x[0] + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.out_channels}, project={self.project})') From 1f79875bcfba4af39097ce97d920fe0372fe9416 Mon Sep 17 00:00:00 2001 From: rusty1s Date: Mon, 11 Sep 2023 11:23:11 +0000 Subject: [PATCH 1464/2432] update --- test/nn/conv/test_cheb_conv.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/nn/conv/test_cheb_conv.py b/test/nn/conv/test_cheb_conv.py index ff264a11e22c..8baccca7629e 100644 --- a/test/nn/conv/test_cheb_conv.py +++ b/test/nn/conv/test_cheb_conv.py @@ -67,5 +67,5 @@ def test_cheb_conv_batch(): out = conv(batch.x, batch.edge_index, batch.edge_weight, batch.batch) assert out.size() == (7, 16) - assert torch.allclose(out1, out[:4]) - assert torch.allclose(out2, out[4:]) + assert torch.allclose(out1, out[:4], atol=1e-6) + assert torch.allclose(out2, out[4:], atol=1e-6) From 0aeef587f5eb438db9150250c62b391fcba458d2 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 11 Sep 2023 13:47:02 +0200 Subject: [PATCH 1465/2432] Catch all exceptions when looking for extension packages (#8017) --- torch_geometric/typing.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index e13242579f55..190e15f50a65 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -39,8 +39,8 @@ WITH_METIS = hasattr(pyg_lib, 'partition') WITH_WEIGHTED_NEIGHBOR_SAMPLE = ('edge_weight' in inspect.signature( pyg_lib.sampler.neighbor_sample).parameters) -except (ImportError, OSError) as e: - if isinstance(e, OSError): +except Exception as e: + if not isinstance(e, ImportError): # pragma: no cover warnings.warn(f"An issue occurred while importing 'pyg-lib'. " f"Disabling its usage. 
Stacktrace: {e}") pyg_lib = object @@ -50,12 +50,13 @@ WITH_SAMPLED_OP = False WITH_INDEX_SORT = False WITH_METIS = False + WITH_WEIGHTED_NEIGHBOR_SAMPLE = False try: import torch_scatter # noqa WITH_TORCH_SCATTER = True -except (ImportError, OSError) as e: - if isinstance(e, OSError): +except Exception as e: + if not isinstance(e, ImportError): # pragma: no cover warnings.warn(f"An issue occurred while importing 'torch-scatter'. " f"Disabling its usage. Stacktrace: {e}") torch_scatter = object @@ -65,11 +66,12 @@ import torch_cluster # noqa WITH_TORCH_CLUSTER = True WITH_TORCH_CLUSTER_BATCH_SIZE = 'batch_size' in torch_cluster.knn.__doc__ -except (ImportError, OSError) as e: - if isinstance(e, OSError): +except Exception as e: + if not isinstance(e, ImportError): # pragma: no cover warnings.warn(f"An issue occurred while importing 'torch-cluster'. " f"Disabling its usage. Stacktrace: {e}") WITH_TORCH_CLUSTER = False + WITH_TORCH_CLUSTER_BATCH_SIZE = False class TorchCluster: def __getattr__(self, key: str): @@ -80,8 +82,8 @@ def __getattr__(self, key: str): try: import torch_spline_conv # noqa WITH_TORCH_SPLINE_CONV = True -except (ImportError, OSError) as e: - if isinstance(e, OSError): +except Exception as e: + if not isinstance(e, ImportError): # pragma: no cover warnings.warn( f"An issue occurred while importing 'torch-spline-conv'. " f"Disabling its usage. Stacktrace: {e}") @@ -91,8 +93,8 @@ def __getattr__(self, key: str): import torch_sparse # noqa from torch_sparse import SparseStorage, SparseTensor WITH_TORCH_SPARSE = True -except (ImportError, OSError) as e: - if isinstance(e, OSError): +except Exception as e: + if not isinstance(e, ImportError): # pragma: no cover warnings.warn(f"An issue occurred while importing 'torch-sparse'. " f"Disabling its usage. Stacktrace: {e}") WITH_TORCH_SPARSE = False @@ -212,7 +214,7 @@ def masked_select_nnz(src: SparseTensor, mask: Tensor, try: import intel_extension_for_pytorch # noqa WITH_IPEX = True -except (ImportError, OSError): +except Exception: WITH_IPEX = False From 8b2ae66ed04596607bab49251ffd5a7a979c4892 Mon Sep 17 00:00:00 2001 From: xnuohz Date: Mon, 11 Sep 2023 22:25:25 +0800 Subject: [PATCH 1466/2432] PyTorch Sparse Tensor support: `WLConvContinuous`, `GeneralConv`, `PDNConv`, `ARMAConv` (#8013) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/conv/test_arma_conv.py | 14 ++++ test/nn/conv/test_general_conv.py | 73 ++++++------------- test/nn/conv/test_pdn_conv.py | 14 ++++ test/nn/conv/test_wl_conv_continuous.py | 37 +++++++++- torch_geometric/nn/conv/wl_conv_continuous.py | 46 +++++++++--- 6 files changed, 120 insertions(+), 65 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d6cf049029e..62fa49451ac1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) - Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) diff --git a/test/nn/conv/test_arma_conv.py b/test/nn/conv/test_arma_conv.py index 9a3ed519eeac..94ca336890b7 100644 --- a/test/nn/conv/test_arma_conv.py +++ b/test/nn/conv/test_arma_conv.py @@ -43,3 +43,17 @@ def test_lazy_arma_conv(): assert str(conv) == 'ARMAConv(-1, 32, num_stacks=8, num_layers=4)' out = conv(x, edge_index) assert out.size() == (4, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + assert torch.allclose(conv(x, adj2.t()), out) + + if is_full_test(): + t = '(Tensor, Tensor, OptTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, edge_index), out) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + t = '(Tensor, SparseTensor, OptTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, adj2.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_general_conv.py b/test/nn/conv/test_general_conv.py index c0c7910d25c8..efe9486197c5 100644 --- a/test/nn/conv/test_general_conv.py +++ b/test/nn/conv/test_general_conv.py @@ -1,58 +1,31 @@ +import pytest import torch +import torch_geometric.typing from torch_geometric.nn import GeneralConv - - -def test_general_conv(): - x1 = torch.randn(4, 8) - e1 = torch.randn(4, 16) +from torch_geometric.typing import SparseTensor + + +@pytest.mark.parametrize('kwargs', [ + dict(), + dict(skip_linear=True), + dict(directed_msg=False), + dict(heads=3), + dict(attention=True), + dict(heads=3, attention=True), + dict(heads=3, attention=True, attention_type='dot_product'), + dict(l2_normalize=True), +]) +def test_general_conv(kwargs): + x = torch.randn(4, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + edge_attr = torch.randn(edge_index.size(1), 16) - conv = GeneralConv(8, 32, 16) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, skip_linear=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, directed_msg=False) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, heads=3) + conv = GeneralConv(8, 32, in_edge_channels=16, **kwargs) assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) + out = conv(x, edge_index, edge_attr) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - conv = GeneralConv(8, 32, 16, attention=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - 
assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, heads=3, attention=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, heads=3, attention=True, - attention_type='dot_product') - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, l2_normalize=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) + assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) diff --git a/test/nn/conv/test_pdn_conv.py b/test/nn/conv/test_pdn_conv.py index 86cdf4121b16..ddaec0596a95 100644 --- a/test/nn/conv/test_pdn_conv.py +++ b/test/nn/conv/test_pdn_conv.py @@ -45,3 +45,17 @@ def test_pdn_conv_with_sparse_node_input_feature(): out = conv(x, edge_index, edge_attr) assert out.size() == (4, 32) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) + assert torch.allclose(conv(x, adj.t(), edge_attr), out, atol=1e-6) + + if is_full_test(): + t = '(Tensor, Tensor, OptTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, edge_index, edge_attr), out) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + t = '(Tensor, SparseTensor, OptTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, adj.t(), edge_attr), out, atol=1e-6) diff --git a/test/nn/conv/test_wl_conv_continuous.py b/test/nn/conv/test_wl_conv_continuous.py index 733b7fbfd8a7..f8fe84762cac 100644 --- a/test/nn/conv/test_wl_conv_continuous.py +++ b/test/nn/conv/test_wl_conv_continuous.py @@ -1,7 +1,9 @@ import torch +import torch_geometric.typing from torch_geometric.nn import WLConvContinuous from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor def test_wl_conv(): @@ -14,19 +16,46 @@ def test_wl_conv(): out = conv(x, edge_index) assert out.tolist() == [[-0.5], [0.0], [0.5]] + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(3, 3)) + assert torch.allclose(conv(x, adj.t()), out) + if is_full_test(): t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x, edge_index), out) + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) + # Test bipartite message passing: x1 = torch.randn(4, 8) x2 = torch.randn(2, 8) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) edge_weight = torch.randn(edge_index.size(1)) - out = conv((x1, None), edge_index, edge_weight, size=(4, 2)) - assert out.size() == (2, 8) + out1 = conv((x1, None), edge_index, edge_weight, size=(4, 2)) + assert out1.size() == (2, 8) + + out2 = conv((x1, x2), edge_index, edge_weight) + assert out2.size() == (2, 8) + + if 
torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, edge_weight, (4, 2)) + assert torch.allclose(conv((x1, None), adj.t()), out1) + assert torch.allclose(conv((x1, x2), adj.t()), out2) - out = conv((x1, x2), edge_index, edge_weight) - assert out.size() == (2, 8) + if is_full_test(): + t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose( + jit((x1, None), edge_index, edge_weight, size=(4, 2)), out1) + assert torch.allclose(jit((x1, x2), edge_index, edge_weight), out2) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit((x1, None), adj.t()), out1, atol=1e-6) + assert torch.allclose(jit((x1, x2), adj.t()), out2, atol=1e-6) diff --git a/torch_geometric/nn/conv/wl_conv_continuous.py b/torch_geometric/nn/conv/wl_conv_continuous.py index f74946060f91..e1979059389d 100644 --- a/torch_geometric/nn/conv/wl_conv_continuous.py +++ b/torch_geometric/nn/conv/wl_conv_continuous.py @@ -3,8 +3,14 @@ from torch import Tensor from torch_geometric.nn.conv import MessagePassing -from torch_geometric.typing import OptPairTensor, OptTensor, Size -from torch_geometric.utils import scatter +from torch_geometric.typing import ( + Adj, + OptPairTensor, + OptTensor, + Size, + SparseTensor, +) +from torch_geometric.utils import scatter, spmm class WLConvContinuous(MessagePassing): @@ -37,20 +43,31 @@ class WLConvContinuous(MessagePassing): def __init__(self, **kwargs): super().__init__(aggr='add', **kwargs) - def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Tensor, - edge_weight: OptTensor = None, size: Size = None) -> Tensor: + def forward( + self, + x: Union[Tensor, OptPairTensor], + edge_index: Adj, + edge_weight: OptTensor = None, + size: Size = None, + ) -> Tensor: if isinstance(x, Tensor): x: OptPairTensor = (x, x) - if edge_weight is None: - edge_weight = x[0].new_ones(edge_index.size(1)) - - # propagate_type: (x: OptPairTensor, edge_weight: Tensor) + # propagate_type: (x: OptPairTensor, edge_weight: OptTensor) out = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=size) - deg = scatter(edge_weight, edge_index[1], 0, out.size(0), reduce='sum') + if isinstance(edge_index, SparseTensor): + assert edge_weight is None + dst_index, _, edge_weight = edge_index.coo() + else: + dst_index = edge_index[1] + + if edge_weight is None: + edge_weight = x[0].new_ones(dst_index.numel()) + + deg = scatter(edge_weight, dst_index, 0, out.size(0), reduce='sum') deg_inv = 1. 
/ deg deg_inv.masked_fill_(deg_inv == float('inf'), 0) out = deg_inv.view(-1, 1) * out @@ -61,5 +78,12 @@ def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Tensor, return out - def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor: - return edge_weight.view(-1, 1) * x_j + def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor: + return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j + + def message_and_aggregate( + self, + adj_t: SparseTensor, + x: OptPairTensor, + ) -> Tensor: + return spmm(adj_t, x[0], reduce=self.aggr) From 297f9e613486fa6081e59c50f18409f0da7d91b7 Mon Sep 17 00:00:00 2001 From: Benjamin Kurt Miller <12955549+bkmi@users.noreply.github.com> Date: Mon, 11 Sep 2023 20:40:48 -0700 Subject: [PATCH 1467/2432] Make `DimeNet` pickleable (#8019) This is an extremely simple and small change to make instantiated versions of `SphericalBasisLayer` pickleable. The issue is that the lambda is a local function and therefore not pickleable. I made the function into a "private" static method within the class. It is unlikely to cause name collisions with subclasses. I have not run `pytest`; however, I am using the code in my own project. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/nn/models/dimenet.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 62fa49451ac1..fb96d7d1dca2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,6 +98,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Changed +- Enabled pickling of `DimeNet` models ([#8019](https://github.com/pyg-team/pytorch_geometric/pull/8019)) - Changed the `trim_to_layer` function to filter out non-reachable node and edge types when operating on heterogeneous graphs ([#7942](https://github.com/pyg-team/pytorch_geometric/pull/7942)) - Accelerated and simplified `top_k` computation in `TopKPooling` ([#7737](https://github.com/pyg-team/pytorch_geometric/pull/7737)) - Updated `GIN` implementation in kernel benchmarks to have sequential batchnorms ([#7955](https://github.com/pyg-team/pytorch_geometric/pull/7955)) diff --git a/torch_geometric/nn/models/dimenet.py b/torch_geometric/nn/models/dimenet.py index cc41761587a0..2c033d7e3443 100644 --- a/torch_geometric/nn/models/dimenet.py +++ b/torch_geometric/nn/models/dimenet.py @@ -1,5 +1,6 @@ import os import os.path as osp +from functools import partial from math import pi as PI from math import sqrt from typing import Callable, Dict, Optional, Tuple, Union @@ -102,7 +103,7 @@ def __init__( for i in range(num_spherical): if i == 0: sph1 = sym.lambdify([theta], sph_harm_forms[i][0], modules)(0) - self.sph_funcs.append(lambda x: torch.zeros_like(x) + sph1) + self.sph_funcs.append(partial(self._sph_to_tensor, sph1)) else: sph = sym.lambdify([theta], sph_harm_forms[i][0], modules) self.sph_funcs.append(sph) @@ -110,6 +111,10 @@ def __init__( bessel = sym.lambdify([x], bessel_forms[i][j], modules) self.bessel_funcs.append(bessel) + @staticmethod + def _sph_to_tensor(sph, x: Tensor) -> Tensor: + return torch.zeros_like(x) + sph + def forward(self, dist: Tensor, angle: Tensor, idx_kj: Tensor) -> Tensor: dist = dist / self.cutoff rbf = torch.stack([f(dist) for f in self.bessel_funcs], dim=1) From b904044b6e8d87ee7321dc16a9081c5949a17968 Mon Sep 17 00:00:00 2001 From: Harshit Verma <100012454+harshit5674@users.noreply.github.com> Date: Tue, 12 Sep 
2023 13:05:18 +0530 Subject: [PATCH 1468/2432] Implementing Neural Fingerprint from Duvenaud et al. (#7919) Implemented the Neural Fingerprint Model as suggested in the paper "[Convolutional Networks on Graphs for Learning Molecular Fingerprints](https://arxiv.org/pdf/1509.09292.pdf)" and suggested in the issue https://github.com/pyg-team/pytorch_geometric/issues/6077. Unit test has been added and all tests are passing. --------- Co-authored-by: Harshit Verma Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jintang Li Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/models/test_neural_fingerprint.py | 28 ++++++++ torch_geometric/nn/models/__init__.py | 2 + .../nn/models/neural_fingerprint.py | 72 +++++++++++++++++++ 4 files changed, 103 insertions(+) create mode 100644 test/nn/models/test_neural_fingerprint.py create mode 100644 torch_geometric/nn/models/neural_fingerprint.py diff --git a/CHANGELOG.md b/CHANGELOG.md index fb96d7d1dca2..5e939d3c3c73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) - Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) diff --git a/test/nn/models/test_neural_fingerprint.py b/test/nn/models/test_neural_fingerprint.py new file mode 100644 index 000000000000..f32d7e72c167 --- /dev/null +++ b/test/nn/models/test_neural_fingerprint.py @@ -0,0 +1,28 @@ +import pytest +import torch + +import torch_geometric.typing +from torch_geometric.nn import NeuralFingerprint +from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor + + +@pytest.mark.parametrize('batch', [None, torch.tensor([0, 1, 1])]) +def test_neural_fingerprint(batch): + x = torch.randn(3, 7) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + + model = NeuralFingerprint(7, 16, out_channels=5, num_layers=4) + assert str(model) == 'NeuralFingerprint(7, 5, num_layers=4)' + model.reset_parameters() + + out = model(x, edge_index, batch) + assert out.size() == (1, 5) if batch is None else (2, 5) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(3, 3)) + assert torch.allclose(model(x, adj.t(), batch), out) + + if is_full_test(): + jit = torch.jit.export(model) + assert torch.allclose(jit(x, edge_index, batch), out) diff --git a/torch_geometric/nn/models/__init__.py b/torch_geometric/nn/models/__init__.py index 9b716e21e858..4fbbffb4ba50 100644 --- a/torch_geometric/nn/models/__init__.py +++ b/torch_geometric/nn/models/__init__.py @@ -24,6 +24,7 @@ from .rev_gnn import GroupAddRev from .gnnff import GNNFF from .pmlp import PMLP +from .neural_fingerprint import NeuralFingerprint __all__ = classes = [ 'MLP', @@ -64,4 +65,5 @@ 'GroupAddRev', 'GNNFF', 'PMLP', + 'NeuralFingerprint', ] diff --git a/torch_geometric/nn/models/neural_fingerprint.py b/torch_geometric/nn/models/neural_fingerprint.py new file mode 100644 
index 000000000000..62fa898aa1ce --- /dev/null +++ b/torch_geometric/nn/models/neural_fingerprint.py @@ -0,0 +1,72 @@ +from typing import Optional + +import torch +from torch import Tensor + +from torch_geometric.nn import Linear, MFConv, global_add_pool +from torch_geometric.typing import Adj + + +class NeuralFingerprint(torch.nn.Module): + r"""The Neural Fingerprint model from the + `"Convolutional Networks on Graphs for Learning Molecular Fingerprints" + `__ paper to generate fingerprints + of molecules. + + Args: + in_channels (int): Size of each input sample. + hidden_channels (int): Size of each hidden sample. + out_channels (int): Size of each output fingerprint. + num_layers (int): Number of layers. + **kwargs (optional): Additional arguments of + :class:`torch_geometric.nn.conv.MFConv`. + """ + def __init__( + self, + in_channels: int, + hidden_channels: int, + out_channels: int, + num_layers: int, + **kwargs, + ): + super().__init__() + + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.num_layers = num_layers + + self.convs = torch.nn.ModuleList() + for i in range(self.num_layers): + in_channels = self.in_channels if i == 0 else self.hidden_channels + self.convs.append(MFConv(in_channels, hidden_channels, **kwargs)) + + self.lins = torch.nn.ModuleList() + for _ in range(self.num_layers): + self.lins.append(Linear(hidden_channels, out_channels, bias=False)) + + def reset_parameters(self): + r"""Resets all learnable parameters of the module.""" + for conv in self.convs: + conv.reset_parameters() + for lin in self.lins: + lin.reset_parameters() + + def forward( + self, + x: Tensor, + edge_index: Adj, + batch: Optional[Tensor] = None, + batch_size: Optional[int] = None, + ) -> Tensor: + """""" + outs = [] + for conv, lin in zip(self.convs, self.lins): + x = conv(x, edge_index).sigmoid() + y = lin(x).softmax(dim=-1) + outs.append(global_add_pool(y, batch, batch_size)) + return sum(outs) + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.out_channels}, num_layers={self.num_layers})') From b79a847ed83e09369a26b261e431b8a16cb10e7e Mon Sep 17 00:00:00 2001 From: Chendi Qian <32506156+chendiqian@users.noreply.github.com> Date: Tue, 12 Sep 2023 16:01:11 +0200 Subject: [PATCH 1469/2432] Add `IBMBBatchLoader` and `IBMBNodeLoader` (#6230) add IBMB loaders of paper [Influence-Based Mini-Batching for Graph Neural Networks](https://openreview.net/forum?id=b9g0vxzYa_) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Matthias Fey Co-authored-by: Chendi Qian <32506156+Spazierganger@users.noreply.github.com> --- CHANGELOG.md | 1 + test/loader/test_ibmb_loader.py | 68 ++ torch_geometric/loader/__init__.py | 3 + torch_geometric/loader/ibmb_loader.py | 909 ++++++++++++++++++++++++++ 4 files changed, 981 insertions(+) create mode 100644 test/loader/test_ibmb_loader.py create mode 100644 torch_geometric/loader/ibmb_loader.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e939d3c3c73..d067d3b518e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) - Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976)) diff --git a/test/loader/test_ibmb_loader.py b/test/loader/test_ibmb_loader.py new file mode 100644 index 000000000000..34b2cabe1901 --- /dev/null +++ b/test/loader/test_ibmb_loader.py @@ -0,0 +1,68 @@ +import pytest +import torch +from torch import Tensor + +import torch_geometric.typing +from torch_geometric.datasets import KarateClub +from torch_geometric.loader.ibmb_loader import IBMBBatchLoader, IBMBNodeLoader +from torch_geometric.testing import withPackage +from torch_geometric.typing import SparseTensor + + +@withPackage('python_tsp') +@pytest.mark.parametrize( + 'use_sparse_tensor', + [False] + [True] if torch_geometric.typing.WITH_TORCH_SPARSE else []) +@pytest.mark.parametrize('kwargs', [ + dict(num_partitions=4, batch_size=1), + dict(num_partitions=8, batch_size=2), +]) +def test_ibmb_batch_loader(use_sparse_tensor, kwargs): + data = KarateClub()[0] + + loader = IBMBBatchLoader( + data, + batch_order='order', + input_nodes=torch.randperm(data.num_nodes)[:20], + return_edge_index_type='adj' if use_sparse_tensor else 'edge_index', + **kwargs, + ) + assert str(loader) == 'IBMBBatchLoader()' + assert len(loader) == 4 + assert sum([batch.output_node_mask.sum() for batch in loader]) == 20 + + for batch in loader: + if use_sparse_tensor: + assert isinstance(batch.edge_index, SparseTensor) + else: + assert isinstance(batch.edge_index, Tensor) + + +@withPackage('python_tsp', 'numba') +@pytest.mark.parametrize( + 'use_sparse_tensor', + [False] + [True] if torch_geometric.typing.WITH_TORCH_SPARSE else []) +@pytest.mark.parametrize('kwargs', [ + dict(num_nodes_per_batch=4, batch_size=1), + dict(num_nodes_per_batch=2, batch_size=2), +]) +def test_ibmb_node_loader(use_sparse_tensor, kwargs): + data = KarateClub()[0] + + loader = IBMBNodeLoader( + data, + batch_order='order', + input_nodes=torch.randperm(data.num_nodes)[:20], + num_auxiliary_nodes=4, + return_edge_index_type='adj' if use_sparse_tensor else 'edge_index', + **kwargs, + ) + assert str(loader) == 'IBMBNodeLoader()' + assert len(loader) == 5 + assert sum([batch.output_node_mask.sum() for batch in loader]) == 20 + + for batch in loader: + if use_sparse_tensor: + assert isinstance(batch.edge_index, SparseTensor) + else: + assert isinstance(batch.edge_index, Tensor) diff --git a/torch_geometric/loader/__init__.py b/torch_geometric/loader/__init__.py index 0aa431a10d8e..266f498a113b 100644 --- a/torch_geometric/loader/__init__.py +++ b/torch_geometric/loader/__init__.py @@ -11,6 +11,7 @@ GraphSAINTEdgeSampler, GraphSAINTRandomWalkSampler) from .shadow import ShaDowKHopSampler from .random_node_loader import RandomNodeLoader +# from .ibmb_loader import IBMBBatchLoader, IBMBNodeLoader from .zip_loader import ZipLoader from .data_list_loader import DataListLoader from .dense_data_loader import DenseDataLoader @@ -37,6 +38,8 @@ 'GraphSAINTRandomWalkSampler', 'ShaDowKHopSampler', 'RandomNodeLoader', + # 'IBMBBatchLoader', + # 'IBMBNodeLoader', 'ZipLoader', 
'DataListLoader', 'DenseDataLoader', diff --git a/torch_geometric/loader/ibmb_loader.py b/torch_geometric/loader/ibmb_loader.py new file mode 100644 index 000000000000..83e39e73a5eb --- /dev/null +++ b/torch_geometric/loader/ibmb_loader.py @@ -0,0 +1,909 @@ +import logging +import math +from typing import Callable, Iterator, List, NamedTuple, Optional, Tuple, Union + +import numpy as np +import scipy.sparse +import torch +from torch import Tensor +from tqdm import tqdm + +from torch_geometric.data import Data +from torch_geometric.typing import SparseTensor +from torch_geometric.utils import get_ppr, is_undirected, subgraph + +try: + import numba + WITH_NUMBA = True +except ImportError: # pragma: no cover + WITH_NUMBA = False + + +class OutputNodes(NamedTuple): + seed_id: Tensor + auxiliary_id: Tensor + + +class _IBMBBaseLoader(torch.utils.data.DataLoader): + def __init__(self, data: Data, **kwargs): + kwargs.pop('collate_fn', None) + batch_size = kwargs.get('batch_size', 1) + + output_nodes = self.get_output_nodes(self) + + if batch_size == 1: # Pre-process subgraphs: + data_list = ... + super().__init__(data_list, collate_fn=self._cache_fn, **kwargs) + else: + self.data = data + super().__init__(output_nodes, collate_fn=self._collate_fn, + **kwargs) + + def get_output_nodes(self) -> List[OutputNodes]: + raise NotImplementedError + + def _cache_fn(self, data_list: List[Data]) -> Data: + assert len(data_list) == 1 + return data_list[0] + + def _collate_fn(self, output_nodes: List[OutputNodes]) -> Data: + raise NotImplementedError + + def __repr__(self) -> str: + return f'{self.__class__.__name__}()' + + +############################################################################### + + +def get_partitions( + edge_index: Union[Tensor, SparseTensor], + num_partitions: int, + indices: Tensor, + num_nodes: int, + output_weight: Optional[float] = None, +) -> List[Tensor]: + assert isinstance( + edge_index, + (torch.LongTensor, + SparseTensor)), f'Unsupported edge_index type {type(edge_index)}' + if isinstance(edge_index, torch.LongTensor): + edge_index = SparseTensor.from_edge_index( + edge_index, sparse_sizes=(num_nodes, num_nodes)) + + if output_weight is not None and output_weight != 1: + node_weight = torch.ones(num_nodes) + node_weight[indices] = output_weight + else: + node_weight = None + + _, partptr, perm = edge_index.partition(num_parts=num_partitions, + recursive=False, weighted=False, + node_weight=node_weight) + + partitions = [] + for i in range(len(partptr) - 1): + partitions.append(perm[partptr[i]:partptr[i + 1]]) + + return partitions + + +def get_pair_wise_distance( + ys: List, + num_classes: int, + dist_type: str = 'kl', +) -> np.ndarray: + num_batches = len(ys) + + counts = np.zeros((num_batches, num_classes), dtype=np.int32) + for i in range(num_batches): + unique, count = np.unique(ys[i], return_counts=True) + counts[i, unique] = count + + counts += 1 + counts = counts / counts.sum(1).reshape(-1, 1) + pairwise_dist = np.zeros((num_batches, num_batches), dtype=np.float64) + + for i in range(0, num_batches - 1): + for j in range(i + 1, num_batches): + if dist_type == 'l1': + pairwise_dist[i, j] = np.sum(np.abs(counts[i] - counts[j])) + elif dist_type == 'kl': + + def kl_divergence(p: np.ndarray, q: np.ndarray): + return (p * np.log(p / q)).sum() + + pairwise_dist[i, j] = kl_divergence(counts[i], + counts[j]) + kl_divergence( + counts[j], counts[i]) + else: + raise ValueError + + pairwise_dist += pairwise_dist.T + pairwise_dist += 1e-5 # for numerical stability + 
np.fill_diagonal(pairwise_dist, 0.) + + return pairwise_dist + + +def indices_complete_check( + loader: List[Tuple[Union[Tensor, np.ndarray], Union[Tensor, np.ndarray]]], + output_indices: Union[Tensor, np.ndarray], +): + if isinstance(output_indices, Tensor): + output_indices = output_indices.cpu().numpy() + + outs = [] + for out, aux in loader: + if isinstance(out, Tensor): + out = out.cpu().numpy() + if isinstance(aux, Tensor): + aux = aux.cpu().numpy() + + assert np.all(np.in1d(out, + aux)), "Not all output nodes are in aux nodes!" + outs.append(out) + + outs = np.sort(np.concatenate(outs)) + assert np.all( + outs == np.sort(output_indices)), "Output nodes missing or duplicate!" + + +def get_subgraph( + out_indices: Tensor, + graph: Data, + return_edge_index_type: str, + adj: SparseTensor, + **kwargs, +): + if return_edge_index_type == 'adj': + assert adj is not None + + if return_edge_index_type == 'adj': + subg = Data(x=graph.x[out_indices], y=graph.y[out_indices], + edge_index=adj[out_indices, :][:, out_indices]) + elif return_edge_index_type == 'edge_index': + edge_index, edge_attr = subgraph(out_indices, graph.edge_index, + graph.edge_attr, relabel_nodes=True, + num_nodes=graph.num_nodes, + return_edge_mask=False) + subg = Data(x=graph.x[out_indices], y=graph.y[out_indices], + edge_index=edge_index, edge_attr=edge_attr) + else: + raise NotImplementedError + + for k, v in kwargs.items(): + subg[k] = v + + return subg + + +def define_sampler( + batch_order: str, + ys: List[Union[Tensor, np.ndarray, List]], + num_classes: int, + dist_type: str = 'kl', +): + if batch_order == 'rand': + logging.info("Running with random order") + sampler = torch.utils.data.RandomSampler(ys) + elif batch_order in ['order', 'sample']: + kl_div = get_pair_wise_distance(ys, num_classes, dist_type=dist_type) + if batch_order == 'order': + from python_tsp.heuristics import solve_tsp_simulated_annealing + best_perm, _ = solve_tsp_simulated_annealing(kl_div) + logging.info(f"Running with given order: {best_perm}") + sampler = IBMBOrderedSampler(best_perm) + else: + logging.info("Running with weighted sampling") + sampler = IBMBWeightedSampler(kl_div) + else: + raise ValueError + + return sampler + + +def create_batchwise_out_aux_pairs( + adj: SparseTensor, + partitions: List[Union[torch.LongTensor, np.ndarray]], + prime_indices: Union[torch.LongTensor, np.ndarray], + topk: int, + num_outnodeset_per_batch: int = 50, + alpha: float = 0.2, + ppr_iterations: int = 50, +) -> List[Tuple[np.ndarray, np.ndarray]]: + def ppr_power_method( + adj: SparseTensor, + batch: List[Union[np.ndarray, torch.LongTensor]], + topk: int, + num_iter: int, + alpha: float, + ) -> List[np.ndarray]: + + topk_neighbors = [] + logits = torch.zeros( + adj.size(0), len(batch), + device=adj.device()) # each column contains a set of output nodes + for i, tele_set in enumerate(batch): + logits[tele_set, i] = 1. 
/ len(tele_set) + + new_logits = logits.clone() + for i in range(num_iter): + new_logits = adj @ new_logits * (1 - alpha) + alpha * logits + + inds = new_logits.argsort(0) + nonzeros = (new_logits > 0).sum(0) + nonzeros = torch.minimum( + nonzeros, + torch.tensor([topk], dtype=torch.int64, device=adj.device())) + for i in range(new_logits.shape[1]): + topk_neighbors.append(inds[-nonzeros[i]:, i].cpu().numpy()) + + return topk_neighbors + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + if isinstance(prime_indices, Tensor): + prime_indices = prime_indices.cpu().numpy() + + adj = adj.to(device) + + cur_output_nodes = [] + loader = [] + + pbar = tqdm(range(len(partitions))) + pbar.set_description("Processing topic-sensitive PPR batches") + for n in pbar: + part = partitions[n] + if isinstance(part, Tensor): + part = part.cpu().numpy() + + primes_in_part, *_ = np.intersect1d(part, prime_indices, + assume_unique=True, + return_indices=True) + if len(primes_in_part): # no output nodes in this partition + cur_output_nodes.append(primes_in_part) + + # accumulate enough output nodes to make good use of GPU memory + if len(cur_output_nodes + ) >= num_outnodeset_per_batch or n == len(partitions) - 1: + topk_neighbors = ppr_power_method(adj, cur_output_nodes, topk, + ppr_iterations, alpha) + for i in range(len(cur_output_nodes)): + # force output nodes to be aux nodes + auxiliary_nodes = np.union1d(cur_output_nodes[i], + topk_neighbors[i]) + loader.append((cur_output_nodes[i], auxiliary_nodes)) + cur_output_nodes = [] + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + return loader + + +def get_pairs(ppr_mat: scipy.sparse.csr_matrix) -> np.ndarray: + ppr_mat = ppr_mat + ppr_mat.transpose() + + ppr_mat = ppr_mat.tocoo() + row, col, data = ppr_mat.row, ppr_mat.col, ppr_mat.data + mask = (row > col) # lu + + row, col, data = row[mask], col[mask], data[mask] + sort_arg = np.argsort(data)[::-1] + # sort_arg = parallel_sort.parallel_argsort(data)[::-1] + + # map prime_nodes to arange + ppr_pairs = np.vstack((row[sort_arg], col[sort_arg])).T + return ppr_pairs + + +_prime_orient_merge_numba: Optional[Callable] = None + + +def prime_orient_merge( + ppr_pairs: np.ndarray, + primes_per_batch: int, + num_nodes: int, +): + if not WITH_NUMBA: # pragma: no cover + raise ImportError("'prime_orient_merge' requires the 'numba' package") + + global _prime_orient_merge_numba + if _prime_orient_merge_numba is None: + _prime_orient_merge_numba = numba.njit(cache=True)(_prime_orient_merge) + + return _prime_orient_merge_numba(ppr_pairs, primes_per_batch, num_nodes) + + +def _prime_orient_merge( + ppr_pairs: np.ndarray, + primes_per_batch: int, + num_nodes: int, +): + id_primes_list = list(np.arange(num_nodes, dtype=np.int32).reshape(-1, 1)) + node_id_list = np.arange(num_nodes, dtype=np.int32) + placeholder = np.zeros(0, dtype=np.int32) + + for i, j in ppr_pairs: + id1, id2 = node_id_list[i], node_id_list[j] + if id1 > id2: + id1, id2 = id2, id1 + + if id1 != id2 and len(id_primes_list[id1]) + len( + id_primes_list[id2]) <= primes_per_batch: + id_primes_list[id1] = np.concatenate( + (id_primes_list[id1], id_primes_list[id2])) + node_id_list[id_primes_list[id2]] = id1 + id_primes_list[id2] = placeholder + + prime_lst = list() + ids = np.unique(node_id_list) + + for _id in ids: + prime_lst.append(list(id_primes_list[_id])) + + return list(prime_lst) + + +def prime_post_process(loader, merge_max_size): + from heapq import heapify, heappop, heappush + + h = [( + len(p), + p, + ) for p in 
loader] + heapify(h) + + while len(h) > 1: + len1, p1 = heappop(h) + len2, p2 = heappop(h) + if len1 + len2 <= merge_max_size: + heappush(h, (len1 + len2, p1 + p2)) + else: + heappush(h, ( + len1, + p1, + )) + heappush(h, ( + len2, + p2, + )) + break + + new_batch = [] + + while len(h): + _, p = heappop(h) + new_batch.append(p) + + return new_batch + + +def topk_ppr_matrix( + edge_index: Tensor, + num_nodes: int, + alpha: float, + eps: float, + output_node_indices: Union[np.ndarray, torch.LongTensor], + topk: int, + normalization='row', +) -> Tuple[scipy.sparse.csr_matrix, List[np.ndarray]]: + neighbors, weights = get_ppr(edge_index, alpha, eps, output_node_indices, + num_nodes) + + _, neighbor_counts = neighbors[0].unique(return_counts=True) + + ppr_matrix = SparseTensor( + row=torch.arange( + len(output_node_indices)).repeat_interleave(neighbor_counts), + col=neighbors[1], value=weights, + sparse_sizes=(len(output_node_indices), + num_nodes)).to_scipy(layout='csr') + + neighbors = [ + n.cpu().numpy() + for n in torch.split(neighbors[1], + neighbor_counts.cpu().tolist(), dim=0) + ] + weights = [ + n.cpu().numpy() + for n in torch.split(weights, + neighbor_counts.cpu().tolist(), dim=0) + ] + + def sparsify(neighbors: List[np.ndarray], weights: List[np.ndarray], + topk: int): + new_neighbors = [] + for n, w in zip(neighbors, weights): + idx_topk = np.argsort(w)[-topk:] + new_neighbor = n[idx_topk] + new_neighbors.append(new_neighbor) + + return new_neighbors + + neighbors = sparsify(neighbors, weights, topk) + neighbors = [ + np.union1d(nei, pr) for nei, pr in zip(neighbors, output_node_indices) + ] + + _, out_degree = torch.unique(edge_index[0], sorted=True, + return_counts=True) + if normalization == 'sym': + # Assume undirected (symmetric) adjacency matrix + deg_sqrt = np.sqrt(np.maximum(out_degree, 1e-12)) + deg_inv_sqrt = 1. / deg_sqrt + + row, col = ppr_matrix.nonzero() + ppr_matrix.data = deg_sqrt[output_node_indices[row]] * \ + ppr_matrix.data * \ + deg_inv_sqrt[col] + elif normalization == 'col': + # Assume undirected (symmetric) adjacency matrix + deg_inv = 1. 
/ np.maximum(out_degree, 1e-12) + + row, col = ppr_matrix.nonzero() + ppr_matrix.data = out_degree[output_node_indices[row]] * \ + ppr_matrix.data * \ + deg_inv[col] + elif normalization == 'row': + pass + else: + raise ValueError(f"Unknown PPR normalization: {normalization}") + + return ppr_matrix, neighbors + + +class IBMBBaseLoader(torch.utils.data.DataLoader): + def __init__( + self, + data_list: Union[List[Data], List[Tuple]], + graph: Data, + adj: SparseTensor, + return_edge_index_type: str, + **kwargs, + ): + self.graph = graph + self.adj = adj + self.return_edge_index_type = return_edge_index_type + if 'collate_fn' in kwargs: + del kwargs['collate_fn'] + super().__init__(data_list, collate_fn=self.collate_fn, **kwargs) + + def create_loader(self, *args, **kwargs): + raise NotImplementedError + + @classmethod + def prepare_cache( + cls, + graph: Data, + batch_wise_out_aux_pairs: List[Tuple[np.ndarray, np.ndarray]], + adj: Optional[SparseTensor], + return_edge_index_type: str, + ): + subgraphs = [] + + pbar = tqdm(batch_wise_out_aux_pairs) + pbar.set_description( + f"Caching data with type {return_edge_index_type}") + + if return_edge_index_type == 'adj': + assert adj is not None + + for out, aux in pbar: + mask = torch.from_numpy(np.in1d(aux, out)) + if isinstance(aux, np.ndarray): + aux = torch.from_numpy(aux) + subg = get_subgraph(aux, graph, return_edge_index_type, adj, + output_node_mask=mask) + subgraphs.append(subg) + + return subgraphs + + @classmethod + def create_adj_from_edge_index( + cls, + edge_index: Tensor, + num_nodes: int, + normalization: str, + ): + assert normalization in ['sym', 'rw'] + adj = SparseTensor.from_edge_index( + edge_index, + sparse_sizes=(num_nodes, num_nodes), + ) + adj = adj.fill_value(1.) + degree = adj.sum(0) + + degree[degree == 0.] = 1e-12 + deg_inv = 1 / degree + + if normalization == 'sym': + deg_inv_sqrt = deg_inv**0.5 + adj = adj * deg_inv_sqrt.reshape(1, -1) + adj = adj * deg_inv_sqrt.reshape(-1, 1) + elif normalization == 'rw': + adj = adj * deg_inv.reshape(-1, 1) + + return adj + + def collate_fn(self, data_list: List[Union[Data, Tuple]]): + if len(data_list) == 1 and isinstance(data_list[0], Data): + return data_list[0] + + out, aux = zip(*data_list) + out = np.concatenate(out) + aux = np.unique(np.concatenate(aux)) + mask = torch.from_numpy(np.in1d(aux, out)) + aux = torch.from_numpy(aux) + + subg = get_subgraph(aux, self.graph, self.return_edge_index_type, + self.adj, output_node_mask=mask) + return subg + + def __repr__(self) -> str: + return f'{self.__class__.__name__}()' + + +class IBMBBatchLoader(IBMBBaseLoader): + r"""The batch-wise influence-based data loader from the + `"Influence-Based Mini-Batching for Graph Neural Networks" + `__ paper. + + First, the METIS graph partitioning algorithm separates the graph into + :obj:`num_partitions` many partitions. + Afterwards, input/seed nodes and their auxiliary nodes (found via + topic-sensitive PageRank) are used to form a mini-batch. + + If :obj:`batch_size` is set to :obj:`1`, mini-batches are pre-calculated + and cached in memory. + Otherwise, only input nodes and their auxiliary nodes are pre-computed, and + mini-batches are collated on-the-fly. + + Args: + data (torch_geometric.data.Data): A + :class:`~torch_geometric.data.Data` object. + batch_order (str): A string indicating the batch order type (one of + :obj:`"order"`, :obj:`"sample"` or :obj:`"rand"`). + If :obj:`"order"`, calculates the pair-wise KL divergence between + every two batches to organize an optimal order. 
+ If :obj:`"sample"`, samples the next batch w.r.t. the last one in + which a batch with higher KL divergence score is more likely to be + sampled. + If :obj:`"rand"`, batches are generated randomly. + num_partitions (int): The number of partitions. + input_nodes (torch.Tensor): A vector containing the set of seed + nodes. + batch_expand_ratio (float, optional): The ratio between the returned + batch size and the original partition size. For example, set it to + :obj:`2.0` in case you would like the batch to have double the + number of nodes as the size of its partition. + (default: :obj:`1.0`) + metis_input_node_weight (float, optional): The weights on the input + nodes for METIS graph partitioning. (default: :obj:`None`) + alpha (float, optional): The teleport probability of the PageRank + calculation. (default: :obj:`0.2`) + approximate_ppr_iterations (int, optional): The number of power + iterations for PageRank calculation. (default: :obj:`50`) + return_edge_index_type (str, optional): A string indicating the output + type of edge indices (one of :obj:`"edge_index"` or :obj:`"adj"`). + If set to :obj:`"adj"`, the :obj:`edge_index` of the batch will + be a :class:`torch_sparse.SparseTensor`, otherwise a + :class:`torch.Tensor`. (default: :obj:`"edge_index"`) + **kwargs (optional): Additional arguments of + :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, + :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. + """ + def __init__( + self, + data: Data, + batch_order: str, + num_partitions: int, + input_nodes: Tensor, + batch_expand_ratio: Optional[float] = 1.0, + metis_input_node_weight: Optional[float] = None, + alpha: Optional[float] = 0.2, + approximate_ppr_iterations: Optional[int] = 50, + return_edge_index_type: str = 'edge_index', + **kwargs, + ): + self.subgraphs = [] + self.batch_wise_out_aux_pairs = [] + + assert is_undirected( + data.edge_index, + num_nodes=data.num_nodes), "Assume the graph to be undirected" + assert batch_order in ['rand', 'sample', 'order' + ], f"Unsupported batch order: {batch_order}" + + adj = self.create_adj_from_edge_index( + data.edge_index, + data.num_nodes, + normalization='rw', + ) + + self.cache_data = kwargs['batch_size'] == 1 + self.num_partitions = num_partitions + self.output_indices = input_nodes + assert return_edge_index_type in ['adj', 'edge_index'] + self.return_edge_index_type = return_edge_index_type + self.batch_expand_ratio = batch_expand_ratio + self.metis_output_weight = metis_input_node_weight + self.num_outnodeset_per_batch = 50 + self.alpha = alpha + self.approximate_ppr_iterations = approximate_ppr_iterations + + self.create_loader(data, adj) + + if len(self.batch_wise_out_aux_pairs) > 2: # <= 2 order makes no sense + ys = [ + data.y[out].numpy() for out, _ in self.batch_wise_out_aux_pairs + ] + sampler = define_sampler(batch_order, ys, data.y.max().item() + 1) + else: + sampler = None + + if not self.cache_data: + cached_data = data # need to cache the original graph + if return_edge_index_type == 'adj': + cached_adj = adj + else: + cached_adj = None + else: + cached_data = None + cached_adj = None + + super().__init__( + self.subgraphs + if self.cache_data else self.batch_wise_out_aux_pairs, + cached_data, + cached_adj, + return_edge_index_type, + sampler=sampler, + **kwargs, + ) + + def create_loader(self, graph: Data, adj: SparseTensor): + partitions = get_partitions( + adj, + self.num_partitions, + self.output_indices, + graph.num_nodes, + self.metis_output_weight, + ) + + # get output - auxiliary node 
pairs + topk = math.ceil(self.batch_expand_ratio * graph.num_nodes / + self.num_partitions) + batch_wise_out_aux_pairs = create_batchwise_out_aux_pairs( + adj, partitions, self.output_indices, topk, + self.num_outnodeset_per_batch, self.alpha, + self.approximate_ppr_iterations) + + indices_complete_check(batch_wise_out_aux_pairs, self.output_indices) + self.batch_wise_out_aux_pairs = batch_wise_out_aux_pairs + + if self.cache_data: + self.subgraphs = self.prepare_cache( + graph, + batch_wise_out_aux_pairs, + adj, + self.return_edge_index_type, + ) + + +class IBMBNodeLoader(IBMBBaseLoader): + r"""The node-wise influence-based data loader from the + `"Influence-Based Mini-Batching for Graph Neural Networks" + `__ paper. + + First, the Personalized PageRank (PPR) score for each input node is + computed, for which the :obj:`k` nodes with the highest scores are taken + auxiliary nodes. + Afterwards, input nodes are merged according to their pair-wise PPR scores. + + Similar to :class:`~torch_geometric.loader.IBMBBatchLoader`, subgraphs are + cached in memory for :obj:`batch_size = 1`, and collated on-the-fly + otherwise. + + Args: + data (torch_geometric.data.Data): A + :class:`~torch_geometric.data.Data` object. + batch_order (str): A string indicating the batch order type (one of + :obj:`"order"`, :obj:`"sample"` or :obj:`"rand"`). + If :obj:`"order"`, calculates the pair-wise KL divergence between + every two batches to organize an optimal order. + If :obj:`"sample"`, samples the next batch w.r.t. the last one in + which a batch with higher KL divergence score is more likely to be + sampled. + If :obj:`"rand"`, batches are generated randomly. + input_nodes (torch.Tensor): A vector containing the set of seed + nodes. + num_auxiliary_nodes (int): The number of auxiliary nodes per input + node. + num_nodes_per_batch (int): The number of seed nodes per batch. + alpha (float, optional): The teleport probability of the PageRank + calculation. (default: :obj:`0.2`) + eps (float, optional): The threshold for stopping the PPR calculation + The smaller :obj`eps` is, the more accurate are the results of + PPR calculation, but it also takes longer. + (default: :obj:`1e-5`) + return_edge_index_type (str, optional): A string indicating the output + type of edge indices (one of :obj:`"edge_index"` or :obj:`"adj"`). + If set to :obj:`"adj"`, the :obj:`edge_index` of the batch will + be a :class:`torch_sparse.SparseTensor`, otherwise a + :class:`torch.Tensor`. (default: :obj:`"edge_index"`) + **kwargs (optional): Additional arguments of + :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, + :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
+ """ + def __init__( + self, + data: Data, + batch_order: str, + input_nodes: torch.Tensor, + num_auxiliary_nodes: int, + num_nodes_per_batch: int, + alpha: float = 0.2, + eps: float = 1e-5, + return_edge_index_type: str = 'edge_index', + **kwargs, + ): + self.subgraphs = [] + self.node_wise_out_aux_pairs = [] + + assert is_undirected( + data.edge_index, + num_nodes=data.num_nodes), "Assume the graph to be undirected" + assert batch_order in ['rand', 'sample', 'order' + ], f"Unsupported batch order: {batch_order}" + + if return_edge_index_type == 'adj': + adj = self.create_adj_from_edge_index(data.edge_index, + data.num_nodes, + normalization='rw') + else: + adj = None + + self.cache_data = kwargs['batch_size'] == 1 + self._batchsize = kwargs['batch_size'] + self.output_indices = input_nodes.numpy() + assert return_edge_index_type in ['adj', 'edge_index'] + self.return_edge_index_type = return_edge_index_type + self.num_auxiliary_node_per_output = num_auxiliary_nodes + self.num_output_nodes_per_batch = num_nodes_per_batch + self.alpha = alpha + self.eps = eps + + self.create_loader(data, adj) + + if len(self.node_wise_out_aux_pairs) > 2: # <= 2 order makes no sense + ys = [ + data.y[out].numpy() for out, _ in self.node_wise_out_aux_pairs + ] + sampler = define_sampler(batch_order, ys, data.y.max().item() + 1) + else: + sampler = None + + if not self.cache_data: + cached_graph = data # need to cache the original graph + cached_adj = adj + else: + cached_graph = None + cached_adj = None + + super().__init__( + self.subgraphs + if self.cache_data else self.node_wise_out_aux_pairs, + cached_graph, + cached_adj, + return_edge_index_type, + sampler=sampler, + **kwargs, + ) + + def create_loader(self, graph: Data, adj: SparseTensor): + logging.info("Start PPR calculation") + ppr_matrix, neighbors = topk_ppr_matrix( + graph.edge_index, graph.num_nodes, self.alpha, self.eps, + torch.from_numpy(self.output_indices), + self.num_auxiliary_node_per_output) + + ppr_matrix = ppr_matrix[:, self.output_indices] + + logging.info("Getting PPR pairs") + ppr_pairs = get_pairs(ppr_matrix) + + output_list = prime_orient_merge( + ppr_pairs, + self.num_output_nodes_per_batch, + len(self.output_indices), + ) + output_list = prime_post_process( + output_list, + self.num_output_nodes_per_batch, + ) + node_wise_out_aux_pairs = [] + + if isinstance(neighbors, list): + neighbors = np.array(neighbors, dtype=object) + + def _union(inputs): + return np.unique(np.concatenate(inputs)) + + for p in output_list: + node_wise_out_aux_pairs.append( + (self.output_indices[p], + _union(neighbors[p]).astype(np.int64))) + + indices_complete_check(node_wise_out_aux_pairs, self.output_indices) + self.node_wise_out_aux_pairs = node_wise_out_aux_pairs + + if self.cache_data: + self.subgraphs = self.prepare_cache( + graph, + node_wise_out_aux_pairs, + adj, + self.return_edge_index_type, + ) + + +class IBMBOrderedSampler(torch.utils.data.Sampler[int]): + r"""A sampler with given order, specially for IBMB loaders. + + Args: + data_source (np.ndarray, torch.Tensor, List): A :obj:`np.ndarray`, + :obj:`torch.Tensor`, or :obj:`List` data object. Contains the + order of the batches. 
+ """ + def __init__(self, data_source: Union[np.ndarray, torch.Tensor, + List]) -> None: + self.data_source = data_source + super().__init__(data_source) + + def __iter__(self) -> Iterator[int]: + return iter(self.data_source) + + def __len__(self) -> int: + return len(self.data_source) + + +class IBMBWeightedSampler(torch.utils.data.Sampler[int]): + r"""A weighted sampler wrt the pair wise KL divergence. + The very first batch after initialization is sampled randomly, + with the next ones being sampled according to the last batch, + including the first batch in the next round. + + Args: + batch_kl_div (np.ndarray, torch.Tensor): A :obj:`np.ndarray` or + :obj:`torch.Tensor`, each element [i, j] contains the pair wise + KL divergence between batch i and j. + """ + def __init__(self, batch_kl_div: Union[np.ndarray, torch.Tensor]) -> None: + data_source = np.arange(batch_kl_div.shape[0]) + self.data_source = data_source + self.batch_kl_div = batch_kl_div + self.last_train_batch_id = 0 + super().__init__(data_source) + + def __iter__(self) -> Iterator[int]: + probs = self.batch_kl_div.copy() + + last = self.last_train_batch_id + num_batches = probs.shape[0] + + fetch_idx = [] + + next_id = 0 + while np.any(probs): + next_id = np.random.choice(num_batches, size=None, replace=False, + p=probs[last] / probs[last].sum()) + last = next_id + fetch_idx.append(next_id) + probs[:, next_id] = 0. + + self.last_train_batch_id = next_id + + return iter(fetch_idx) + + def __len__(self) -> int: + return len(self.data_source) From 3e6902237612150ed65db6bd60db2ac5c313b41e Mon Sep 17 00:00:00 2001 From: ArchieGertsman Date: Wed, 13 Sep 2023 01:29:35 -0500 Subject: [PATCH 1470/2432] Accelerated `LCMAggregation` by increasing parallelism (#8023) I've vectorized the computations such that the total number of calls to `GRUCell` during a forward pass is reduced from $\mathcal{O}(|\mathcal{V}|)$ to $\mathcal{O}(\log |\mathcal{V}|)$. I used a benchmark script to compare the old version to the new one, and have introduced this benchmark into `test_lcm.py`. Here are the results, running the tests on an NVIDIA A100 GPU and AMD EPYC 7413 24-core CPU: ``` Old version: +------------------+-----------+ | Name | Forward | |------------------+-----------| | N=32768, B=1024 | 3.5072s | | N=32768, B=2048 | 1.9874s | | N=32768, B=4096 | 1.8826s | | N=65536, B=1024 | 6.1835s | | N=65536, B=2048 | 3.6961s | | N=65536, B=4096 | 3.2131s | | N=131072, B=1024 | 11.3381s | | N=131072, B=2048 | 6.2320s | | N=131072, B=4096 | 5.2321s | | N=262144, B=1024 | 21.1921s | | N=262144, B=2048 | 11.6855s | | N=262144, B=4096 | 9.4793s | +------------------+-----------+ New version: +------------------+-----------+ | Name | Forward | |------------------+-----------| | N=32768, B=1024 | 1.2499s | | N=32768, B=2048 | 1.3941s | | N=32768, B=4096 | 1.7324s | | N=65536, B=1024 | 1.9671s | | N=65536, B=2048 | 2.1052s | | N=65536, B=4096 | 2.5111s | | N=131072, B=1024 | 3.7243s | | N=131072, B=2048 | 3.7287s | | N=131072, B=4096 | 4.3217s | | N=262144, B=1024 | 7.0736s | | N=262144, B=2048 | 6.9741s | | N=262144, B=4096 | 7.1758s | +------------------+-----------+ ``` We see up to $3\times$ speedup, and more consistent behavior when keeping `N` fixed and varying `B`. 
--------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/nn/aggr/test_lcm.py | 41 ++++++++++++++++++++++++++++++++++ torch_geometric/nn/aggr/lcm.py | 40 ++++++++++++++++++++++++--------- 3 files changed, 71 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d067d3b518e2..2821e10f42a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) -- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976)) +- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) diff --git a/test/nn/aggr/test_lcm.py b/test/nn/aggr/test_lcm.py index 60cb2335d527..46db4c7173ac 100644 --- a/test/nn/aggr/test_lcm.py +++ b/test/nn/aggr/test_lcm.py @@ -1,7 +1,10 @@ +from itertools import product + import pytest import torch from torch_geometric.nn import LCMAggregation +from torch_geometric.profile import benchmark def test_lcm_aggregation_with_project(): @@ -29,3 +32,41 @@ def test_lcm_aggregation_without_project(): def test_lcm_aggregation_error_handling(): with pytest.raises(ValueError, match="must be projected"): LCMAggregation(16, 32, project=False) + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--device', type=str, default='cuda') + parser.add_argument('--backward', action='http://github.com/store_true') + args = parser.parse_args() + + channels = 128 + batch_size_list = [2**i for i in range(10, 12)] + num_nodes_list = [2**i for i in range(15, 18)] + + aggr = LCMAggregation(channels, channels, project=False) + aggr = aggr.to(args.device) + + funcs = [] + func_names = [] + args_list = [] + for batch_size, num_nodes in product(batch_size_list, num_nodes_list): + x = torch.randn((num_nodes, channels), device=args.device) + index = torch.randint(0, batch_size, (num_nodes, ), device=args.device) + index = index.sort()[0] + + funcs.append(aggr) + func_names.append(f'B={batch_size}, N={num_nodes}') + args_list.append((x, index)) + + benchmark( + funcs=funcs, + func_names=func_names, + args=args_list, + num_steps=10 if args.device == 'cpu' else 100, + num_warmups=5 if args.device == 'cpu' else 50, + backward=args.backward, + progress_bar=True, + ) diff --git a/torch_geometric/nn/aggr/lcm.py b/torch_geometric/nn/aggr/lcm.py index ac96a6d14d2d..741efe2a30f8 100644 --- a/torch_geometric/nn/aggr/lcm.py +++ b/torch_geometric/nn/aggr/lcm.py @@ -1,6 +1,7 @@ from math import ceil, log2 from typing import Optional +import torch from torch 
import Tensor from torch.nn import GRUCell, Linear @@ -63,9 +64,6 @@ def reset_parameters(self): self.lin.reset_parameters() self.gru_cell.reset_parameters() - def binary_op(self, left: Tensor, right: Tensor) -> Tensor: - return (self.gru_cell(left, right) + self.gru_cell(right, left)) / 2.0 - @disable_dynamic_shapes(required_args=['dim_size', 'max_num_elements']) def forward( self, @@ -84,17 +82,37 @@ def forward( max_num_elements=max_num_elements) x = x.permute(1, 0, 2) # [num_neighbors, num_nodes, num_features] + _, num_nodes, num_features = x.size() depth = ceil(log2(x.size(0))) for _ in range(depth): - x = [ - self.binary_op(x[2 * i], x[2 * i + 1]) if - (2 * i + 1) < len(x) else x[2 * i] - for i in range(ceil(len(x) / 2)) - ] - - assert len(x) == 1 - return x[0] + half_size = ceil(x.size(0) / 2) + + if x.size(0) % 2 == 1: + # This level of the tree has an odd number of nodes, so the + # remaining unmatched node gets moved to the next level. + x, remainder = x[:-1].contiguous(), x[-1:] + else: + remainder = None + + left_right = x.view(-1, 2, num_nodes, num_features) + right_left = left_right.flip(dims=[1]) + + left_right = left_right.view(-1, num_features) + right_left = right_left.view(-1, num_features) + + # Execute the GRUCell for all (left, right) pairs in the current + # level of the tree in parallel: + out = self.gru_cell(left_right, right_left) + out = out.view(-1, 2, num_nodes, num_features) + out = out.mean(dim=1) + if remainder is not None: + out = torch.cat([out, remainder], dim=0) + + x = out.view(half_size, num_nodes, num_features) + + assert x.size(0) == 1 + return x.squeeze(0) def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.in_channels}, ' From 13cbae6daffe5d2b7d4c82eb1ace04bdbb5e5ebd Mon Sep 17 00:00:00 2001 From: ArchieGertsman Date: Thu, 14 Sep 2023 02:18:14 -0500 Subject: [PATCH 1471/2432] Changed `view` to `reshape` in `LCMAggregation` (#8026) `view` resulted in an error in some cases, so I changed it to `reshape`. I also modified the tests to reflect such a scenario. --- CHANGELOG.md | 2 +- test/nn/aggr/test_lcm.py | 4 ++-- torch_geometric/nn/aggr/lcm.py | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2821e10f42a4..feaa17524f71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) -- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023)) +- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023), [#8026](https://github.com/pyg-team/pytorch_geometric/pull/8026)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) diff --git a/test/nn/aggr/test_lcm.py b/test/nn/aggr/test_lcm.py index 46db4c7173ac..9664e2be826d 100644 --- a/test/nn/aggr/test_lcm.py +++ b/test/nn/aggr/test_lcm.py @@ -19,8 +19,8 @@ def test_lcm_aggregation_with_project(): def test_lcm_aggregation_without_project(): - x = torch.randn(6, 16) - index = torch.tensor([0, 0, 1, 1, 1, 2]) + x = torch.randn(5, 16) + index = torch.tensor([0, 1, 1, 2, 2]) aggr = LCMAggregation(16, 16, project=False) assert str(aggr) == 'LCMAggregation(16, 16, project=False)' diff --git a/torch_geometric/nn/aggr/lcm.py b/torch_geometric/nn/aggr/lcm.py index 741efe2a30f8..a237b863022b 100644 --- a/torch_geometric/nn/aggr/lcm.py +++ b/torch_geometric/nn/aggr/lcm.py @@ -91,15 +91,15 @@ def forward( if x.size(0) % 2 == 1: # This level of the tree has an odd number of nodes, so the # remaining unmatched node gets moved to the next level. 
- x, remainder = x[:-1].contiguous(), x[-1:] + x, remainder = x[:-1], x[-1:] else: remainder = None left_right = x.view(-1, 2, num_nodes, num_features) right_left = left_right.flip(dims=[1]) - left_right = left_right.view(-1, num_features) - right_left = right_left.view(-1, num_features) + left_right = left_right.reshape(-1, num_features) + right_left = right_left.reshape(-1, num_features) # Execute the GRUCell for all (left, right) pairs in the current # level of the tree in parallel: From 66c4999373daf37d11c5a2b68ed9c34e0b424572 Mon Sep 17 00:00:00 2001 From: Erik Huckvale <42946548+erikhuck@users.noreply.github.com> Date: Thu, 14 Sep 2023 04:33:22 -0400 Subject: [PATCH 1472/2432] Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` (#8024) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/models/test_basic_gnn.py | 22 ++++++++++++ test/nn/models/test_mlp.py | 27 ++++++++++++++ torch_geometric/nn/models/basic_gnn.py | 50 +++++++++++++++++++++++--- torch_geometric/nn/models/mlp.py | 28 +++++++++++++-- 5 files changed, 122 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index feaa17524f71..7354eb2710ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024)) - Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) diff --git a/test/nn/models/test_basic_gnn.py b/test/nn/models/test_basic_gnn.py index 958420828afa..947f76a565b0 100644 --- a/test/nn/models/test_basic_gnn.py +++ b/test/nn/models/test_basic_gnn.py @@ -160,6 +160,28 @@ def test_one_layer_gnn(out_dim, jk): assert model(x, edge_index).size() == (3, out_channels) +@pytest.mark.parametrize('norm', [ + 'BatchNorm', + 'GraphNorm', + 'InstanceNorm', + 'LayerNorm', +]) +def test_batch(norm): + x = torch.randn(3, 8) + edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) + batch = torch.tensor([0, 0, 1]) + + model = GraphSAGE(8, 16, num_layers=2, norm=norm) + assert model.supports_norm_batch == (norm != 'BatchNorm') + + out = model(x, edge_index, batch=batch) + assert out.size() == (3, 16) + + if model.supports_norm_batch: + with pytest.raises(RuntimeError, match="out of bounds"): + model(x, edge_index, batch=batch, batch_size=1) + + @onlyOnline @onlyNeighborSampler @pytest.mark.parametrize('jk', [None, 'last']) diff --git a/test/nn/models/test_mlp.py b/test/nn/models/test_mlp.py index d830a8ed77e6..6e6aeabe7baa 100644 --- a/test/nn/models/test_mlp.py +++ b/test/nn/models/test_mlp.py @@ -37,6 +37,33 @@ def test_mlp(norm, act_first, plain_last): assert torch.allclose(mlp(x), out) +@pytest.mark.parametrize('norm', [ + 'BatchNorm', + 'GraphNorm', + 'InstanceNorm', + 'LayerNorm', +]) +def test_batch(norm): + x = torch.randn(3, 8) + batch = torch.tensor([0, 0, 1]) + + model = MLP( + 8, + hidden_channels=16, + out_channels=32, + 
num_layers=2, + norm=norm, + ) + assert model.supports_norm_batch == (norm != 'BatchNorm') + + out = model(x, batch=batch) + assert out.size() == (3, 32) + + if model.supports_norm_batch: + with pytest.raises(RuntimeError, match="out of bounds"): + model(x, batch=batch, batch_size=1) + + def test_mlp_return_emb(): x = torch.randn(4, 16) diff --git a/torch_geometric/nn/models/basic_gnn.py b/torch_geometric/nn/models/basic_gnn.py index 017e567d3ec8..3ecfe8277e4b 100644 --- a/torch_geometric/nn/models/basic_gnn.py +++ b/torch_geometric/nn/models/basic_gnn.py @@ -1,4 +1,5 @@ import copy +import inspect from typing import Any, Callable, Dict, Final, List, Optional, Tuple, Union import torch @@ -64,6 +65,7 @@ class BasicGNN(torch.nn.Module): """ supports_edge_weight: Final[bool] supports_edge_attr: Final[bool] + supports_norm_batch: Final[bool] def __init__( self, @@ -129,6 +131,12 @@ def __init__( ) if norm_layer is None: norm_layer = torch.nn.Identity() + + self.supports_norm_batch = False + if hasattr(norm_layer, 'forward'): + norm_params = inspect.signature(norm_layer.forward).parameters + self.supports_norm_batch = 'batch' in norm_params + for _ in range(num_layers - 1): self.norms.append(copy.deepcopy(norm_layer)) @@ -173,10 +181,12 @@ def forward( # noqa edge_index, edge_weight=None, edge_attr=None, + batch=None, + batch_size=None, num_sampled_nodes_per_hop=None, num_sampled_edges_per_hop=None, ): - # type: (Tensor, Tensor, OptTensor, OptTensor, Optional[List[int]], Optional[List[int]]) -> Tensor # noqa + # type: (Tensor, Tensor, OptTensor, OptTensor, OptTensor, Optional[int], Optional[List[int]], Optional[List[int]]) -> Tensor # noqa pass @torch.jit._overload_method @@ -185,10 +195,12 @@ def forward( # noqa edge_index, edge_weight=None, edge_attr=None, + batch=None, + batch_size=None, num_sampled_nodes_per_hop=None, num_sampled_edges_per_hop=None, ): - # type: (Tensor, SparseTensor, OptTensor, OptTensor, Optional[List[int]], Optional[List[int]]) -> Tensor # noqa + # type: (Tensor, SparseTensor, OptTensor, OptTensor, OptTensor, Optional[int], Optional[List[int]], Optional[List[int]]) -> Tensor # noqa pass def forward( # noqa @@ -197,9 +209,11 @@ def forward( # noqa edge_index: Tensor, # TODO Support `SparseTensor` in type hint. edge_weight: OptTensor = None, edge_attr: OptTensor = None, + batch: OptTensor = None, + batch_size: Optional[int] = None, num_sampled_nodes_per_hop: Optional[List[int]] = None, num_sampled_edges_per_hop: Optional[List[int]] = None, - ) -> Tensor: + ): r""" Args: x (torch.Tensor): The input node features. @@ -208,6 +222,17 @@ def forward( # noqa supported by the underlying GNN layer). (default: :obj:`None`) edge_attr (torch.Tensor, optional): The edge features (if supported by the underlying GNN layer). (default: :obj:`None`) + batch (torch.Tensor, optional): The batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns + each element to a specific example. + Only needs to be passed in case the underlying normalization + layers require the :obj:`batch` information. + (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. + Only needs to be passed in case the underlying normalization + layers require the :obj:`batch` information. + (default: :obj:`None`) num_sampled_nodes_per_hop (List[int], optional): The number of sampled nodes per hop. 
Useful in :class:`~torch_geometric.loader.NeighborLoader` @@ -260,7 +285,10 @@ def forward( # noqa if i < self.num_layers - 1 or self.jk_mode is not None: if self.act is not None and self.act_first: x = self.act(x) - x = norm(x) + if self.supports_norm_batch: + x = norm(x, batch, batch_size) + else: + x = norm(x) if self.act is not None and not self.act_first: x = self.act(x) x = self.dropout(x) @@ -397,6 +425,8 @@ def forward( edge_index: Tensor, edge_weight: OptTensor = None, edge_attr: OptTensor = None, + batch: OptTensor = None, + batch_size: Optional[int] = None, num_sampled_nodes_per_hop: Optional[List[int]] = None, num_sampled_edges_per_hop: Optional[List[int]] = None, ) -> Tensor: @@ -405,6 +435,8 @@ def forward( edge_index, edge_weight, edge_attr, + batch, + batch_size, num_sampled_nodes_per_hop, num_sampled_edges_per_hop, ) @@ -426,6 +458,8 @@ def forward( edge_index: SparseTensor, edge_weight: OptTensor = None, edge_attr: OptTensor = None, + batch: OptTensor = None, + batch_size: Optional[int] = None, num_sampled_nodes_per_hop: Optional[List[int]] = None, num_sampled_edges_per_hop: Optional[List[int]] = None, ) -> Tensor: @@ -434,6 +468,8 @@ def forward( edge_index, edge_weight, edge_attr, + batch, + batch_size, num_sampled_nodes_per_hop, num_sampled_edges_per_hop, ) @@ -492,6 +528,7 @@ class GCN(BasicGNN): """ supports_edge_weight: Final[bool] = True supports_edge_attr: Final[bool] = False + supports_norm_batch: Final[bool] def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -536,6 +573,7 @@ class GraphSAGE(BasicGNN): """ supports_edge_weight: Final[bool] = False supports_edge_attr: Final[bool] = False + supports_norm_batch: Final[bool] def init_conv(self, in_channels: Union[int, Tuple[int, int]], out_channels: int, **kwargs) -> MessagePassing: @@ -577,6 +615,7 @@ class GIN(BasicGNN): """ supports_edge_weight: Final[bool] = False supports_edge_attr: Final[bool] = False + supports_norm_batch: Final[bool] def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -635,6 +674,7 @@ class GAT(BasicGNN): """ supports_edge_weight: Final[bool] = False supports_edge_attr: Final[bool] = True + supports_norm_batch: Final[bool] def init_conv(self, in_channels: Union[int, Tuple[int, int]], out_channels: int, **kwargs) -> MessagePassing: @@ -697,6 +737,7 @@ class PNA(BasicGNN): """ supports_edge_weight: Final[bool] = False supports_edge_attr: Final[bool] = True + supports_norm_batch: Final[bool] def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: @@ -738,6 +779,7 @@ class EdgeCNN(BasicGNN): """ supports_edge_weight: Final[bool] = False supports_edge_attr: Final[bool] = False + supports_norm_batch: Final[bool] def init_conv(self, in_channels: int, out_channels: int, **kwargs) -> MessagePassing: diff --git a/torch_geometric/nn/models/mlp.py b/torch_geometric/nn/models/mlp.py index d2be2475e641..947c5f6e5197 100644 --- a/torch_geometric/nn/models/mlp.py +++ b/torch_geometric/nn/models/mlp.py @@ -1,5 +1,6 @@ +import inspect import warnings -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, Final, List, Optional, Union import torch import torch.nn.functional as F @@ -71,6 +72,8 @@ class MLP(torch.nn.Module): bias per layer. (default: :obj:`True`) **kwargs (optional): Additional deprecated arguments of the MLP layer. 
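
A minimal end-to-end sketch of the new `batch`/`batch_size` arguments introduced by this patch (shapes and values below are illustrative and mirror the tests added above; `MLP` gains the same two arguments, and only norms whose `forward` accepts a `batch` argument, i.e. `supports_norm_batch=True`, actually consume them):

```python
import torch
from torch_geometric.nn import GraphSAGE

# Two graphs with 3 and 2 nodes, respectively, batched together:
x = torch.randn(5, 8)
edge_index = torch.tensor([[0, 1, 3], [1, 2, 4]])
batch = torch.tensor([0, 0, 0, 1, 1])

# 'GraphNorm' exposes `batch` in its forward signature, so the model
# detects `supports_norm_batch=True` and forwards the batch vector:
model = GraphSAGE(in_channels=8, hidden_channels=16, num_layers=2,
                  norm='GraphNorm')
out = model(x, edge_index, batch=batch)  # [5, 16]
```
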
""" + supports_norm_batch: Final[bool] + def __init__( self, channel_list: Optional[Union[List[int], int]] = None, @@ -160,6 +163,11 @@ def __init__( norm_layer = Identity() self.norms.append(norm_layer) + self.supports_norm_batch = False + if len(self.norms) > 0 and hasattr(self.norms[0], 'forward'): + norm_params = inspect.signature(self.norms[0].forward).parameters + self.supports_norm_batch = 'batch' in norm_params + self.reset_parameters() @property @@ -188,11 +196,24 @@ def reset_parameters(self): def forward( self, x: Tensor, + batch: Optional[Tensor] = None, + batch_size: Optional[int] = None, return_emb: NoneType = None, ) -> Tensor: r""" Args: x (torch.Tensor): The source tensor. + batch (torch.Tensor, optional): The batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns + each element to a specific example. + Only needs to be passed in case the underlying normalization + layers require the :obj:`batch` information. + (default: :obj:`None`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. + Only needs to be passed in case the underlying normalization + layers require the :obj:`batch` information. + (default: :obj:`None`) return_emb (bool, optional): If set to :obj:`True`, will additionally return the embeddings before execution of the final output layer. (default: :obj:`False`) @@ -206,7 +227,10 @@ def forward( x = lin(x) if self.act is not None and self.act_first: x = self.act(x) - x = norm(x) + if self.supports_norm_batch: + x = norm(x, batch, batch_size) + else: + x = norm(x) if self.act is not None and not self.act_first: x = self.act(x) x = F.dropout(x, p=self.dropout[i], training=self.training) From 36872e0c23e6a846a172b9a62f30c0617b28b38a Mon Sep 17 00:00:00 2001 From: xnuohz Date: Thu, 14 Sep 2023 17:37:36 +0800 Subject: [PATCH 1473/2432] Add `MixHopConv` to `torch_geometric.nn.conv` (#8025) From https://github.com/pyg-team/pytorch_geometric/issues/8022 - Add new operator `MixHopConv` in `nn.conv` - Add a test for it - Add an example for it Feel free to comment, thanks:) --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + examples/gcn.py | 2 +- examples/mixhop.py | 89 ++++++++++++++++++ test/nn/conv/test_mixhop_conv.py | 44 +++++++++ torch_geometric/nn/conv/__init__.py | 2 + torch_geometric/nn/conv/mixhop_conv.py | 125 +++++++++++++++++++++++++ 6 files changed, 262 insertions(+), 1 deletion(-) create mode 100644 examples/mixhop.py create mode 100644 test/nn/conv/test_mixhop_conv.py create mode 100644 torch_geometric/nn/conv/mixhop_conv.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 7354eb2710ce..6f162d9f3a0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024)) - Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) diff --git a/examples/gcn.py b/examples/gcn.py index 4aaf0e2317d8..c86b9ef03f0b 100644 --- a/examples/gcn.py +++ b/examples/gcn.py @@ -90,7 +90,7 @@ def test(): return accs -best_val_acc = final_test_acc = 0 +best_val_acc = test_acc = 0 times = [] for epoch in range(1, args.epochs + 1): start = time.time() diff --git a/examples/mixhop.py b/examples/mixhop.py new file mode 100644 index 000000000000..58b2c4579f50 --- /dev/null +++ b/examples/mixhop.py @@ -0,0 +1,89 @@ +import os.path as osp + +import torch +import torch.nn.functional as F + +from torch_geometric.datasets import Planetoid +from torch_geometric.nn import BatchNorm, Linear, MixHopConv + +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') +dataset = Planetoid(path, name='Cora') +data = dataset[0] + + +class MixHop(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = MixHopConv(dataset.num_features, 60, powers=[0, 1, 2]) + self.norm1 = BatchNorm(3 * 60) + + self.conv2 = MixHopConv(3 * 60, 60, powers=[0, 1, 2]) + self.norm2 = BatchNorm(3 * 60) + + self.conv3 = MixHopConv(3 * 60, 60, powers=[0, 1, 2]) + self.norm3 = BatchNorm(3 * 60) + + self.lin = Linear(3 * 60, dataset.num_classes) + + def forward(self, x, edge_index): + x = F.dropout(x, p=0.7, training=self.training) + + x = self.conv1(x, edge_index) + x = self.norm1(x) + x = F.dropout(x, p=0.9, training=self.training) + + x = self.conv2(x, edge_index) + x = self.norm2(x) + x = F.dropout(x, p=0.9, training=self.training) + + x = self.conv3(x, edge_index) + x = self.norm3(x) + x = F.dropout(x, p=0.9, training=self.training) + + return self.lin(x) + + +model, data = MixHop().to(device), data.to(device) +optimizer = torch.optim.SGD(model.parameters(), lr=0.5, weight_decay=0.005) +scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, + gamma=0.01) + + +def train(): + model.train() + optimizer.zero_grad() + out = model(data.x, data.edge_index) + loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) + loss.backward() + optimizer.step() + scheduler.step() + return float(loss) + + +@torch.no_grad() +def test(): + model.eval() + pred = model(data.x, data.edge_index).argmax(dim=-1) + + accs = [] + for mask in [data.train_mask, data.val_mask, data.test_mask]: + accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) + return accs + + +best_val_acc = test_acc = 0 +for epoch in range(1, 101): + loss = train() + train_acc, val_acc, tmp_test_acc = test() + if val_acc > best_val_acc: + best_val_acc = val_acc + test_acc = tmp_test_acc + print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' + f'Val: {best_val_acc:.4f}, Test: {test_acc:.4f}') diff --git a/test/nn/conv/test_mixhop_conv.py 
b/test/nn/conv/test_mixhop_conv.py new file mode 100644 index 000000000000..a9725cd801c2 --- /dev/null +++ b/test/nn/conv/test_mixhop_conv.py @@ -0,0 +1,44 @@ +import torch + +import torch_geometric.typing +from torch_geometric.nn import MixHopConv +from torch_geometric.testing import is_full_test +from torch_geometric.typing import SparseTensor +from torch_geometric.utils import to_torch_csc_tensor + + +def test_mixhop_conv(): + x = torch.randn(4, 16) + edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) + value = torch.rand(edge_index.size(1)) + adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) + adj2 = to_torch_csc_tensor(edge_index, value, size=(4, 4)) + + conv = MixHopConv(16, 32, powers=[0, 1, 2, 4]) + assert str(conv) == 'MixHopConv(16, 32, powers=[0, 1, 2, 4])' + + out1 = conv(x, edge_index) + assert out1.size() == (4, 128) + assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) + + out2 = conv(x, edge_index, value) + assert out2.size() == (4, 128) + assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) + + if torch_geometric.typing.WITH_TORCH_SPARSE: + adj3 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj4 = SparseTensor.from_edge_index(edge_index, value, (4, 4)) + assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) + assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) + + if is_full_test(): + t = '(Tensor, Tensor, OptTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, edge_index), out1, atol=1e-6) + assert torch.allclose(jit(x, edge_index, value), out2, atol=1e-6) + + if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: + t = '(Tensor, SparseTensor, OptTensor) -> Tensor' + jit = torch.jit.script(conv.jittable(t)) + assert torch.allclose(jit(x, adj3.t()), out1, atol=1e-6) + assert torch.allclose(jit(x, adj4.t()), out2, atol=1e-6) diff --git a/torch_geometric/nn/conv/__init__.py b/torch_geometric/nn/conv/__init__.py index 9a6dae927e9f..c51d5a831086 100644 --- a/torch_geometric/nn/conv/__init__.py +++ b/torch_geometric/nn/conv/__init__.py @@ -60,6 +60,7 @@ from .gps_conv import GPSConv from .antisymmetric_conv import AntiSymmetricConv from .dir_gnn_conv import DirGNNConv +from .mixhop_conv import MixHopConv __all__ = [ 'MessagePassing', @@ -127,6 +128,7 @@ 'GPSConv', 'AntiSymmetricConv', 'DirGNNConv', + 'MixHopConv', ] classes = __all__ diff --git a/torch_geometric/nn/conv/mixhop_conv.py b/torch_geometric/nn/conv/mixhop_conv.py new file mode 100644 index 000000000000..31e530741188 --- /dev/null +++ b/torch_geometric/nn/conv/mixhop_conv.py @@ -0,0 +1,125 @@ +from typing import List, Optional + +import torch +from torch import Tensor +from torch.nn import Parameter + +from torch_geometric.nn.conv import MessagePassing +from torch_geometric.nn.conv.gcn_conv import gcn_norm +from torch_geometric.nn.dense.linear import Linear +from torch_geometric.nn.inits import zeros +from torch_geometric.typing import Adj, OptTensor, SparseTensor +from torch_geometric.utils import spmm + + +class MixHopConv(MessagePassing): + r"""The Mix-Hop graph convolutional operator from the + `"MixHop: Higher-Order Graph Convolutional Architecturesvia Sparsified + Neighborhood Mixing" `_ paper + + .. 
math:: + \mathbf{X}^{\prime}={\Bigg\Vert}_{p\in P} + {\left( \mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}} + \mathbf{\hat{D}}^{-1/2} \right)}^p \mathbf{X} \mathbf{\Theta}, + + where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the + adjacency matrix with inserted self-loops and + :math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix. + + Args: + in_channels (int): Size of each input sample, or :obj:`-1` to derive + the size from the first input(s) to the forward method. + out_channels (int): Size of each output sample. + powers (List[int], optional): The powers of the adjacency matrix to + use. (default: :obj:`[0, 1, 2]`) + add_self_loops (bool, optional): If set to :obj:`False`, will not add + self-loops to the input graph. (default: :obj:`True`) + bias (bool, optional): If set to :obj:`False`, the layer will not learn + an additive bias. (default: :obj:`True`) + **kwargs (optional): Additional arguments of + :class:`torch_geometric.nn.conv.MessagePassing`. + + Shapes: + - **input:** + node features :math:`(|\mathcal{V}|, F_{in})`, + edge indices :math:`(2, |\mathcal{E}|)`, + edge weights :math:`(|\mathcal{E}|)` *(optional)* + - **output:** + node features :math:`(|\mathcal{V}|, |P| \cdot F_{out})` + """ + def __init__( + self, + in_channels: int, + out_channels: int, + powers: Optional[List[int]] = None, + add_self_loops: bool = True, + bias: bool = True, + **kwargs, + ): + kwargs.setdefault('aggr', 'add') + super().__init__(**kwargs) + + if powers is None: + powers = [0, 1, 2] + + self.in_channels = in_channels + self.out_channels = out_channels + self.powers = powers + self.add_self_loops = add_self_loops + + self.lins = torch.nn.ModuleList([ + Linear(in_channels, out_channels, bias=False) + if p in powers else torch.nn.Identity() + for p in range(max(powers) + 1) + ]) + + if bias: + self.bias = Parameter(torch.empty(len(powers) * out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + for lin in self.lins: + if hasattr(lin, 'reset_parameters'): + lin.reset_parameters() + zeros(self.bias) + + def forward(self, x: Tensor, edge_index: Adj, + edge_weight: OptTensor = None) -> Tensor: + + if isinstance(edge_index, Tensor): + edge_index, edge_weight = gcn_norm( # yapf: disable + edge_index, edge_weight, x.size(self.node_dim), False, + self.add_self_loops, self.flow, x.dtype) + elif isinstance(edge_index, SparseTensor): + edge_index = gcn_norm( # yapf: disable + edge_index, edge_weight, x.size(self.node_dim), False, + self.add_self_loops, self.flow, x.dtype) + + outs = [self.lins[0](x)] + + for lin in self.lins[1:]: + # propagate_type: (x: Tensor, edge_weight: OptTensor) + x = self.propagate(edge_index, x=x, edge_weight=edge_weight, + size=None) + + outs.append(lin.forward(x)) + + out = torch.cat([outs[p] for p in self.powers], dim=-1) + + if self.bias is not None: + out = out + self.bias + + return out + + def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor: + return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j + + def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor: + return spmm(adj_t, x, reduce=self.aggr) + + def __repr__(self) -> str: + return (f'{self.__class__.__name__}({self.in_channels}, ' + f'{self.out_channels}, powers={self.powers})') From 2f7d04a352f64ae5071dd2bdbba9e50cf10b9d4f Mon Sep 17 00:00:00 2001 From: Erik Huckvale <42946548+erikhuck@users.noreply.github.com> Date: Fri, 15 Sep 2023 02:45:10 -0400 Subject: [PATCH 1474/2432] 
Adds the option to use batch-level normalization layers in `AttentionalAggregation`/`MLPAggregation` (#8033) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- torch_geometric/nn/aggr/attention.py | 14 ++++++++++++-- torch_geometric/nn/aggr/mlp.py | 2 +- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f162d9f3a0a..baa53f116a81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) -- Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024)) +- Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) - Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) diff --git a/torch_geometric/nn/aggr/attention.py b/torch_geometric/nn/aggr/attention.py index c531f3d71200..02a4fa1537ab 100644 --- a/torch_geometric/nn/aggr/attention.py +++ b/torch_geometric/nn/aggr/attention.py @@ -3,6 +3,7 @@ import torch from torch import Tensor +import torch_geometric from torch_geometric.nn.aggr import Aggregation from torch_geometric.nn.inits import reset from torch_geometric.utils import softmax @@ -50,8 +51,17 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, dim: int = -2) -> Tensor: self.assert_two_dimensional_input(x, dim) - gate = self.gate_nn(x) - x = self.nn(x) if self.nn is not None else x + + if isinstance(self.gate_nn, torch_geometric.nn.MLP): + gate = self.gate_nn(x, index, dim_size) + else: + gate = self.gate_nn(x) + + if isinstance(self.nn, torch_geometric.nn.MLP): + x = self.nn(x, index, dim_size) + elif self.nn is not None: + x = self.nn(x) + gate = softmax(gate, index, ptr, dim_size, dim) return self.reduce(gate * x, index, ptr, dim_size, dim) diff --git a/torch_geometric/nn/aggr/mlp.py b/torch_geometric/nn/aggr/mlp.py index 99a72986783d..389225d47fb7 100644 --- a/torch_geometric/nn/aggr/mlp.py +++ b/torch_geometric/nn/aggr/mlp.py @@ -54,7 +54,7 @@ def forward(self, x: Tensor, index: Optional[Tensor] = None, dim: int = -2) -> Tensor: x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, max_num_elements=self.max_num_elements) - return self.mlp(x.view(-1, x.size(1) * x.size(2))) + return self.mlp(x.view(-1, x.size(1) * x.size(2)), index, dim_size) def __repr__(self) -> str: return (f'{self.__class__.__name__}({self.in_channels}, ' From d167d6e1c851b10cd39f151693bb55d399a3f448 Mon Sep 17 00:00:00 2001 From: Mohamad Zamini <32536264+mzamini92@users.noreply.github.com> Date: Fri, 15 Sep 2023 01:21:41 -0600 Subject: [PATCH 1475/2432] Replace `einsum` with `matmul` in `Performer` (#8035) The previous implementation uses `torch.einsum` to perform the matrix 
multiplications. However, `torch.einsum` is not always as efficient as `torch.matmul`. The previous implementation needs to perform two `torch.einsum` calls, one for the inner product and one for the softmax. The updated implementation uses `torch.matmul` for all of the matrix multiplications. This is more efficient, especially on GPUs. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- torch_geometric/nn/attention/performer.py | 29 ++++++++++------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/torch_geometric/nn/attention/performer.py b/torch_geometric/nn/attention/performer.py index b82b51f8e93e..2fe9209cb502 100644 --- a/torch_geometric/nn/attention/performer.py +++ b/torch_geometric/nn/attention/performer.py @@ -43,25 +43,22 @@ def linear_attention(q: Tensor, k: Tensor, v: Tensor) -> Tensor: \mathbf{\hat{D}}^{-1}(\mathbf{Q}'((\mathbf{K}')^{\top} \mathbf{V})) """ - k_contract = k.sum(dim=-2) - D_inv = 1.0 / torch.einsum('...Lr,...r->...L', q, k_contract) - kv = torch.einsum('...Lr,...Ld->...rd', k, v) - qkv = torch.einsum('...Lr,...rd->...Ld', q, kv) - out = torch.einsum('...L,...Ld->...Ld', D_inv, qkv) + D_inv = 1.0 / (q @ k.sum(dim=-2).unsqueeze(-1)) + kv = k.transpose(-2, -1) @ v + qkv = q @ kv + out = torch.einsum('...L,...Ld->...Ld', D_inv.squeeze(-1), qkv) return out -def generalized_kernel(x: Tensor, mat: Tensor, - kernel: Callable = torch.nn.ReLU(), - epsilon: float = 0.001) -> Tensor: - r"""Apply generalized kernelizable attention with - kernel functions such as the ReLU. - """ - num_batches, num_heads, *_ = x.shape - # Expand projection matrix to number of batches and number of heads - projection = mat.expand(num_batches, num_heads, *mat.shape) - # "Inner" product x with projection matrix - x = torch.einsum('...id,...jd->...ij', x, projection) +def generalized_kernel( + x: Tensor, + mat: Tensor, + kernel: Callable = torch.nn.ReLU(), + epsilon: float = 0.001, +) -> Tensor: + batch_size, num_heads = x.size()[:2] + projection = mat.t().expand(batch_size, num_heads, -1, -1) + x = x @ projection out = kernel(x) + epsilon return out From eb15f68d9b293572d82080ac82a7d5341a77d12c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 15 Sep 2023 09:26:11 +0200 Subject: [PATCH 1476/2432] Add a test for `SAGEConv` with `MLPAggregation` (#8037) --- test/nn/conv/test_sage_conv.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/test/nn/conv/test_sage_conv.py b/test/nn/conv/test_sage_conv.py index 0ff0066ea17d..42e3998b373c 100644 --- a/test/nn/conv/test_sage_conv.py +++ b/test/nn/conv/test_sage_conv.py @@ -2,7 +2,7 @@ import torch import torch_geometric.typing -from torch_geometric.nn import SAGEConv +from torch_geometric.nn import MLPAggregation, SAGEConv from torch_geometric.testing import assert_module, is_full_test from torch_geometric.typing import SparseTensor @@ -87,6 +87,25 @@ def test_lstm_aggr_sage_conv(): conv(x, edge_index) +def test_mlp_sage_conv(): + x = torch.randn(4, 8) + edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + + conv = SAGEConv( + in_channels=8, + out_channels=32, + aggr=MLPAggregation( + in_channels=8, + out_channels=8, + max_num_elements=2, + num_layers=1, + ), + ) + + out = conv(x, edge_index) + assert out.size() == (4, 32) + + @pytest.mark.parametrize('aggr_kwargs', [ dict(mode='cat'), dict(mode='proj', mode_kwargs=dict(in_channels=8, out_channels=16)), From 080a6e9246c040ac77516862b5729caf05996641 Mon Sep 17 
00:00:00 2001 From: Matthias Fey Date: Fri, 15 Sep 2023 11:17:30 +0200 Subject: [PATCH 1477/2432] Weighted sampling in `NeighborLoader` and `LinkNeighborLoader` (#8038) --- CHANGELOG.md | 1 + test/loader/test_neighbor_loader.py | 70 ++++++++++++++++++- .../loader/link_neighbor_loader.py | 9 +++ torch_geometric/loader/neighbor_loader.py | 9 +++ torch_geometric/sampler/neighbor_sampler.py | 45 ++++++++++-- 5 files changed, 128 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index baa53f116a81..5cb3241ae843 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) - Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) diff --git a/test/loader/test_neighbor_loader.py b/test/loader/test_neighbor_loader.py index 17e2dd326544..434558a8a7eb 100644 --- a/test/loader/test_neighbor_loader.py +++ b/test/loader/test_neighbor_loader.py @@ -20,7 +20,11 @@ withCUDA, withPackage, ) -from torch_geometric.typing import WITH_PYG_LIB, WITH_TORCH_SPARSE +from torch_geometric.typing import ( + WITH_PYG_LIB, + WITH_TORCH_SPARSE, + WITH_WEIGHTED_NEIGHBOR_SAMPLE, +) from torch_geometric.utils import ( is_undirected, sort_edge_index, @@ -714,3 +718,67 @@ def test_neighbor_loader_mapping(): batch.n_id[batch.edge_index], data.edge_index[:, batch.e_id], ) + + +@pytest.mark.skipif( + not WITH_WEIGHTED_NEIGHBOR_SAMPLE, + reason="'pyg-lib' does not support weighted neighbor sampling", +) +def test_weighted_homo_neighbor_loader(): + edge_index = torch.tensor([ + [1, 3, 0, 4], + [2, 2, 1, 3], + ]) + edge_weight = torch.tensor([0.0, 1.0, 0.0, 1.0]) + + data = Data(num_nodes=5, edge_index=edge_index, edge_weight=edge_weight) + + loader = NeighborLoader( + data, + input_nodes=torch.tensor([2]), + num_neighbors=[1] * 2, + batch_size=1, + weight_attr='edge_weight', + ) + assert len(loader) == 1 + + batch = next(iter(loader)) + + assert batch.num_nodes == 3 + assert batch.n_id.tolist() == [2, 3, 4] + assert batch.num_edges == 2 + assert batch.n_id[batch.edge_index].tolist() == [[3, 4], [2, 3]] + + +@pytest.mark.skipif( + not WITH_WEIGHTED_NEIGHBOR_SAMPLE, + reason="'pyg-lib' does not support weighted neighbor sampling", +) +def test_weighted_hetero_neighbor_loader(): + edge_index = torch.tensor([ + [1, 3, 0, 4], + [2, 2, 1, 3], + ]) + edge_weight = torch.tensor([0.0, 1.0, 0.0, 1.0]) + + data = HeteroData() + data['paper'].num_nodes = 5 + data['paper', 'to', 'paper'].edge_index = edge_index + data['paper', 'to', 'paper'].edge_weight = edge_weight + + loader = NeighborLoader( + data, + input_nodes=('paper', torch.tensor([2])), + num_neighbors=[1] * 2, + batch_size=1, + weight_attr='edge_weight', + ) + assert len(loader) == 1 + + batch = next(iter(loader)) + + assert batch['paper'].num_nodes == 3 + assert batch['paper'].n_id.tolist() == [2, 3, 4] + assert batch['paper', 'paper'].num_edges == 2 + global_edge_index = 
batch['paper'].n_id[batch['paper', 'paper'].edge_index] + assert global_edge_index.tolist() == [[3, 4], [2, 3]] diff --git a/torch_geometric/loader/link_neighbor_loader.py b/torch_geometric/loader/link_neighbor_loader.py index 7d9e8c79397f..9871088eef23 100644 --- a/torch_geometric/loader/link_neighbor_loader.py +++ b/torch_geometric/loader/link_neighbor_loader.py @@ -165,6 +165,13 @@ class LinkNeighborLoader(LinkLoader): guaranteed to fulfill temporal constraints, *i.e.* neighbors have an earlier or equal timestamp than the center node. Only used if :obj:`edge_label_time` is set. (default: :obj:`None`) + weight_attr (str, optional): The name of the attribute that denotes + edge weights in the graph. + If set, weighted/biased sampling will be used such that neighbors + are more likely to get sampled the higher their edge weights are. + Edge weights do not need to sum to one, but must be non-negative, + finite and have a non-zero sum within local neighborhoods. + (default: :obj:`None`) transform (callable, optional): A function/transform that takes in a sampled mini-batch and returns a transformed version. (default: :obj:`None`) @@ -207,6 +214,7 @@ def __init__( neg_sampling: Optional[NegativeSampling] = None, neg_sampling_ratio: Optional[Union[int, float]] = None, time_attr: Optional[str] = None, + weight_attr: Optional[str] = None, transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, is_sorted: bool = False, @@ -233,6 +241,7 @@ def __init__( disjoint=disjoint, temporal_strategy=temporal_strategy, time_attr=time_attr, + weight_attr=weight_attr, is_sorted=is_sorted, share_memory=kwargs.get('num_workers', 0) > 0, directed=directed, diff --git a/torch_geometric/loader/neighbor_loader.py b/torch_geometric/loader/neighbor_loader.py index a5213b3b9882..9cbf0bcbaebe 100644 --- a/torch_geometric/loader/neighbor_loader.py +++ b/torch_geometric/loader/neighbor_loader.py @@ -165,6 +165,13 @@ class NeighborLoader(NodeLoader): guaranteed to fulfill temporal constraints, *i.e.* neighbors have an earlier or equal timestamp than the center node. (default: :obj:`None`) + weight_attr (str, optional): The name of the attribute that denotes + edge weights in the graph. + If set, weighted/biased sampling will be used such that neighbors + are more likely to get sampled the higher their edge weights are. + Edge weights do not need to sum to one, but must be non-negative, + finite and have a non-zero sum within local neighborhoods. + (default: :obj:`None`) transform (callable, optional): A function/transform that takes in a sampled mini-batch and returns a transformed version. 
(default: :obj:`None`) @@ -204,6 +211,7 @@ def __init__( disjoint: bool = False, temporal_strategy: str = 'uniform', time_attr: Optional[str] = None, + weight_attr: Optional[str] = None, transform: Optional[Callable] = None, transform_sampler_output: Optional[Callable] = None, is_sorted: bool = False, @@ -226,6 +234,7 @@ def __init__( disjoint=disjoint, temporal_strategy=temporal_strategy, time_attr=time_attr, + weight_attr=weight_attr, is_sorted=is_sorted, share_memory=kwargs.get('num_workers', 0) > 0, directed=directed, diff --git a/torch_geometric/sampler/neighbor_sampler.py b/torch_geometric/sampler/neighbor_sampler.py index f80ce9002476..2a4c3d0aa408 100644 --- a/torch_geometric/sampler/neighbor_sampler.py +++ b/torch_geometric/sampler/neighbor_sampler.py @@ -43,6 +43,7 @@ def __init__( disjoint: bool = False, temporal_strategy: str = 'uniform', time_attr: Optional[str] = None, + weight_attr: Optional[str] = None, is_sorted: bool = False, share_memory: bool = False, # Deprecated: @@ -65,18 +66,30 @@ def __init__( if self.data_type == DataType.homogeneous: self.num_nodes = data.num_nodes - self.node_time = data[time_attr] if time_attr else None + + self.node_time: Optional[Tensor] = None + if time_attr is not None: + self.node_time = data[time_attr] # Convert the graph data into CSC format for sampling: self.colptr, self.row, self.perm = to_csc( data, device='cpu', share_memory=share_memory, is_sorted=is_sorted, src_node_time=self.node_time) + self.edge_weight: Optional[Tensor] = None + if weight_attr is not None: + self.edge_weight = data[weight_attr] + if self.perm is not None: + self.edge_weight = self.edge_weight[self.perm] + elif self.data_type == DataType.heterogeneous: self.node_types, self.edge_types = data.metadata() self.num_nodes = {k: data[k].num_nodes for k in self.node_types} - self.node_time = data.collect(time_attr) if time_attr else None + + self.node_time: Optional[Dict[NodeType, Tensor]] = None + if time_attr is not None: + self.node_time = data.collect(time_attr) # Conversion to/from C++ string type: Since C++ cannot take # dictionaries with tuples as key as input, edge type triplets need @@ -91,6 +104,16 @@ def __init__( self.row_dict = remap_keys(row_dict, self.to_rel_type) self.colptr_dict = remap_keys(colptr_dict, self.to_rel_type) + self.edge_weight: Optional[Dict[EdgeType, Tensor]] = None + if weight_attr is not None: + self.edge_weight = data.collect(weight_attr) + for edge_type, edge_weight in self.edge_weight.items(): + if self.perm.get(edge_type, None) is not None: + edge_weight = edge_weight[self.perm[edge_type]] + self.edge_weight[edge_type] = edge_weight + self.edge_weight = remap_keys(self.edge_weight, + self.to_rel_type) + else: # self.data_type == DataType.remote feature_store, graph_store = data @@ -106,7 +129,7 @@ def __init__( for node_type in self.node_types } - self.node_time: Optional[Dict[str, Tensor]] = None + self.node_time: Optional[Dict[NodeType, Tensor]] = None if time_attr is not None: # If the `time_attr` is present, we expect that `GraphStore` # holds all edges sorted by destination, and within local @@ -136,6 +159,13 @@ def __init__( for time_attr, time_tensor in zip(time_attrs, time_tensors) } + self.edge_weight: Optional[Dict[EdgeType, Tensor]] = None + if weight_attr is not None: + raise NotImplementedError( + f"'weight_attr' argument not yet supported within " + f"'{self.__class__.__name__}' for " + f"'(FeatureStore, GraphStore)' inputs") + # Conversion to/from C++ string type (see above): self.to_rel_type = {k: '__'.join(k) for k 
in self.edge_types} self.to_edge_type = {v: k for k, v in self.to_rel_type.items()} @@ -145,6 +175,11 @@ def __init__( self.row_dict = remap_keys(row_dict, self.to_rel_type) self.colptr_dict = remap_keys(colptr_dict, self.to_rel_type) + if (self.edge_weight is not None + and not torch_geometric.typing.WITH_WEIGHTED_NEIGHBOR_SAMPLE): + raise ImportError("Weighted neighbor sampling requires " + "'pyg-lib>=0.3.0'") + self.num_neighbors = num_neighbors self.replace = replace self.subgraph_type = SubgraphType(subgraph_type) @@ -233,7 +268,7 @@ def _sample( seed_time, ) if torch_geometric.typing.WITH_WEIGHTED_NEIGHBOR_SAMPLE: - args += (None, ) + args += (self.edge_weight, ) args += ( True, # csc self.replace, @@ -313,7 +348,7 @@ def _sample( seed_time, ) if torch_geometric.typing.WITH_WEIGHTED_NEIGHBOR_SAMPLE: - args += (None, ) + args += (self.edge_weight, ) args += ( True, # csc self.replace, From 590879038336d10a141778e091d0b659c1554bcd Mon Sep 17 00:00:00 2001 From: Jay Bhambhani Date: Sun, 17 Sep 2023 04:22:43 -0400 Subject: [PATCH 1478/2432] `Database` implementation (#8028) this is related to #7946 this is a rough draft to create an abstract base class as we integrate this into the dataset and dataloader in the future. the idea is to create this class and implement a sqlite version of it. --- torch_geometric/data/database.py | 123 +++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 torch_geometric/data/database.py diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py new file mode 100644 index 000000000000..e1d48d44034b --- /dev/null +++ b/torch_geometric/data/database.py @@ -0,0 +1,123 @@ +import abc +from collections import namedtuple +from dataclasses import dataclass +import io +import itertools +import json +from typing import Iterable, Generator, Optional, Any + +import sqlite3 +import torch + +from torch_geometric.data.data import Data +from torch_geometric.typing import OptTensor + + +class GraphLabel: + + def __init__(self, id: str, *args, **kwargs): + self.id = id + for key, value in kwargs.items(): + setattr(self, key, value) + +@dataclass +class GraphRow: + id: str + data: Optional[dict[str, Optional[bytes]]] + + +def chunk(seq: Iterable, chunk_size: int) -> Generator[list, Any, None]: + "chunk data into chunks of chunk_size" + it = iter(seq) + while True: + batch = list(itertools.islice(it, chunk_size)) + if not batch: + return + yield batch + + +def namedtuple_factory(cursor, row): + """util function to create a namedtuple Row foe db results""" + fields = [column[0] for column in cursor.description] + cls = namedtuple("Row", fields) + return cls._make(row) + + +class Database(abc.ABC): + + def __init__(self, credentials, *args, **kwargs): + self.connection = self._get_connection(credentials) + + @abc.abstractmethod + def _initialize(self): + """initialize the database in some way if needed""" + raise NotImplementedError() + + @abc.abstractmethod + def insert(self, labels: Iterable[GraphLabel], values: Iterable[Data], batch_size=10000) -> list[str]: + """insert data into a database""" + raise NotImplementedError() + + @abc.abstractmethod + def serialize_data(self, data: Data) -> GraphRow: + """serialize the data""" + raise NotImplementedError() + + @abc.abstractmethod + def get(self, key: str) -> Data: + """get data by key""" + raise NotImplementedError() + + def multi_get(self, keys: list[str]) -> list[Data]: + """get multiple keys""" + return [self.get(key) for key in keys] + + +class SQLiteDatabase(abc.ABC): + + 
def __init__(self, credentials, table='pyg_database') -> None: + self.table = table + super().__init__(credentials) + + def _initialize(self): + create = """CREATE TABLE ? (id TEXT, data, TEXT)""" + self.cursor.execute(create, self.table) + + def insert(self, labels: Iterable[GraphLabel], values: Iterable[Data], batch_size=10000) -> list[GraphRow]: + for chunk_data in chunk(zip(labels, values), batch_size): + serialized = [self.serialize_data(label, value) for label, value in chunk_data] + query = f""" + INSERT INTO {self.table} (id, data) + VALUES (?, ?)""" + self.cursor.executemany(query, [(row['id'], json.dumps(row['data'])) for row in serialized]) + + def get(self, label: GraphLabel): + query = f"""SELECT * FROM {self.table} where id = ?""" + self.cursor.execute(query, (label.id)) + return self.cursor.fetchone() + + def multi_get(self, labels: Iterable[GraphLabel], batch_size=999): + for chunk_data in chunk(labels, batch_size): + query = f"SELECT * FROM {self.table} WHERE id IN ({','.join('?' * len(chunk_data))})" + self.cursor.execute(query, (label.id for label in chunk_data)) + + def serialize_data(self, label: GraphLabel, data: Data) -> GraphRow: + row_dict = {k: self._serialize_tensor(v) if isinstance(v, OptTensor) else v for k, v in vars(data).items()} + return GraphRow( + **vars(label), + **row_dict + ) + + @staticmethod + def _serialize_tensor(t: OptTensor) -> bytes: + """convert a tensor into bytes""" + buff = io.BytesIO() + torch.save(t, buff) + return buff.getvalue() + + def _get_connection(self, credentials): + """a method to get the db cursor to executor SQL""" + con = sqlite3.connect(credentials) + cursor = con.cursor() + cursor.row_factory = namedtuple_factory + return cursor From d0181f5a90ec9a1cae6e23e8214f02e5e7667886 Mon Sep 17 00:00:00 2001 From: Aniket Saxena <92912434+fork123aniket@users.noreply.github.com> Date: Sun, 17 Sep 2023 04:36:37 -0400 Subject: [PATCH 1479/2432] GNN-powered Classification and Explanation App (#8042) This PR is to incorporate `link` to an app on ***GNN-based Node and Graph Classification, and Explanation*** using `GraphMask Explainer` feature of `PyG` in [***PyG's external resources***](https://pytorch-geometric.readthedocs.io/en/latest/external/resources.html) and [***GraphMaskExplainer Documentation***](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.explain.algorithm.GraphMaskExplainer.html#torch_geometric.explain.algorithm.GraphMaskExplainer). More information on this can be found in #7935. 
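
For readers unfamiliar with the explainer referenced here, a rough sketch of how `GraphMaskExplainer` is typically driven through the `Explainer` interface (the model, data, and configuration values below are placeholders for illustration and are not part of this PR; exact arguments may differ):

```python
import torch
from torch_geometric.explain import Explainer
from torch_geometric.explain.algorithm import GraphMaskExplainer
from torch_geometric.nn import GCN

x = torch.randn(6, 8)
edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]])
model = GCN(in_channels=8, hidden_channels=16, num_layers=2, out_channels=3)

explainer = Explainer(
    model=model,
    algorithm=GraphMaskExplainer(num_layers=2, epochs=5),
    explanation_type='model',
    node_mask_type='attributes',
    edge_mask_type='object',
    model_config=dict(
        mode='multiclass_classification',
        task_level='node',
        return_type='raw',
    ),
)
explanation = explainer(x, edge_index, index=3)  # explain node 3
print(explanation.node_mask.size(), explanation.edge_mask.size())
```
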
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- docs/source/external/resources.rst | 2 ++ torch_geometric/explain/algorithm/graphmask_explainer.py | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/docs/source/external/resources.rst b/docs/source/external/resources.rst index cd62497f9436..0443165e3e03 100644 --- a/docs/source/external/resources.rst +++ b/docs/source/external/resources.rst @@ -40,3 +40,5 @@ External Resources * Manan Goel: **Recommending Amazon Products using Graph Neural Networks in** :pyg:`null` **PyTorch Geometric** [:wandb:`null` `W&B Report `__] * Kùzu: **Remote Backend for** :pyg:`null` **PyTorch Geometric** [:colab:`null` `Colab `__] + +* Aniket Saxena: **Graph Neural Networks-based Node and Graph Classification, and Explanation App using** :pyg:`null` **PyTorch Geometric** [`Website `__, :github:`null` `GitHub `__] diff --git a/torch_geometric/explain/algorithm/graphmask_explainer.py b/torch_geometric/explain/algorithm/graphmask_explainer.py index 27ace22ae9b4..2385ae320781 100644 --- a/torch_geometric/explain/algorithm/graphmask_explainer.py +++ b/torch_geometric/explain/algorithm/graphmask_explainer.py @@ -50,6 +50,10 @@ class GraphMaskExplainer(ExplainerAlgorithm): `_. + A working real-time example of :class:`GraphMaskExplainer` in the form + of a deployed app can be accessed `here + `_. + Args: num_layers (int): The number of layers to use. epochs (int, optional): The number of epochs to train. From 3f7a8b9cf8075451d22e77c3e929a124bc831348 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 17 Sep 2023 10:36:59 +0200 Subject: [PATCH 1480/2432] `Database` Updates and Tests (#8044) --- CHANGELOG.md | 1 + test/data/test_database.py | 31 ++++ torch_geometric/data/database.py | 278 ++++++++++++++++++------------- 3 files changed, 193 insertions(+), 117 deletions(-) create mode 100644 test/data/test_database.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 5cb3241ae843..c5bbaaa8f893 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added a `Database` interface and `SQLite` implementation ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/test/data/test_database.py b/test/data/test_database.py new file mode 100644 index 000000000000..cbd4379df177 --- /dev/null +++ b/test/data/test_database.py @@ -0,0 +1,31 @@ +import os.path as osp + +import pytest +import torch + +from torch_geometric.data.database import SQLiteDatabase +from torch_geometric.testing import withPackage + + +@withPackage('sqlite3') +@pytest.mark.parametrize('batch_size', [None, 1]) +def test_sqlite_database(tmp_path, batch_size): + path = osp.join(tmp_path, 'sqlite.db') + db = SQLiteDatabase(path, name='test_table') + assert str(db) == 'SQLiteDatabase()' + + data = torch.randn(5) + db.insert(0, data) + assert torch.equal(db.get(0), data) + + indices = torch.tensor([1, 2]) + data_list = torch.randn(2, 5) + db.multi_insert(indices, data_list, batch_size=batch_size) + + out_list = db.multi_get(indices, batch_size=batch_size) + assert isinstance(out_list, list) + assert len(out_list) == 2 + assert torch.equal(out_list[0], data_list[0]) + assert torch.equal(out_list[1], data_list[1]) + + db.close() diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index e1d48d44034b..b4ac677697ce 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -1,123 +1,167 @@ -import abc -from collections import namedtuple -from dataclasses import dataclass import io -import itertools -import json -from typing import Iterable, Generator, Optional, Any +from abc import ABC, abstractmethod +from typing import Any, Iterable, List, Optional, Union -import sqlite3 import torch +from torch import Tensor + + +class Database(ABC): + r"""Base class for database.""" + def connect(self): + pass + + def close(self): + pass + + @abstractmethod + def insert(self, index: int, data: Any): + raise NotImplementedError + + def multi_insert( + self, + indices: Union[Iterable[int], Tensor], + data_list: Iterable[Any], + batch_size: Optional[int] = None, + ): + if batch_size is None: + batch_size = min(len(indices), len(data_list)) + + for start in range(0, min(len(indices), len(data_list)), batch_size): + self._multi_insert( + indices[start:start + batch_size], + data_list[start:start + batch_size], + ) + + def _multi_insert( + self, + indices: Union[Iterable[int], Tensor], + data_list: Iterable[Any], + ): + if isinstance(indices, Tensor): + indices = indices.tolist() + for index, data in zip(indices, data_list): + self.insert(index, data) + + @abstractmethod + def get(self, index: int) -> Any: + raise NotImplementedError + + def multi_get( + self, + indices: Union[Iterable[int], Tensor], + batch_size: Optional[int] = None, + ) -> List[Any]: + if batch_size is None: + batch_size = len(indices) + + data_list: List[Any] = [] + for start in range(0, len(indices), batch_size): + chunk_indices = indices[start:start + 
batch_size] + data_list.extend(self._multi_get(chunk_indices)) + return data_list + + def _multi_get(self, indices: Union[Iterable[int], Tensor]) -> List[Any]: + if isinstance(indices, Tensor): + indices = indices.tolist() + return [self.get(index) for index in indices] + + # Helper functions ######################################################## -from torch_geometric.data.data import Data -from torch_geometric.typing import OptTensor - - -class GraphLabel: - - def __init__(self, id: str, *args, **kwargs): - self.id = id - for key, value in kwargs.items(): - setattr(self, key, value) - -@dataclass -class GraphRow: - id: str - data: Optional[dict[str, Optional[bytes]]] - - -def chunk(seq: Iterable, chunk_size: int) -> Generator[list, Any, None]: - "chunk data into chunks of chunk_size" - it = iter(seq) - while True: - batch = list(itertools.islice(it, chunk_size)) - if not batch: - return - yield batch - - -def namedtuple_factory(cursor, row): - """util function to create a namedtuple Row foe db results""" - fields = [column[0] for column in cursor.description] - cls = namedtuple("Row", fields) - return cls._make(row) - - -class Database(abc.ABC): - - def __init__(self, credentials, *args, **kwargs): - self.connection = self._get_connection(credentials) - - @abc.abstractmethod - def _initialize(self): - """initialize the database in some way if needed""" - raise NotImplementedError() - - @abc.abstractmethod - def insert(self, labels: Iterable[GraphLabel], values: Iterable[Data], batch_size=10000) -> list[str]: - """insert data into a database""" - raise NotImplementedError() - - @abc.abstractmethod - def serialize_data(self, data: Data) -> GraphRow: - """serialize the data""" - raise NotImplementedError() - - @abc.abstractmethod - def get(self, key: str) -> Data: - """get data by key""" - raise NotImplementedError() - - def multi_get(self, keys: list[str]) -> list[Data]: - """get multiple keys""" - return [self.get(key) for key in keys] - - -class SQLiteDatabase(abc.ABC): - - def __init__(self, credentials, table='pyg_database') -> None: - self.table = table - super().__init__(credentials) - - def _initialize(self): - create = """CREATE TABLE ? (id TEXT, data, TEXT)""" - self.cursor.execute(create, self.table) - - def insert(self, labels: Iterable[GraphLabel], values: Iterable[Data], batch_size=10000) -> list[GraphRow]: - for chunk_data in chunk(zip(labels, values), batch_size): - serialized = [self.serialize_data(label, value) for label, value in chunk_data] - query = f""" - INSERT INTO {self.table} (id, data) - VALUES (?, ?)""" - self.cursor.executemany(query, [(row['id'], json.dumps(row['data'])) for row in serialized]) - - def get(self, label: GraphLabel): - query = f"""SELECT * FROM {self.table} where id = ?""" - self.cursor.execute(query, (label.id)) - return self.cursor.fetchone() - - def multi_get(self, labels: Iterable[GraphLabel], batch_size=999): - for chunk_data in chunk(labels, batch_size): - query = f"SELECT * FROM {self.table} WHERE id IN ({','.join('?' 
* len(chunk_data))})" - self.cursor.execute(query, (label.id for label in chunk_data)) - - def serialize_data(self, label: GraphLabel, data: Data) -> GraphRow: - row_dict = {k: self._serialize_tensor(v) if isinstance(v, OptTensor) else v for k, v in vars(data).items()} - return GraphRow( - **vars(label), - **row_dict - ) + @staticmethod + def serialize(data: Any) -> bytes: + r"""Serializes :obj:`data` into bytes.""" + buffer = io.BytesIO() + torch.save(data, buffer) + return buffer.getvalue() @staticmethod - def _serialize_tensor(t: OptTensor) -> bytes: - """convert a tensor into bytes""" - buff = io.BytesIO() - torch.save(t, buff) - return buff.getvalue() - - def _get_connection(self, credentials): - """a method to get the db cursor to executor SQL""" - con = sqlite3.connect(credentials) - cursor = con.cursor() - cursor.row_factory = namedtuple_factory - return cursor + def deserialize(data: bytes) -> Any: + r"""Deserializes bytes into the original data.""" + return torch.load(io.BytesIO(data)) + + def __repr__(self) -> str: + return f'{self.__class__.__name__}()' + + +class SQLiteDatabase(Database): + def __init__(self, path: str, name: str): + super().__init__() + + import sqlite3 + + self.path = path + self.name = name + + self._connection: Optional[sqlite3.Connection] = None + self._cursor: Optional[sqlite3.Cursor] = None + + self.connect() + + query = (f'CREATE TABLE IF NOT EXISTS {self.name} (\n' + f' id INTEGER PRIMARY KEY,\n' + f' data BLOB NOT NULL\n' + f')') + self.cursor.execute(query) + + def connect(self): + import sqlite3 + self._connection = sqlite3.connect(self.path) + self._cursor = self._connection.cursor() + + def close(self): + self._connection.commit() + self._connection.close() + self._connection = None + self._cursor = None + + @property + def cursor(self) -> Any: + if self._cursor is None: + raise RuntimeError("No open database connection") + return self._cursor + + def insert(self, index: int, data: Any): + query = f'INSERT INTO {self.name} (id, data) VALUES (?, ?)' + self.cursor.execute(query, (index, self.serialize(data))) + + def _multi_insert( + self, + indices: Union[Iterable[int], Tensor], + data_list: Iterable[Any], + ): + if isinstance(indices, Tensor): + indices = indices.tolist() + data_list = [self.serialize(data) for data in data_list] + + query = f'INSERT INTO {self.name} (id, data) VALUES (?, ?)' + self.cursor.executemany(query, zip(indices, data_list)) + + def get(self, index: int) -> Any: + query = f'SELECT data FROM {self.name} WHERE id = ?' + self.cursor.execute(query, (index, )) + return self.deserialize(self.cursor.fetchone()[0]) + + def multi_get( + self, + indices: Union[Iterable[int], Tensor], + batch_size: Optional[int] = None, + ) -> List[Any]: + if isinstance(indices, Tensor): + indices = indices.tolist() + + query = (f'SELECT data FROM {self.name} ' + f'WHERE id IN ({", ".join("?" 
* len(indices))})') + self.cursor.execute(query, indices) + + if batch_size is None: + data_list = self.cursor.fetchall() + else: + data_list: List[Any] = [] + while True: + chunk_list = self.cursor.fetchmany(size=batch_size) + if len(chunk_list) == 0: + break + data_list.extend(chunk_list) + + return [self.deserialize(data[0]) for data in data_list] From d2f9d18eb1320b50964efe20b80ea107e35cfffd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 17 Sep 2023 12:13:17 +0200 Subject: [PATCH 1481/2432] `SQLiteDatabase`: Better `multi_get` + syntactic sugar (#8046) --- CHANGELOG.md | 2 +- test/data/test_database.py | 29 ++++++++++ torch_geometric/data/database.py | 97 +++++++++++++++++++++++++++----- 3 files changed, 112 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5bbaaa8f893..53e47e0372ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -- Added a `Database` interface and `SQLite` implementation ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044)) +- Added a `Database` interface and `SQLite` implementation ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/test/data/test_database.py b/test/data/test_database.py index cbd4379df177..dd5586bc6f6d 100644 --- a/test/data/test_database.py +++ b/test/data/test_database.py @@ -13,14 +13,17 @@ def test_sqlite_database(tmp_path, batch_size): path = osp.join(tmp_path, 'sqlite.db') db = SQLiteDatabase(path, name='test_table') assert str(db) == 'SQLiteDatabase()' + assert len(db) == 0 data = torch.randn(5) db.insert(0, data) + assert len(db) == 1 assert torch.equal(db.get(0), data) indices = torch.tensor([1, 2]) data_list = torch.randn(2, 5) db.multi_insert(indices, data_list, batch_size=batch_size) + assert len(db) == 3 out_list = db.multi_get(indices, batch_size=batch_size) assert isinstance(out_list, list) @@ -29,3 +32,29 @@ def test_sqlite_database(tmp_path, batch_size): assert torch.equal(out_list[1], data_list[1]) db.close() + + +@withPackage('sqlite3') +def test_database_syntactic_sugar(tmp_path): + path = osp.join(tmp_path, 'sqlite.db') + db = SQLiteDatabase(path, name='test_table') + + data = torch.randn(5, 16) + db[0] = data[0] + db[1:3] = data[1:3] + db[torch.tensor([3, 4])] = data[torch.tensor([3, 4])] + assert len(db) == 5 + + assert torch.equal(db[0], data[0]) + assert torch.equal(torch.stack(db[:3], dim=0), data[:3]) + assert torch.equal(torch.stack(db[3:], dim=0), data[3:]) + assert torch.equal(torch.stack(db[1::2], dim=0), data[1::2]) + assert torch.equal(torch.stack(db[[4, 3]], dim=0), data[[4, 3]]) + assert torch.equal( + torch.stack(db[torch.tensor([4, 3])], dim=0), + data[torch.tensor([4, 3])], + ) + assert torch.equal( + 
torch.stack(db[torch.tensor([4, 4])], dim=0), + data[torch.tensor([4, 4])], + ) diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index b4ac677697ce..647d3baa3e0a 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -1,6 +1,7 @@ import io from abc import ABC, abstractmethod from typing import Any, Iterable, List, Optional, Union +from uuid import uuid4 import torch from torch import Tensor @@ -20,14 +21,17 @@ def insert(self, index: int, data: Any): def multi_insert( self, - indices: Union[Iterable[int], Tensor], + indices: Union[Iterable[int], Tensor, slice, range], data_list: Iterable[Any], batch_size: Optional[int] = None, ): - if batch_size is None: - batch_size = min(len(indices), len(data_list)) + if isinstance(indices, slice): + indices = self.slice_to_range(indices) + + length = min(len(indices), len(data_list)) + batch_size = length if batch_size is None else batch_size - for start in range(0, min(len(indices), len(data_list)), batch_size): + for start in range(0, length, batch_size): self._multi_insert( indices[start:start + batch_size], data_list[start:start + batch_size], @@ -35,7 +39,7 @@ def multi_insert( def _multi_insert( self, - indices: Union[Iterable[int], Tensor], + indices: Union[Iterable[int], Tensor, range], data_list: Iterable[Any], ): if isinstance(indices, Tensor): @@ -49,14 +53,17 @@ def get(self, index: int) -> Any: def multi_get( self, - indices: Union[Iterable[int], Tensor], + indices: Union[Iterable[int], Tensor, slice, range], batch_size: Optional[int] = None, ) -> List[Any]: - if batch_size is None: - batch_size = len(indices) + if isinstance(indices, slice): + indices = self.slice_to_range(indices) + + length = len(indices) + batch_size = length if batch_size is None else batch_size data_list: List[Any] = [] - for start in range(0, len(indices), batch_size): + for start in range(0, length, batch_size): chunk_indices = indices[start:start + batch_size] data_list.extend(self._multi_get(chunk_indices)) return data_list @@ -80,6 +87,38 @@ def deserialize(data: bytes) -> Any: r"""Deserializes bytes into the original data.""" return torch.load(io.BytesIO(data)) + def slice_to_range(self, indices: slice) -> range: + start = 0 if indices.start is None else indices.start + stop = len(self) if indices.stop is None else indices.stop + step = 1 if indices.step is None else indices.step + + return range(start, stop, step) + + # Python built-ins ######################################################## + + def __len__(self) -> int: + raise NotImplementedError + + def __getitem__( + self, + key: Union[int, Iterable[int], Tensor, slice, range], + ) -> Union[Any, List[Any]]: + + if isinstance(key, int): + return self.get(key) + else: + return self.multi_get(key) + + def __setitem__( + self, + key: Union[int, Iterable[int], Tensor, slice, range], + value: Union[Any, Iterable[Any]], + ): + if isinstance(key, int): + self.insert(key, value) + else: + self.multi_insert(key, value) + def __repr__(self) -> str: return f'{self.__class__.__name__}()' @@ -127,7 +166,7 @@ def insert(self, index: int, data: Any): def _multi_insert( self, - indices: Union[Iterable[int], Tensor], + indices: Union[Iterable[int], Tensor, range], data_list: Iterable[Any], ): if isinstance(indices, Tensor): @@ -144,15 +183,35 @@ def get(self, index: int) -> Any: def multi_get( self, - indices: Union[Iterable[int], Tensor], + indices: Union[Iterable[int], Tensor, slice, range], batch_size: Optional[int] = None, ) -> List[Any]: - if 
isinstance(indices, Tensor): + + if isinstance(indices, slice): + indices = self.slice_to_range(indices) + elif isinstance(indices, Tensor): indices = indices.tolist() - query = (f'SELECT data FROM {self.name} ' - f'WHERE id IN ({", ".join("?" * len(indices))})') - self.cursor.execute(query, indices) + # We first create a temporary ID table to then perform an INNER JOIN. + # This avoids having a long IN clause and guarantees sorted outputs: + join_table_name = f'{self.name}__join__{uuid4().hex}' + query = (f'CREATE TABLE {join_table_name} (\n' + f' id INTEGER,\n' + f' row_id INTEGER\n' + f')') + self.cursor.execute(query) + + query = f'INSERT INTO {join_table_name} (id, row_id) VALUES (?, ?)' + self.cursor.executemany(query, zip(indices, range(len(indices)))) + + query = f'SELECT * FROM {join_table_name}' + self.cursor.execute(query) + + query = (f'SELECT {self.name}.data ' + f'FROM {self.name} INNER JOIN {join_table_name} ' + f'ON {self.name}.id = {join_table_name}.id ' + f'ORDER BY {join_table_name}.row_id') + self.cursor.execute(query) if batch_size is None: data_list = self.cursor.fetchall() @@ -164,4 +223,12 @@ def multi_get( break data_list.extend(chunk_list) + query = f'DROP TABLE {join_table_name}' + self.cursor.execute(query) + return [self.deserialize(data[0]) for data in data_list] + + def __len__(self) -> int: + query = f'SELECT COUNT(*) FROM {self.name}' + self.cursor.execute(query) + return self.cursor.fetchone()[0] From 3718e05baa656f9128f289865709a352b0302f74 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 18 Sep 2023 07:34:42 +0200 Subject: [PATCH 1482/2432] Temporarily disable broken Windows test (#8050) --- test/explain/test_hetero_explanation.py | 3 ++- test/nn/models/test_correct_and_smooth.py | 2 ++ test/nn/models/test_rect.py | 8 ++++---- torch_geometric/testing/__init__.py | 2 ++ torch_geometric/testing/decorators.py | 10 ++++++++++ 5 files changed, 20 insertions(+), 5 deletions(-) diff --git a/test/explain/test_hetero_explanation.py b/test/explain/test_hetero_explanation.py index ebbb3f0981b9..9aeac15a0e87 100644 --- a/test/explain/test_hetero_explanation.py +++ b/test/explain/test_hetero_explanation.py @@ -83,7 +83,8 @@ def test_node_mask(): explanation = HeteroExplanation() explanation['paper'].node_mask = torch.tensor([[1.], [0.], [1.], [1.]]) explanation['author'].node_mask = torch.tensor([[1.], [0.], [1.], [1.]]) - explanation.validate(raise_on_error=True) + with pytest.warns(UserWarning, match="are isolated"): + explanation.validate(raise_on_error=True) out = explanation.get_explanation_subgraph() assert out['paper'].node_mask.size() == (3, 1) diff --git a/test/nn/models/test_correct_and_smooth.py b/test/nn/models/test_correct_and_smooth.py index 035f9f860803..564b4bdab099 100644 --- a/test/nn/models/test_correct_and_smooth.py +++ b/test/nn/models/test_correct_and_smooth.py @@ -2,9 +2,11 @@ import torch_geometric.typing from torch_geometric.nn.models import CorrectAndSmooth +from torch_geometric.testing import noWindows from torch_geometric.typing import SparseTensor +@noWindows def test_correct_and_smooth(): y_soft = torch.tensor([0.1, 0.5, 0.4]).repeat(6, 1) y_true = torch.tensor([1, 0, 0, 2, 1, 1]) diff --git a/test/nn/models/test_rect.py b/test/nn/models/test_rect.py index 3cfe2e25faf2..50c4a78f2826 100644 --- a/test/nn/models/test_rect.py +++ b/test/nn/models/test_rect.py @@ -33,12 +33,12 @@ def test_rect(): if is_full_test(): jit = torch.jit.script(model.jittable()) - assert torch.allclose(jit(x, edge_index), out) - assert 
torch.allclose(embed_out, jit.embed(x, edge_index)) + assert torch.allclose(jit(x, edge_index), out, atol=1e-6) + assert torch.allclose(embed_out, jit.embed(x, edge_index), atol=1e-6) assert torch.allclose(labeds_out, jit.get_semantic_labels(x, y, mask)) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: jit = torch.jit.script(model.jittable(use_sparse_tensor=True)) - assert torch.allclose(jit(x, adj.t()), out) - assert torch.allclose(embed_out, jit.embed(x, adj.t())) + assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) + assert torch.allclose(embed_out, jit.embed(x, adj.t()), atol=1e-6) assert torch.allclose(labeds_out, jit.get_semantic_labels(x, y, mask)) diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py index 6a108d42fc04..0a3131046e50 100644 --- a/torch_geometric/testing/__init__.py +++ b/torch_geometric/testing/__init__.py @@ -2,6 +2,7 @@ is_full_test, onlyFullTest, onlyLinux, + noWindows, onlyPython, onlyCUDA, onlyXPU, @@ -21,6 +22,7 @@ 'is_full_test', 'onlyFullTest', 'onlyLinux', + 'noWindows', 'onlyPython', 'onlyCUDA', 'onlyXPU', diff --git a/torch_geometric/testing/decorators.py b/torch_geometric/testing/decorators.py index 8c5e35c16e41..29845e02cfed 100644 --- a/torch_geometric/testing/decorators.py +++ b/torch_geometric/testing/decorators.py @@ -36,6 +36,16 @@ def onlyLinux(func: Callable) -> Callable: )(func) +def noWindows(func: Callable) -> Callable: + r"""A decorator to specify that this function should not execute on + Windows systems.""" + import pytest + return pytest.mark.skipif( + os.name == 'nt', + reason="Windows system", + )(func) + + def onlyPython(*args) -> Callable: r"""A decorator to skip tests for any Python version not listed.""" def decorator(func: Callable) -> Callable: From 10464bd3f12fea07c94b39db8e4b71c041787b10 Mon Sep 17 00:00:00 2001 From: Kaiwen Dong Date: Mon, 18 Sep 2023 13:35:41 +0800 Subject: [PATCH 1483/2432] Fix backward compatibility for `to_undirected` (#8049) Fix a small bug happening when maintaining backward compatibility in to_undirected --- torch_geometric/utils/undirected.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/utils/undirected.py b/torch_geometric/utils/undirected.py index 1d7afff35aef..1e9021ca1323 100644 --- a/torch_geometric/utils/undirected.py +++ b/torch_geometric/utils/undirected.py @@ -158,8 +158,8 @@ def to_undirected( """ # Maintain backward compatibility to `to_undirected(edge_index, num_nodes)` if isinstance(edge_attr, int): - edge_attr = MISSING num_nodes = edge_attr + edge_attr = MISSING row, col = edge_index[0], edge_index[1] row, col = torch.cat([row, col], dim=0), torch.cat([col, row], dim=0) From fa11f4341546977908649b9417c0d3b5c00e648b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 18 Sep 2023 09:51:11 +0200 Subject: [PATCH 1484/2432] `SQLiteDatabase` benchmark (#8051) --- CHANGELOG.md | 2 +- test/data/test_database.py | 42 +++++++++++++++++++++++++++++++- torch_geometric/data/database.py | 17 +++++++++++-- 3 files changed, 57 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53e47e0372ad..ffaa292347f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added a `Database` interface and `SQLite` implementation ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046)) +- Added a `Database` interface and `SQLite` implementation ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/test/data/test_database.py b/test/data/test_database.py index dd5586bc6f6d..dcb94addc4e8 100644 --- a/test/data/test_database.py +++ b/test/data/test_database.py @@ -4,6 +4,7 @@ import torch from torch_geometric.data.database import SQLiteDatabase +from torch_geometric.profile import benchmark from torch_geometric.testing import withPackage @@ -12,7 +13,7 @@ def test_sqlite_database(tmp_path, batch_size): path = osp.join(tmp_path, 'sqlite.db') db = SQLiteDatabase(path, name='test_table') - assert str(db) == 'SQLiteDatabase()' + assert str(db) == 'SQLiteDatabase(0)' assert len(db) == 0 data = torch.randn(5) @@ -58,3 +59,42 @@ def test_database_syntactic_sugar(tmp_path): torch.stack(db[torch.tensor([4, 4])], dim=0), data[torch.tensor([4, 4])], ) + + +if __name__ == '__main__': + import argparse + import tempfile + import time + + parser = argparse.ArgumentParser() + parser.add_argument('--numel', type=int, default=100_000) + parser.add_argument('--batch_size', type=int, default=256) + args = parser.parse_args() + + data = torch.randn(args.numel, 128) + + tmp_dir = tempfile.TemporaryDirectory() + path = osp.join(tmp_dir.name, 'sqlite.db') + db = SQLiteDatabase(path, name='test_table') + + t = time.perf_counter() + db.multi_insert(range(args.numel), data, batch_size=100, log=True) + print(f'Initialized DB in {time.perf_counter() - t:.2f} seconds') + + def in_memory_get(data): + index = torch.randint(0, args.numel, (128, )) + return data[index] + + def db_get(db): + index = torch.randint(0, args.numel, (128, )) + return db[index] + + benchmark( + funcs=[in_memory_get, db_get], + func_names=['In-Memory', 'SQLite'], + args=[(data, ), (db, )], + num_steps=50, + num_warmups=5, + ) + + tmp_dir.cleanup() diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index 647d3baa3e0a..acf5842c6281 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -5,6 +5,7 @@ import torch from torch import Tensor +from tqdm import tqdm class Database(ABC): @@ -24,6 +25,7 @@ def multi_insert( indices: Union[Iterable[int], Tensor, slice, range], data_list: Iterable[Any], batch_size: Optional[int] = None, + log: bool = False, ): if isinstance(indices, slice): indices = self.slice_to_range(indices) @@ -31,7 +33,13 @@ def multi_insert( length = min(len(indices), len(data_list)) batch_size = length if batch_size is None else batch_size - for start in 
range(0, length, batch_size): + if log and length > batch_size: + desc = f'Insert {length} entries' + offsets = tqdm(range(0, length, batch_size), desc=desc) + else: + offsets = range(0, length, batch_size) + + for start in offsets: self._multi_insert( indices[start:start + batch_size], data_list[start:start + batch_size], @@ -78,6 +86,10 @@ def _multi_get(self, indices: Union[Iterable[int], Tensor]) -> List[Any]: @staticmethod def serialize(data: Any) -> bytes: r"""Serializes :obj:`data` into bytes.""" + # Ensure that data is not a view of a larger tensor: + if isinstance(data, Tensor): + data = data.clone() + buffer = io.BytesIO() torch.save(data, buffer) return buffer.getvalue() @@ -120,7 +132,7 @@ def __setitem__( self.multi_insert(key, value) def __repr__(self) -> str: - return f'{self.__class__.__name__}()' + return f'{self.__class__.__name__}({len(self)})' class SQLiteDatabase(Database): @@ -171,6 +183,7 @@ def _multi_insert( ): if isinstance(indices, Tensor): indices = indices.tolist() + data_list = [self.serialize(data) for data in data_list] query = f'INSERT INTO {self.name} (id, data) VALUES (?, ?)' From 911c90c6b70e3329308630e6edb5c70402bf5eb5 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 18 Sep 2023 10:58:23 +0200 Subject: [PATCH 1485/2432] `RocksDatabase` implementation (#8052) --- CHANGELOG.md | 2 +- test/data/test_database.py | 47 ++++++++++++++++---- torch_geometric/data/database.py | 75 +++++++++++++++++++++++++++----- 3 files changed, 104 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ffaa292347f6..f635619420af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -- Added a `Database` interface and `SQLite` implementation ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051)) +- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/test/data/test_database.py b/test/data/test_database.py index dcb94addc4e8..0a9a59f18db1 100644 --- a/test/data/test_database.py +++ b/test/data/test_database.py @@ -3,7 +3,7 @@ import pytest import torch -from torch_geometric.data.database import SQLiteDatabase +from torch_geometric.data.database import RocksDatabase, SQLiteDatabase from torch_geometric.profile import benchmark from torch_geometric.testing import withPackage @@ -35,6 +35,32 @@ def test_sqlite_database(tmp_path, batch_size): db.close() 
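# A minimal usage sketch of the `RocksDatabase` interface introduced by this
# patch (illustrative, not part of the diff). It assumes the optional
# `rocksdict` package is installed and that `torch` and `RocksDatabase` are
# imported as in the test module above; the path is a placeholder:
#
#     db = RocksDatabase(path='/tmp/rocks_example.db')
#     db[0] = torch.randn(5)     # keys are integer indices
#     out = db.multi_get([0])    # batched reads return a list of tensors
#     db.close()                 # note: `len(db)` is not implemented here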
+@withPackage('rocksdict') +@pytest.mark.parametrize('batch_size', [None, 1]) +def test_rocks_database(tmp_path, batch_size): + path = osp.join(tmp_path, 'rocks.db') + db = RocksDatabase(path) + assert str(db) == 'RocksDatabase()' + with pytest.raises(NotImplementedError): + len(db) + + data = torch.randn(5) + db.insert(0, data) + assert torch.equal(db.get(0), data) + + indices = torch.tensor([1, 2]) + data_list = torch.randn(2, 5) + db.multi_insert(indices, data_list, batch_size=batch_size) + + out_list = db.multi_get(indices, batch_size=batch_size) + assert isinstance(out_list, list) + assert len(out_list) == 2 + assert torch.equal(out_list[0], data_list[0]) + assert torch.equal(out_list[1], data_list[1]) + + db.close() + + @withPackage('sqlite3') def test_database_syntactic_sugar(tmp_path): path = osp.join(tmp_path, 'sqlite.db') @@ -72,14 +98,19 @@ def test_database_syntactic_sugar(tmp_path): args = parser.parse_args() data = torch.randn(args.numel, 128) - tmp_dir = tempfile.TemporaryDirectory() + path = osp.join(tmp_dir.name, 'sqlite.db') - db = SQLiteDatabase(path, name='test_table') + sqlite_db = SQLiteDatabase(path, name='test_table') + t = time.perf_counter() + sqlite_db.multi_insert(range(args.numel), data, batch_size=100, log=True) + print(f'Initialized SQLiteDB in {time.perf_counter() - t:.2f} seconds') + path = osp.join(tmp_dir.name, 'rocks.db') + rocks_db = RocksDatabase(path) t = time.perf_counter() - db.multi_insert(range(args.numel), data, batch_size=100, log=True) - print(f'Initialized DB in {time.perf_counter() - t:.2f} seconds') + rocks_db.multi_insert(range(args.numel), data, batch_size=100, log=True) + print(f'Initialized RocksDB in {time.perf_counter() - t:.2f} seconds') def in_memory_get(data): index = torch.randint(0, args.numel, (128, )) @@ -90,9 +121,9 @@ def db_get(db): return db[index] benchmark( - funcs=[in_memory_get, db_get], - func_names=['In-Memory', 'SQLite'], - args=[(data, ), (db, )], + funcs=[in_memory_get, db_get, db_get], + func_names=['In-Memory', 'SQLite', 'RocksDB'], + args=[(data, ), (sqlite_db, ), (rocks_db, )], num_steps=50, num_warmups=5, ) diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index acf5842c6281..f8df0cb280c4 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -1,9 +1,8 @@ -import io +import pickle from abc import ABC, abstractmethod from typing import Any, Iterable, List, Optional, Union from uuid import uuid4 -import torch from torch import Tensor from tqdm import tqdm @@ -90,14 +89,12 @@ def serialize(data: Any) -> bytes: if isinstance(data, Tensor): data = data.clone() - buffer = io.BytesIO() - torch.save(data, buffer) - return buffer.getvalue() + return pickle.dumps(data) @staticmethod def deserialize(data: bytes) -> Any: r"""Deserializes bytes into the original data.""" - return torch.load(io.BytesIO(data)) + return pickle.loads(data) def slice_to_range(self, indices: slice) -> range: start = 0 if indices.start is None else indices.start @@ -132,7 +129,10 @@ def __setitem__( self.multi_insert(key, value) def __repr__(self) -> str: - return f'{self.__class__.__name__}({len(self)})' + try: + return f'{self.__class__.__name__}({len(self)})' + except NotImplementedError: + return f'{self.__class__.__name__}()' class SQLiteDatabase(Database): @@ -161,10 +161,11 @@ def connect(self): self._cursor = self._connection.cursor() def close(self): - self._connection.commit() - self._connection.close() - self._connection = None - self._cursor = None + if self._connection is 
not None: + self._connection.commit() + self._connection.close() + self._connection = None + self._cursor = None @property def cursor(self) -> Any: @@ -245,3 +246,55 @@ def __len__(self) -> int: query = f'SELECT COUNT(*) FROM {self.name}' self.cursor.execute(query) return self.cursor.fetchone()[0] + + +class RocksDatabase(Database): + def __init__(self, path: str): + super().__init__() + + import rocksdict + + self.path = path + + self._db: Optional[rocksdict.Rdict] = None + + self.connect() + + def connect(self): + import rocksdict + self._db = rocksdict.Rdict( + self.path, + options=rocksdict.Options(raw_mode=True), + ) + + def close(self): + if self._db is not None: + self._db.close() + self._db = None + + @property + def db(self) -> Any: + if self._db is None: + raise RuntimeError("No open database connection") + return self._db + + @staticmethod + def to_key(index: int) -> bytes: + return index.to_bytes(8, byteorder='big', signed=True) + + def insert(self, index: int, data: Any): + # Ensure that data is not a view of a larger tensor: + if isinstance(data, Tensor): + data = data.clone() + + self.db[self.to_key(index)] = self.serialize(data) + + def get(self, index: int) -> Any: + return self.deserialize(self.db[self.to_key(index)]) + + def _multi_get(self, indices: Union[Iterable[int], Tensor]) -> List[Any]: + if isinstance(indices, Tensor): + indices = indices.tolist() + indices = [self.to_key(index) for index in indices] + data_list = self.db[indices] + return [self.deserialize(data) for data in data_list] From 0dce89179df75b9ea378ba869717bd3c5437e5bb Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Mon, 18 Sep 2023 15:58:33 +0200 Subject: [PATCH 1486/2432] Use raw `Tensor` data as `BLOB` in `SQLiteDatabase` (#8054) --- CHANGELOG.md | 2 +- test/data/test_database.py | 4 +- torch_geometric/data/database.py | 178 +++++++++++++++++++++++++------ 3 files changed, 147 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f635619420af..0017c0643ed7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
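The change below replaces pickled tensor values with raw `BLOB` columns; the
round trip it relies on is roughly the following sketch (names are
illustrative, not taken from the patch):

    blob = tensor.numpy().tobytes()
    restored = torch.frombuffer(blob, dtype=info.dtype).view(*info.size)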
### Added -- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052)) +- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/test/data/test_database.py b/test/data/test_database.py index 0a9a59f18db1..63a603aca0b5 100644 --- a/test/data/test_database.py +++ b/test/data/test_database.py @@ -113,11 +113,11 @@ def test_database_syntactic_sugar(tmp_path): print(f'Initialized RocksDB in {time.perf_counter() - t:.2f} seconds') def in_memory_get(data): - index = torch.randint(0, args.numel, (128, )) + index = torch.randint(0, args.numel, (args.batch_size, )) return data[index] def db_get(db): - index = torch.randint(0, args.numel, (128, )) + index = torch.randint(0, args.numel, (args.batch_size, )) return db[index] benchmark( diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index f8df0cb280c4..992354672e89 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -1,14 +1,51 @@ import pickle +import warnings from abc import ABC, abstractmethod -from typing import Any, Iterable, List, Optional, Union +from dataclasses import dataclass, field +from functools import cached_property +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from uuid import uuid4 +import torch from torch import Tensor from tqdm import tqdm +from torch_geometric.utils.mixin import CastMixin + + +@dataclass +class TensorInfo(CastMixin): + dtype: torch.dtype + size: Tuple[int, ...] 
= field(default_factory=lambda: (-1, )) + + +def maybe_cast_to_tensor_info(value: Any) -> Union[Any, TensorInfo]: + if not isinstance(value, dict): + return value + if len(value) < 1 or len(value) > 2: + return value + if len(value) == 1 and 'dtype' not in value: + return value + if len(value) == 2 and 'dtype' not in value and 'size' not in value: + return value + return TensorInfo.cast(value) + + +Schema = Union[Any, Dict[str, Any], Tuple[Any], List[Any]] + class Database(ABC): - r"""Base class for database.""" + r"""Base class for inserting and retrieving data from a database.""" + def __init__(self, schema: Schema = object): + schema = maybe_cast_to_tensor_info(schema) + schema = self._to_dict(schema) + schema = { + key: maybe_cast_to_tensor_info(value) + for key, value in schema.items() + } + + self.schema: Dict[Union[str, int], Any] = schema + def connect(self): pass @@ -83,18 +120,13 @@ def _multi_get(self, indices: Union[Iterable[int], Tensor]) -> List[Any]: # Helper functions ######################################################## @staticmethod - def serialize(data: Any) -> bytes: - r"""Serializes :obj:`data` into bytes.""" - # Ensure that data is not a view of a larger tensor: - if isinstance(data, Tensor): - data = data.clone() - - return pickle.dumps(data) - - @staticmethod - def deserialize(data: bytes) -> Any: - r"""Deserializes bytes into the original data.""" - return pickle.loads(data) + def _to_dict(value) -> Dict[Union[str, int], Any]: + if isinstance(value, dict): + return value + if isinstance(value, (tuple, list)): + return {i: v for i, v in enumerate(value)} + else: + return {0: value} def slice_to_range(self, indices: slice) -> range: start = 0 if indices.start is None else indices.start @@ -136,8 +168,10 @@ def __repr__(self) -> str: class SQLiteDatabase(Database): - def __init__(self, path: str, name: str): - super().__init__() + def __init__(self, path: str, name: str, schema: Schema = object): + super().__init__(schema) + + warnings.filterwarnings('ignore', '.*given buffer is not writable.*') import sqlite3 @@ -149,9 +183,13 @@ def __init__(self, path: str, name: str): self.connect() + sql_schema = ',\n'.join([ + f' {col_name} {self._to_sql_type(type_info)} NOT NULL' for + col_name, type_info in zip(self._col_names, self.schema.values()) + ]) query = (f'CREATE TABLE IF NOT EXISTS {self.name} (\n' f' id INTEGER PRIMARY KEY,\n' - f' data BLOB NOT NULL\n' + f'{sql_schema}\n' f')') self.cursor.execute(query) @@ -174,8 +212,10 @@ def cursor(self) -> Any: return self._cursor def insert(self, index: int, data: Any): - query = f'INSERT INTO {self.name} (id, data) VALUES (?, ?)' - self.cursor.execute(query, (index, self.serialize(data))) + query = (f'INSERT INTO {self.name} ' + f'(id, {self._joined_col_names}) ' + f'VALUES (?, {self._dummies})') + self.cursor.execute(query, (index, self._serialize(data))) def _multi_insert( self, @@ -185,15 +225,18 @@ def _multi_insert( if isinstance(indices, Tensor): indices = indices.tolist() - data_list = [self.serialize(data) for data in data_list] + data_list = [self._serialize(data) for data in data_list] - query = f'INSERT INTO {self.name} (id, data) VALUES (?, ?)' + query = (f'INSERT INTO {self.name} ' + f'(id, {self._joined_col_names}) ' + f'VALUES (?, {self._dummies})') self.cursor.executemany(query, zip(indices, data_list)) def get(self, index: int) -> Any: - query = f'SELECT data FROM {self.name} WHERE id = ?' 
+ query = (f'SELECT {self._joined_col_names} FROM {self.name} ' + f'WHERE id = ?') self.cursor.execute(query, (index, )) - return self.deserialize(self.cursor.fetchone()[0]) + return self._deserialize(self.cursor.fetchone()) def multi_get( self, @@ -221,7 +264,7 @@ def multi_get( query = f'SELECT * FROM {join_table_name}' self.cursor.execute(query) - query = (f'SELECT {self.name}.data ' + query = (f'SELECT {self._joined_col_names} ' f'FROM {self.name} INNER JOIN {join_table_name} ' f'ON {self.name}.id = {join_table_name}.id ' f'ORDER BY {join_table_name}.row_id') @@ -240,17 +283,77 @@ def multi_get( query = f'DROP TABLE {join_table_name}' self.cursor.execute(query) - return [self.deserialize(data[0]) for data in data_list] + return [self._deserialize(data) for data in data_list] def __len__(self) -> int: query = f'SELECT COUNT(*) FROM {self.name}' self.cursor.execute(query) return self.cursor.fetchone()[0] + # Helper functions ######################################################## + + @cached_property + def _col_names(self) -> List[str]: + return [f'COL_{key}' for key in self.schema.keys()] + + @cached_property + def _joined_col_names(self) -> str: + return ', '.join(self._col_names) + + @cached_property + def _dummies(self) -> str: + return ', '.join(['?'] * len(self.schema.keys())) + + def _to_sql_type(self, type_info: Any) -> str: + if type_info == int: + return 'INTEGER' + if type_info == int: + return 'FLOAT' + if type_info == str: + return 'TEXT' + else: + return 'BLOB' + + def _serialize(self, row: Any) -> Union[Any, List[Any]]: + out_list: List[Any] = [] + for key, col in self._to_dict(row).items(): + if isinstance(self.schema[key], TensorInfo): + out = row.numpy().tobytes() + elif isinstance(col, Tensor): + self.schema[key] = TensorInfo(dtype=col.dtype) + out = row.numpy().tobytes() + elif self.schema[key] in {int, float, str}: + out = col + else: + out = pickle.dumps(col) + + out_list.append(out) + + return out_list if len(out_list) > 1 else out_list[0] + + def _deserialize(self, row: Tuple[Any]) -> Any: + out_dict = {} + for i, (key, col_schema) in enumerate(self.schema.items()): + if isinstance(col_schema, TensorInfo): + out_dict[key] = torch.frombuffer( + row[i], dtype=col_schema.dtype).view(*col_schema.size) + elif col_schema in {int, float, str}: + out_dict[key] = row[i] + else: + out_dict[key] = pickle.loads(row[i]) + + if 0 in self.schema: + if len(self.schema) == 1: + return out_dict[0] + else: + return tuple(out_dict.values()) + else: + return out_dict + class RocksDatabase(Database): - def __init__(self, path: str): - super().__init__() + def __init__(self, path: str, schema: Schema = object): + super().__init__(schema) import rocksdict @@ -283,18 +386,25 @@ def to_key(index: int) -> bytes: return index.to_bytes(8, byteorder='big', signed=True) def insert(self, index: int, data: Any): - # Ensure that data is not a view of a larger tensor: - if isinstance(data, Tensor): - data = data.clone() - - self.db[self.to_key(index)] = self.serialize(data) + self.db[self.to_key(index)] = self._serialize(data) def get(self, index: int) -> Any: - return self.deserialize(self.db[self.to_key(index)]) + return self._deserialize(self.db[self.to_key(index)]) def _multi_get(self, indices: Union[Iterable[int], Tensor]) -> List[Any]: if isinstance(indices, Tensor): indices = indices.tolist() indices = [self.to_key(index) for index in indices] data_list = self.db[indices] - return [self.deserialize(data) for data in data_list] + return [self._deserialize(data) for data in data_list] + + # 
Helper functions ######################################################## + + def _serialize(self, row: Any) -> bytes: + # Ensure that data is not a view of a larger tensor: + if isinstance(row, Tensor): + row = row.clone() + return pickle.dumps(row) + + def _deserialize(self, row: bytes) -> Any: + return pickle.loads(row) From 5f157cd263d7af959ba975ac26dea00f176ac74d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 19 Sep 2023 08:46:07 +0200 Subject: [PATCH 1487/2432] Additional `Database` tests for all kinds of input data (#8057) --- CHANGELOG.md | 2 +- test/data/test_database.py | 151 ++++++++++++++++++++------ torch_geometric/data/database.py | 42 ++++--- torch_geometric/testing/__init__.py | 2 + torch_geometric/testing/decorators.py | 42 +++---- 5 files changed, 172 insertions(+), 67 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0017c0643ed7..98e0776c0714 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054)) +- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/test/data/test_database.py b/test/data/test_database.py index 63a603aca0b5..7d3b99a257ef 100644 --- a/test/data/test_database.py +++ b/test/data/test_database.py @@ -3,29 +3,53 @@ import pytest import torch -from torch_geometric.data.database import RocksDatabase, SQLiteDatabase +from torch_geometric.data import Data +from torch_geometric.data.database import ( + RocksDatabase, + SQLiteDatabase, + TensorInfo, +) from torch_geometric.profile import benchmark -from torch_geometric.testing import withPackage +from torch_geometric.testing import has_package, withPackage +AVAILABLE_DATABASES = [] +if has_package('sqlite3'): + AVAILABLE_DATABASES.append(SQLiteDatabase) +if has_package('rocksdict'): + AVAILABLE_DATABASES.append(RocksDatabase) -@withPackage('sqlite3') + +@pytest.mark.parametrize('Database', AVAILABLE_DATABASES) @pytest.mark.parametrize('batch_size', [None, 1]) -def test_sqlite_database(tmp_path, batch_size): - path = 
osp.join(tmp_path, 'sqlite.db') - db = SQLiteDatabase(path, name='test_table') - assert str(db) == 'SQLiteDatabase(0)' - assert len(db) == 0 +def test_databases_single_tensor(tmp_path, Database, batch_size): + kwargs = dict(path=osp.join(tmp_path, 'storage.db')) + if Database == SQLiteDatabase: + kwargs['name'] = 'test_table' + + db = Database(**kwargs) + assert db.schema == {0: object} + + try: + assert len(db) == 0 + assert str(db) == f'{Database.__name__}(0)' + except NotImplementedError: + assert str(db) == f'{Database.__name__}()' data = torch.randn(5) db.insert(0, data) - assert len(db) == 1 + try: + assert len(db) == 1 + except NotImplementedError: + pass assert torch.equal(db.get(0), data) indices = torch.tensor([1, 2]) data_list = torch.randn(2, 5) db.multi_insert(indices, data_list, batch_size=batch_size) - assert len(db) == 3 - + try: + assert len(db) == 3 + except NotImplementedError: + pass out_list = db.multi_get(indices, batch_size=batch_size) assert isinstance(out_list, list) assert len(out_list) == 2 @@ -35,35 +59,98 @@ def test_sqlite_database(tmp_path, batch_size): db.close() -@withPackage('rocksdict') -@pytest.mark.parametrize('batch_size', [None, 1]) -def test_rocks_database(tmp_path, batch_size): - path = osp.join(tmp_path, 'rocks.db') - db = RocksDatabase(path) - assert str(db) == 'RocksDatabase()' - with pytest.raises(NotImplementedError): - len(db) - - data = torch.randn(5) - db.insert(0, data) - assert torch.equal(db.get(0), data) +@pytest.mark.parametrize('Database', AVAILABLE_DATABASES) +def test_databases_schema(tmp_path, Database): + kwargs = dict(name='test_table') if Database == SQLiteDatabase else {} + + path = osp.join(tmp_path, 'tuple_storage.db') + schema = (int, float, str, dict(dtype=torch.float, size=(2, -1)), object) + db = Database(path, schema=schema, **kwargs) + assert db.schema == { + 0: int, + 1: float, + 2: str, + 3: TensorInfo(dtype=torch.float, size=(2, -1)), + 4: object, + } + + data1 = (1, 0.1, 'a', torch.randn(2, 8), Data(x=torch.randn(1, 8))) + data2 = (2, 0.2, 'b', torch.randn(2, 16), Data(x=torch.randn(2, 8))) + data3 = (3, 0.3, 'c', torch.randn(2, 32), Data(x=torch.randn(3, 8))) + db.insert(0, data1) + db.multi_insert([1, 2], [data2, data3]) + + out1 = db.get(0) + out2, out3 = db.multi_get([1, 2]) + + for out, data in zip([out1, out2, out3], [data1, data2, data3]): + assert out[0] == data[0] + assert out[1] == data[1] + assert out[2] == data[2] + assert torch.equal(out[3], data[3]) + assert isinstance(out[4], Data) and len(out[4]) == 1 + assert torch.equal(out[4].x, data[4].x) - indices = torch.tensor([1, 2]) - data_list = torch.randn(2, 5) - db.multi_insert(indices, data_list, batch_size=batch_size) + db.close() - out_list = db.multi_get(indices, batch_size=batch_size) - assert isinstance(out_list, list) - assert len(out_list) == 2 - assert torch.equal(out_list[0], data_list[0]) - assert torch.equal(out_list[1], data_list[1]) + path = osp.join(tmp_path, 'dict_storage.db') + schema = { + 'int': int, + 'float': float, + 'str': str, + 'tensor': dict(dtype=torch.float, size=(2, -1)), + 'data': object + } + db = Database(path, schema=schema, **kwargs) + assert db.schema == { + 'int': int, + 'float': float, + 'str': str, + 'tensor': TensorInfo(dtype=torch.float, size=(2, -1)), + 'data': object, + } + + data1 = { + 'int': 1, + 'float': 0.1, + 'str': 'a', + 'tensor': torch.randn(2, 8), + 'data': Data(x=torch.randn(1, 8)), + } + data2 = { + 'int': 2, + 'float': 0.2, + 'str': 'b', + 'tensor': torch.randn(2, 16), + 'data': Data(x=torch.randn(2, 
8)), + } + data3 = { + 'int': 3, + 'float': 0.3, + 'str': 'c', + 'tensor': torch.randn(2, 32), + 'data': Data(x=torch.randn(3, 8)), + } + db.insert(0, data1) + db.multi_insert([1, 2], [data2, data3]) + + out1 = db.get(0) + out2, out3 = db.multi_get([1, 2]) + + for out, data in zip([out1, out2, out3], [data1, data2, data3]): + assert out['int'] == data['int'] + assert out['float'] == data['float'] + assert out['str'] == data['str'] + assert torch.equal(out['tensor'], data['tensor']) + assert isinstance(out['data'], Data) and len(out['data']) == 1 + assert torch.equal(out['data'].x, data['data'].x) db.close() @withPackage('sqlite3') def test_database_syntactic_sugar(tmp_path): - path = osp.join(tmp_path, 'sqlite.db') + path = osp.join(tmp_path, 'storage.db') db = SQLiteDatabase(path, name='test_table') data = torch.randn(5, 16) diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index 992354672e89..c180eac124d6 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -183,6 +183,8 @@ def __init__(self, path: str, name: str, schema: Schema = object): self.connect() + # Create the table (if it does not exist) by mapping the Python schema + # to the corresponding SQL schema: sql_schema = ',\n'.join([ f' {col_name} {self._to_sql_type(type_info)} NOT NULL' for col_name, type_info in zip(self._col_names, self.schema.values()) @@ -215,7 +217,7 @@ def insert(self, index: int, data: Any): query = (f'INSERT INTO {self.name} ' f'(id, {self._joined_col_names}) ' f'VALUES (?, {self._dummies})') - self.cursor.execute(query, (index, self._serialize(data))) + self.cursor.execute(query, (index, *self._serialize(data))) def _multi_insert( self, @@ -225,12 +227,13 @@ def _multi_insert( if isinstance(indices, Tensor): indices = indices.tolist() - data_list = [self._serialize(data) for data in data_list] + data_list = [(index, *self._serialize(data)) + for index, data in zip(indices, data_list)] query = (f'INSERT INTO {self.name} ' f'(id, {self._joined_col_names}) ' f'VALUES (?, {self._dummies})') - self.cursor.executemany(query, zip(indices, data_list)) + self.cursor.executemany(query, data_list) def get(self, index: int) -> Any: query = (f'SELECT {self._joined_col_names} FROM {self.name} ' @@ -249,7 +252,7 @@ def multi_get( elif isinstance(indices, Tensor): indices = indices.tolist() - # We first create a temporary ID table to then perform an INNER JOIN. + # We create a temporary ID table to then perform an INNER JOIN. # This avoids having a long IN clause and guarantees sorted outputs: join_table_name = f'{self.name}__join__{uuid4().hex}' query = (f'CREATE TABLE {join_table_name} (\n' @@ -314,24 +317,33 @@ def _to_sql_type(self, type_info: Any) -> str: else: return 'BLOB' - def _serialize(self, row: Any) -> Union[Any, List[Any]]: - out_list: List[Any] = [] + def _serialize(self, row: Any) -> List[Any]: + # Serializes the given input data according to `schema`: + # * {int, float, str}: Use as they are. + # * torch.Tensor: Convert into the raw byte string + # * object: Dump via pickle + # If we find a `torch.Tensor` that is not registered as such in + # `schema`, we modify the schema in-place for improved efficiency. 
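        # For illustration only (hypothetical values, not from the patch):
        # with `schema={'x': dict(dtype=torch.float), 'y': int, 'z': object}`,
        # a row `{'x': torch.randn(2), 'y': 3, 'z': Data()}` serializes to
        # `[x.numpy().tobytes(), 3, pickle.dumps(z)]`.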
+ out: List[Any] = [] for key, col in self._to_dict(row).items(): if isinstance(self.schema[key], TensorInfo): - out = row.numpy().tobytes() + out.append(col.numpy().tobytes()) elif isinstance(col, Tensor): self.schema[key] = TensorInfo(dtype=col.dtype) - out = row.numpy().tobytes() + out.append(col.numpy().tobytes()) elif self.schema[key] in {int, float, str}: - out = col + out.append(col) else: - out = pickle.dumps(col) + out.append(pickle.dumps(col)) - out_list.append(out) - - return out_list if len(out_list) > 1 else out_list[0] + return out def _deserialize(self, row: Tuple[Any]) -> Any: + # Deserializes the DB data according to `schema`: + # * {int, float, str}: Use as they are. + # * torch.Tensor: Load raw byte string with `dtype` and `size` + # information from `schema` + # * object: Load via pickle out_dict = {} for i, (key, col_schema) in enumerate(self.schema.items()): if isinstance(col_schema, TensorInfo): @@ -342,12 +354,14 @@ def _deserialize(self, row: Tuple[Any]) -> Any: else: out_dict[key] = pickle.loads(row[i]) + # In case `0` exists as integer in the schema, this means that the + # schema was passed as either a single entry or a tuple: if 0 in self.schema: if len(self.schema) == 1: return out_dict[0] else: return tuple(out_dict.values()) - else: + else: # Otherwise, return the dictionary as it is: return out_dict diff --git a/torch_geometric/testing/__init__.py b/torch_geometric/testing/__init__.py index 0a3131046e50..f80b66093d76 100644 --- a/torch_geometric/testing/__init__.py +++ b/torch_geometric/testing/__init__.py @@ -9,6 +9,7 @@ onlyOnline, onlyGraphviz, onlyNeighborSampler, + has_package, withPackage, withCUDA, disableExtensions, @@ -29,6 +30,7 @@ 'onlyOnline', 'onlyGraphviz', 'onlyNeighborSampler', + 'has_package', 'withPackage', 'withCUDA', 'disableExtensions', diff --git a/torch_geometric/testing/decorators.py b/torch_geometric/testing/decorators.py index 29845e02cfed..6e517ae4b14a 100644 --- a/torch_geometric/testing/decorators.py +++ b/torch_geometric/testing/decorators.py @@ -125,29 +125,31 @@ def onlyNeighborSampler(func: Callable): )(func) +def has_package(package: str) -> bool: + r"""Returns :obj:`True` in case :obj:`package` is installed.""" + if '|' in package: + return any(has_package(p) for p in package.split('|')) + + req = Requirement(package) + if find_spec(req.name) is None: + return False + module = import_module(req.name) + if not hasattr(module, '__version__'): + return True + + version = module.__version__ + # `req.specifier` does not support `.dev` suffixes, e.g., for + # `pyg_lib==0.1.0.dev*`, so we manually drop them: + if '.dev' in version: + version = '.'.join(version.split('.dev')[:-1]) + + return version in req.specifier + + def withPackage(*args) -> Callable: r"""A decorator to skip tests if certain packages are not installed. 
Also supports version specification.""" - def is_installed(package: str) -> bool: - if '|' in package: - return any(is_installed(p) for p in package.split('|')) - - req = Requirement(package) - if find_spec(req.name) is None: - return False - module = import_module(req.name) - if not hasattr(module, '__version__'): - return True - - version = module.__version__ - # `req.specifier` does not support `.dev` suffixes, e.g., for - # `pyg_lib==0.1.0.dev*`, so we manually drop them: - if '.dev' in version: - version = '.'.join(version.split('.dev')[:-1]) - - return version in req.specifier - - na_packages = set(package for package in args if not is_installed(package)) + na_packages = set(package for package in args if not has_package(package)) def decorator(func: Callable) -> Callable: import pytest From 8f52944f3bc7fbf5a71370cf75dd2bddd7d9d793 Mon Sep 17 00:00:00 2001 From: Roy Velich <6681067+royvelich@users.noreply.github.com> Date: Tue, 19 Sep 2023 10:04:45 +0300 Subject: [PATCH 1488/2432] Add `edge_attr` support for `ResGatedGraphConv` (#8048) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/nn/conv/test_res_gated_graph_conv.py | 34 +++++++------- .../nn/conv/res_gated_graph_conv.py | 45 ++++++++++++++----- 3 files changed, 54 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98e0776c0714..cad4b17582e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) - Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) diff --git a/test/nn/conv/test_res_gated_graph_conv.py b/test/nn/conv/test_res_gated_graph_conv.py index 193cd53814d6..8bebaa018061 100644 --- a/test/nn/conv/test_res_gated_graph_conv.py +++ b/test/nn/conv/test_res_gated_graph_conv.py @@ -1,3 +1,4 @@ +import pytest import torch import torch_geometric.typing @@ -7,53 +8,56 @@ from torch_geometric.utils import to_torch_csc_tensor -def test_res_gated_graph_conv(): +@pytest.mark.parametrize('edge_dim', [None, 4]) +def test_res_gated_graph_conv(edge_dim): x1 = torch.randn(4, 8) x2 = torch.randn(2, 32) edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) + edge_attr = torch.randn(edge_index.size(1), edge_dim) if edge_dim else None adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) - conv = ResGatedGraphConv(8, 32) + conv = ResGatedGraphConv(8, 32, edge_dim=edge_dim) assert str(conv) == 'ResGatedGraphConv(8, 32)' - out = conv(x1, edge_index) + out = conv(x1, edge_index, edge_attr) assert out.size() == (4, 32) - assert torch.allclose(conv(x1, adj1.t()), 
out, atol=1e-6) + assert torch.allclose(conv(x1, adj1.t(), edge_attr), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) + adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 4)) assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' + t = '(Tensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) + assert torch.allclose(jit(x1, edge_index, edge_attr), out, atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = '(Tensor, SparseTensor) -> Tensor' + t = '(Tensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit(x1, adj2.t()), out, atol=1e-6) # Test bipartite message passing: adj1 = to_torch_csc_tensor(edge_index, size=(4, 2)) - conv = ResGatedGraphConv((8, 32), 32) + conv = ResGatedGraphConv((8, 32), 32, edge_dim=edge_dim) assert str(conv) == 'ResGatedGraphConv((8, 32), 32)' - out = conv((x1, x2), edge_index) + out = conv((x1, x2), edge_index, edge_attr) assert out.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) + assert torch.allclose(conv((x1, x2), adj1.t(), edge_attr), out, atol=1e-6) if torch_geometric.typing.WITH_TORCH_SPARSE: - adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 2)) + adj2 = SparseTensor.from_edge_index(edge_index, edge_attr, (4, 2)) assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) if is_full_test(): - t = '(PairTensor, Tensor) -> Tensor' + t = '(PairTensor, Tensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) + assert torch.allclose(jit((x1, x2), edge_index, edge_attr), out, + atol=1e-6) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: - t = '(PairTensor, SparseTensor) -> Tensor' + t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) assert torch.allclose(jit((x1, x2), adj2.t()), out, atol=1e-6) diff --git a/torch_geometric/nn/conv/res_gated_graph_conv.py b/torch_geometric/nn/conv/res_gated_graph_conv.py index f9bd241288ba..7f9a4ef97d65 100644 --- a/torch_geometric/nn/conv/res_gated_graph_conv.py +++ b/torch_geometric/nn/conv/res_gated_graph_conv.py @@ -1,12 +1,13 @@ from typing import Callable, Optional, Tuple, Union +import torch from torch import Tensor from torch.nn import Parameter, Sigmoid from torch_geometric.nn.conv import MessagePassing from torch_geometric.nn.dense.linear import Linear from torch_geometric.nn.inits import zeros -from torch_geometric.typing import Adj, PairTensor +from torch_geometric.typing import Adj, OptTensor, PairTensor class ResGatedGraphConv(MessagePassing): @@ -33,6 +34,8 @@ class ResGatedGraphConv(MessagePassing): out_channels (int): Size of each output sample. act (callable, optional): Gating function :math:`\sigma`. (default: :meth:`torch.nn.Sigmoid()`) + edge_dim (int, optional): Edge feature dimensionality (in case + there are any). (default: :obj:`None`) bias (bool, optional): If set to :obj:`False`, the layer will not learn an additive bias. 
(default: :obj:`True`) root_weight (bool, optional): If set to :obj:`False`, the layer will @@ -55,6 +58,7 @@ def __init__( in_channels: Union[int, Tuple[int, int]], out_channels: int, act: Optional[Callable] = Sigmoid(), + edge_dim: Optional[int] = None, root_weight: bool = True, bias: bool = True, **kwargs, @@ -66,14 +70,16 @@ def __init__( self.in_channels = in_channels self.out_channels = out_channels self.act = act + self.edge_dim = edge_dim self.root_weight = root_weight if isinstance(in_channels, int): in_channels = (in_channels, in_channels) - self.lin_key = Linear(in_channels[1], out_channels) - self.lin_query = Linear(in_channels[0], out_channels) - self.lin_value = Linear(in_channels[0], out_channels) + edge_dim = edge_dim if edge_dim is not None else 0 + self.lin_key = Linear(in_channels[1] + edge_dim, out_channels) + self.lin_query = Linear(in_channels[0] + edge_dim, out_channels) + self.lin_value = Linear(in_channels[0] + edge_dim, out_channels) if root_weight: self.lin_skip = Linear(in_channels[1], out_channels, bias=False) @@ -97,16 +103,24 @@ def reset_parameters(self): if self.bias is not None: zeros(self.bias) - def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor: + def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj, + edge_attr: OptTensor = None) -> Tensor: + if isinstance(x, Tensor): x: PairTensor = (x, x) - k = self.lin_key(x[1]) - q = self.lin_query(x[0]) - v = self.lin_value(x[0]) + # In case edge features are not given, we can compute key, query and + # value tensors in node-level space, which is a bit more efficient: + if self.edge_dim is None: + k = self.lin_key(x[1]) + q = self.lin_query(x[0]) + v = self.lin_value(x[0]) + else: + k, q, v = x[1], x[0], x[0] - # propagate_type: (k: Tensor, q: Tensor, v: Tensor) - out = self.propagate(edge_index, k=k, q=q, v=v, size=None) + # propagate_type: (k: Tensor, q: Tensor, v: Tensor, edge_attr: OptTensor) # noqa + out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr, + size=None) if self.root_weight: out = out + self.lin_skip(x[1]) @@ -116,5 +130,14 @@ def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor: return out - def message(self, k_i: Tensor, q_j: Tensor, v_j: Tensor) -> Tensor: + def message(self, k_i: Tensor, q_j: Tensor, v_j: Tensor, + edge_attr: OptTensor) -> Tensor: + + assert (edge_attr is not None) == (self.edge_dim is not None) + + if edge_attr is not None: + k_i = self.lin_key(torch.cat([k_i, edge_attr], dim=-1)) + q_j = self.lin_query(torch.cat([q_j, edge_attr], dim=-1)) + v_j = self.lin_value(torch.cat([v_j, edge_attr], dim=-1)) + return self.act(k_i + q_j) * v_j From 5d81cab11c4a29b8975abd46440f3e85c64dc0d4 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 19 Sep 2023 13:35:00 +0200 Subject: [PATCH 1489/2432] `Database` Documentation (#8058) --- CHANGELOG.md | 2 +- docs/source/modules/data.rst | 14 ++++ test/data/test_database.py | 8 +-- torch_geometric/data/__init__.py | 7 ++ torch_geometric/data/database.py | 119 ++++++++++++++++++++++++++++++- 5 files changed, 142 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cad4b17582e4..e84c025c147e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
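A short usage sketch of the gated layer with edge features, mirroring the
updated test above (shapes and values are illustrative):

    conv = ResGatedGraphConv(8, 32, edge_dim=4)
    x = torch.randn(4, 8)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    edge_attr = torch.randn(edge_index.size(1), 4)
    out = conv(x, edge_index, edge_attr)  # shape: [4, 32]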
### Added - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) -- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057)) +- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057), [#8058](https://github.com/pyg-team/pytorch_geometric/pull/8058)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) - Added the `MixHopConv` layer and an corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025)) - Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033)) diff --git a/docs/source/modules/data.rst b/docs/source/modules/data.rst index 25699b3c875a..a2806801ce8f 100644 --- a/docs/source/modules/data.rst +++ b/docs/source/modules/data.rst @@ -31,6 +31,20 @@ Remote Backend Interfaces {{ name }} {% endfor %} +Databases +--------- + +.. currentmodule:: torch_geometric.data + +.. 
autosummary:: + :nosignatures: + :toctree: ../generated + :template: autosummary/inherited_class.rst + + {% for name in torch_geometric.data.database_classes %} + {{ name }} + {% endfor %} + PyTorch Lightning Wrappers -------------------------- diff --git a/test/data/test_database.py b/test/data/test_database.py index 7d3b99a257ef..5d7d055fb19a 100644 --- a/test/data/test_database.py +++ b/test/data/test_database.py @@ -3,12 +3,8 @@ import pytest import torch -from torch_geometric.data import Data -from torch_geometric.data.database import ( - RocksDatabase, - SQLiteDatabase, - TensorInfo, -) +from torch_geometric.data import Data, RocksDatabase, SQLiteDatabase +from torch_geometric.data.database import TensorInfo from torch_geometric.profile import benchmark from torch_geometric.testing import has_package, withPackage diff --git a/torch_geometric/data/__init__.py b/torch_geometric/data/__init__.py index a4787b185ab7..58c144f3da11 100644 --- a/torch_geometric/data/__init__.py +++ b/torch_geometric/data/__init__.py @@ -6,6 +6,7 @@ from .hetero_data import HeteroData from .batch import Batch from .temporal import TemporalData +from .database import Database, SQLiteDatabase, RocksDatabase from .dataset import Dataset from .in_memory_dataset import InMemoryDataset from .makedirs import makedirs @@ -30,6 +31,12 @@ 'EdgeAttr', ] +database_classes = [ + 'Database', + 'SQLiteDatabase', + 'RocksDatabase', +] + helper_functions = [ 'makedirs', 'download_url', diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index c180eac124d6..9843749bcf0b 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -35,7 +35,52 @@ def maybe_cast_to_tensor_info(value: Any) -> Union[Any, TensorInfo]: class Database(ABC): - r"""Base class for inserting and retrieving data from a database.""" + r"""Base class for inserting and retrieving data from a database. + A database acts as a persisted, out-of-memory and index-based key/value + store for tensor and custom data: + + .. code-block:: python + + db = Database() + db[0] = Data(x=torch.randn(5, 16), y=0, z='id_0') + print(db[0]) + >>> Data(x=[5, 16], y=0, z='id_0') + + To improve efficiency, it is recommended to specify the underlying + :obj:`schema` of the data: + + .. code-block:: python + + db = Database(schema={ # Custom schema: + # Tensor information can be specified through a dictionary: + 'x': dict(dtype=torch.float, size=(-1, 16)), + 'y': int, + 'z': str, + }) + db[0] = dict(x=torch.randn(5, 16), y=0, z='id_0') + print(db[0]) + >>> {'x': torch.tensor(...), 'y': 0, 'z': 'id_0'} + + In addition, databases support batch-wise insert and get, and support + syntactic sugar known from indexing Python lists, *e.g.*: + + .. code-block:: python + + db = Database() + db[2:5] = torch.randn(3, 16) + print(db[torch.tensor([2, 3])]) + >>> [torch.tensor(...), torch.tensor(...)] + + Args: + schema (Any or Tuple[Any] or Dict[str, Any], optional): The schema of + the input data. + Can take :obj:`int`, :obj:`float`, :obj:`str`, :obj:`object`, or a + dictionary with :obj:`dtype` and :obj:`size` keys (for specifying + tensor data) as input, and can be nested as a tuple or dictionary. + Specifying the schema will improve efficiency, since by default the + database will use python pickling for serializing and + deserializing. 
(default: :obj:`object`) + """ def __init__(self, schema: Schema = object): schema = maybe_cast_to_tensor_info(schema) schema = self._to_dict(schema) @@ -47,13 +92,23 @@ def __init__(self, schema: Schema = object): self.schema: Dict[Union[str, int], Any] = schema def connect(self): + r"""Connects to the database. + Databases will automatically connect on instantiation. + """ pass def close(self): + r"""Closes the connection to the database.""" pass @abstractmethod def insert(self, index: int, data: Any): + r"""Inserts data at the specified index. + + Args: + index (int): The index at which to insert. + data (Any): The object to insert. + """ raise NotImplementedError def multi_insert( @@ -63,6 +118,18 @@ def multi_insert( batch_size: Optional[int] = None, log: bool = False, ): + r"""Inserts a chunk of data at the specified indices. + + Args: + indices (List[int] or torch.Tensor or range): The indices at which + to insert. + data_list (List[Any]): The objects to insert. + batch_size (int, optional): If specified, will insert the data to + the database in batches of size :obj:`batch_size`. + (default: :obj:`None`) + log (bool, optional): If set to :obj:`True`, will log progress to + the console. (default: :obj:`False`) + """ if isinstance(indices, slice): indices = self.slice_to_range(indices) @@ -93,6 +160,11 @@ def _multi_insert( @abstractmethod def get(self, index: int) -> Any: + r"""Gets data from the specified index. + + Args: + index (int): The index to query. + """ raise NotImplementedError def multi_get( @@ -100,6 +172,14 @@ def multi_get( indices: Union[Iterable[int], Tensor, slice, range], batch_size: Optional[int] = None, ) -> List[Any]: + r"""Gets a chunk of data from the specified indices. + + Args: + indices (List[int] or torch.Tensor or range): The indices to query. + batch_size (int, optional): If specified, will request the data + from the database in batches of size :obj:`batch_size`. + (default: :obj:`None`) + """ if isinstance(indices, slice): indices = self.slice_to_range(indices) @@ -168,6 +248,23 @@ def __repr__(self) -> str: class SQLiteDatabase(Database): + r"""An index-based key/value database based on :obj:`sqlite3`. + + .. note:: + This database implementation requires the :obj:`sqlite3` package. + + Args: + path (str): The path to where the database should be saved. + name (str): The name of the table to save the data to. + schema (Any or Tuple[Any] or Dict[str, Any], optional): The schema of + the input data. + Can take :obj:`int`, :obj:`float`, :obj:`str`, :obj:`object`, or a + dictionary with :obj:`dtype` and :obj:`size` keys (for specifying + tensor data) as input, and can be nested as a tuple or dictionary. + Specifying the schema will improve efficiency, since by default the + database will use python pickling for serializing and + deserializing. (default: :obj:`object`) + """ def __init__(self, path: str, name: str, schema: Schema = object): super().__init__(schema) @@ -366,6 +463,26 @@ def _deserialize(self, row: Tuple[Any]) -> Any: class RocksDatabase(Database): + r"""An index-based key/value database based on :obj:`RocksDB`. + + .. note:: + This database implementation requires the :obj:`rocksdict` package. + + .. warning:: + :class:`RocksDatabase` is currently less optimized than + :class:`SQLiteDatabase`. + + Args: + path (str): The path to where the database should be saved. + schema (Any or Tuple[Any] or Dict[str, Any], optional): The schema of + the input data. 
+ Can take :obj:`int`, :obj:`float`, :obj:`str`, :obj:`object`, or a + dictionary with :obj:`dtype` and :obj:`size` keys (for specifying + tensor data) as input, and can be nested as a tuple or dictionary. + Specifying the schema will improve efficiency, since by default the + database will use python pickling for serializing and + deserializing. (default: :obj:`object`) + """ def __init__(self, path: str, schema: Schema = object): super().__init__(schema) From ce2a2d2c73c9bd710f796f2982b114426fcd8881 Mon Sep 17 00:00:00 2001 From: Favour James <63251266+Favourj-bit@users.noreply.github.com> Date: Wed, 20 Sep 2023 11:06:01 +0000 Subject: [PATCH 1490/2432] Edi description of the `BrcaTcga` dataset (#8060) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Akihiro Nitta Co-authored-by: Matthias Fey --- torch_geometric/datasets/brca_tgca.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/torch_geometric/datasets/brca_tgca.py b/torch_geometric/datasets/brca_tgca.py index e5778bc2ce25..9d04c40b5f68 100644 --- a/torch_geometric/datasets/brca_tgca.py +++ b/torch_geometric/datasets/brca_tgca.py @@ -15,9 +15,11 @@ class BrcaTcga(InMemoryDataset): - r"""The breast cancer (BRCA TCGA) dataset from the `cBioPortal - `_ and the biological network for node - connections from `Pathway Commons `_. + r"""The breast cancer (BRCA TCGA Pan-Cancer Atlas) dataset consisting of + patients with survival information and gene expression data from + `cBioPortal `_ + and a network of biological interactions between those nodes from + `Pathway Commons `_. The dataset contains the gene features of 1,082 patients, and the overall survival time (in months) of each patient as label. From 8dfecdf005f303ba7eb5babb473d50b75a522a28 Mon Sep 17 00:00:00 2001 From: Rishi Puri Date: Wed, 20 Sep 2023 09:52:55 -0700 Subject: [PATCH 1491/2432] [Documentation] Multi-GPU training in Vanilla PyTorch Tutorial (#7894) addresses: https://github.com/pyg-team/pytorch_geometric/issues/7893 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + docs/source/index.rst | 1 + docs/source/tutorial/multi_gpu.rst | 7 + docs/source/tutorial/multi_gpu_vanilla.rst | 176 +++++++++++++++++++++ 4 files changed, 185 insertions(+) create mode 100644 docs/source/tutorial/multi_gpu.rst create mode 100644 docs/source/tutorial/multi_gpu_vanilla.rst diff --git a/CHANGELOG.md b/CHANGELOG.md index e84c025c147e..0cb9d0e9ce66 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
 ### Added
 
+- Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894))
 - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048))
 - Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057), [#8058](https://github.com/pyg-team/pytorch_geometric/pull/8058))
 - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038))
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 46e5fbe26d47..7f231c7a0f6d 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -30,6 +30,7 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many
    tutorial/gnn_design
    tutorial/dataset
    tutorial/application
+   tutorial/multi_gpu
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/source/tutorial/multi_gpu.rst b/docs/source/tutorial/multi_gpu.rst
new file mode 100644
index 000000000000..405426ef6436
--- /dev/null
+++ b/docs/source/tutorial/multi_gpu.rst
@@ -0,0 +1,7 @@
+Multi-GPU Training
+==================
+
+.. nbgallery::
+   :name: rst-gallery
+
+   multi_gpu_vanilla
diff --git a/docs/source/tutorial/multi_gpu_vanilla.rst b/docs/source/tutorial/multi_gpu_vanilla.rst
new file mode 100644
index 000000000000..b9033ac45dc6
--- /dev/null
+++ b/docs/source/tutorial/multi_gpu_vanilla.rst
@@ -0,0 +1,176 @@
+Multi-GPU Training in Pure PyTorch
+==================================
+
+For many large-scale, real-world datasets, it may be necessary to scale up training across multiple GPUs.
+This tutorial goes over how to set up a multi-GPU training and inference pipeline in :pyg:`PyG` with pure :pytorch:`PyTorch` via :class:`torch.nn.parallel.DistributedDataParallel`, without the need for any other third-party libraries (such as :lightning:`PyTorch Lightning`).
+
+In particular, this tutorial shows how to train a :class:`~torch_geometric.nn.models.GraphSAGE` GNN model on the :class:`~torch_geometric.datasets.Reddit` dataset.
+For this, we utilize the :class:`~torch_geometric.loader.NeighborLoader` together with :class:`torch.nn.parallel.DistributedDataParallel` to scale up training across all available GPUs.
+
+.. note::
+    A runnable example of this tutorial can be found at `examples/multi_gpu/distributed_sampling.py `_.
+
+Defining a Spawnable Runner
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`DistributedDataParallel (DDP) `_ implements data parallelism at the module level which can run across multiple machines.
+Applications using DDP spawn multiple processes and create a single DDP instance per process.
+DDP processes can be placed on the same machine or across machines.
+
+To create a DDP module, we first need to set up process groups properly and define a spawnable runner function.
+Here, the :obj:`world_size` corresponds to the number of GPUs we will be using at once.
+For each GPU, the process is labeled with a process ID which we call :obj:`rank`.
+:meth:`torch.multiprocessing.spawn` will take care of spawning :obj:`world_size` many processes:
+
+.. code-block:: python
+
+    from torch_geometric.datasets import Reddit
+    import torch.multiprocessing as mp
+
+    def run(rank: int, world_size: int, dataset: Reddit):
+        pass
+
+    if __name__ == '__main__':
+        dataset = Reddit('./data/Reddit')
+        world_size = torch.cuda.device_count()
+        mp.spawn(run, args=(world_size, dataset), nprocs=world_size, join=True)
+
+Note that we initialize the dataset *before* spawning any processes.
+With this, we only initialize the dataset once, and any data inside it will be automatically moved to shared memory via :obj:`torch.multiprocessing` such that processes do not need to work on replicas of the data.
+
+With this, we can start to implement our spawnable runner function:
+
+.. code-block:: python
+
+    import os
+    import torch.distributed as dist
+    import torch
+
+    def run(rank: int, world_size: int, dataset: Reddit):
+        os.environ['MASTER_ADDR'] = 'localhost'
+        os.environ['MASTER_PORT'] = '12345'
+        dist.init_process_group('nccl', rank=rank, world_size=world_size)
+
+        data = dataset[0]
+
+The first step above is initializing :obj:`torch.distributed`.
+More details can be found in `Writing Distributed Applications with PyTorch `_.
+
+Next, we split training indices into :obj:`world_size` many chunks for each GPU, and initialize the :class:`~torch_geometric.loader.NeighborLoader` class to only operate on its specific chunk of the training set:
+
+.. code-block:: python
+
+    from torch_geometric.loader import NeighborLoader
+
+    def run(rank: int, world_size: int, dataset: Reddit):
+        ...
+
+        train_index = data.train_mask.nonzero().view(-1)
+        train_index = train_index.split(train_index.size(0) // world_size)[rank]
+
+        train_loader = NeighborLoader(
+            data,
+            input_nodes=train_index,
+            num_neighbors=[25, 10],
+            batch_size=1024,
+            num_workers=4,
+            shuffle=True,
+        )
+
+Note that our :meth:`run` function is called on each rank, which means that each rank holds a separate :class:`~torch_geometric.loader.NeighborLoader` instance.
+
+Similarly, we create a :class:`~torch_geometric.loader.NeighborLoader` instance for evaluation.
+For simplicity, we only do this on rank :obj:`0` such that the computation of metrics does not need to communicate across different processes.
+We recommend taking a look at the `torchmetrics `_ package for distributed computation of metrics.
+
+.. code-block:: python
+
+    def run(rank: int, world_size: int, dataset: Reddit):
+        ...
+
+        if rank == 0:
+            val_index = data.val_mask.nonzero().view(-1)
+            val_loader = NeighborLoader(
+                data,
+                input_nodes=val_index,
+                num_neighbors=[25, 10],
+                batch_size=1024,
+                num_workers=4,
+                shuffle=False,
+            )
+
+Now that we have our data loaders defined, we initialize our :class:`~torch_geometric.nn.GraphSAGE` model and wrap it inside :pytorch:`PyTorch`'s :class:`~torch.nn.parallel.DistributedDataParallel`.
+This wrapper on our model manages communication between each rank and reduces loss gradients from each process before updating the model's parameters across all ranks:
+
+.. code-block:: python
+
+    from torch.nn.parallel import DistributedDataParallel
+    from torch_geometric.nn import GraphSAGE
+
+    def run(rank: int, world_size: int, dataset: Reddit):
+        ...
+
+        torch.manual_seed(12345)
+        model = GraphSAGE(
+            in_channels=dataset.num_features,
+            hidden_channels=256,
+            num_layers=2,
+            out_channels=dataset.num_classes,
+        ).to(rank)
+        model = DistributedDataParallel(model, device_ids=[rank])
+
+Finally, we can set up our optimizer and define our training loop, which follows a similar flow to usual single-GPU training loops - the actual magic of gradient and model weight synchronization across different processes will happen behind the scenes within :class:`~torch.nn.parallel.DistributedDataParallel`:
+
+.. code-block:: python
+
+    import torch.nn.functional as F
+
+    def run(rank: int, world_size: int, dataset: Reddit):
+        ...
+
+        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
+
+        for epoch in range(1, 11):
+            model.train()
+            for batch in train_loader:
+                batch = batch.to(rank)
+                optimizer.zero_grad()
+                out = model(batch.x, batch.edge_index)[:batch.batch_size]
+                loss = F.cross_entropy(out, batch.y[:batch.batch_size])
+                loss.backward()
+                optimizer.step()
+
+After each training epoch, we evaluate and report validation metrics.
+As previously mentioned, we do this on a single GPU only.
+To synchronize all processes and to ensure that model weights have been updated, we need to call :meth:`torch.distributed.barrier`:
+
+.. code-block:: python
+
+            dist.barrier()
+
+            if rank == 0:
+                print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}')
+
+            if rank == 0:
+                model.eval()
+                count = correct = 0
+                with torch.no_grad():
+                    for batch in val_loader:
+                        batch = batch.to(rank)
+                        out = model(batch.x, batch.edge_index)[:batch.batch_size]
+                        pred = out.argmax(dim=-1)
+                        correct += (pred == batch.y[:batch.batch_size]).sum()
+                        count += batch.batch_size
+                print(f'Validation Accuracy: {correct/count:.4f}')
+
+            dist.barrier()
+
+After finishing training, we can clean up processes and destroy the process group via:
+
+.. code-block:: python
+
+    dist.destroy_process_group()
+
+And that's it.
+Putting it all together gives a working multi-GPU example that follows a similar training flow to single-GPU training.
+You can run the shown tutorial yourself by looking at `examples/multi_gpu/distributed_sampling.py `_.
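
The tutorial above presents the runner in separate snippets.
As a quick reference, the following condensed sketch shows how those snippets might be combined into a single script; it assumes the same :class:`~torch_geometric.datasets.Reddit` dataset and two-layer :class:`~torch_geometric.nn.models.GraphSAGE` model as above, and it omits the rank-0 validation loop for brevity. The maintained, runnable version remains `examples/multi_gpu/distributed_sampling.py`.

.. code-block:: python

    import os

    import torch
    import torch.distributed as dist
    import torch.multiprocessing as mp
    import torch.nn.functional as F
    from torch.nn.parallel import DistributedDataParallel

    from torch_geometric.datasets import Reddit
    from torch_geometric.loader import NeighborLoader
    from torch_geometric.nn import GraphSAGE


    def run(rank: int, world_size: int, dataset: Reddit):
        # One process per GPU: set up the default process group first.
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '12345'
        dist.init_process_group('nccl', rank=rank, world_size=world_size)

        data = dataset[0]

        # Each rank samples neighbors from its own chunk of the training nodes:
        train_index = data.train_mask.nonzero().view(-1)
        train_index = train_index.split(train_index.size(0) // world_size)[rank]
        train_loader = NeighborLoader(data, input_nodes=train_index,
                                      num_neighbors=[25, 10], batch_size=1024,
                                      shuffle=True)

        torch.manual_seed(12345)
        model = GraphSAGE(dataset.num_features, hidden_channels=256,
                          num_layers=2, out_channels=dataset.num_classes).to(rank)
        model = DistributedDataParallel(model, device_ids=[rank])
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

        for epoch in range(1, 11):
            model.train()
            for batch in train_loader:
                batch = batch.to(rank)
                optimizer.zero_grad()
                out = model(batch.x, batch.edge_index)[:batch.batch_size]
                loss = F.cross_entropy(out, batch.y[:batch.batch_size])
                loss.backward()
                optimizer.step()

            dist.barrier()  # Wait until every rank has finished the epoch.
            if rank == 0:
                print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}')

        dist.destroy_process_group()


    if __name__ == '__main__':
        dataset = Reddit('./data/Reddit')
        world_size = torch.cuda.device_count()
        mp.spawn(run, args=(world_size, dataset), nprocs=world_size, join=True)
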
From 812cf19fbe28f8d75647dc3f8ae6a04f310651a9 Mon Sep 17 00:00:00 2001
From: Matthias Fey
Date: Wed, 20 Sep 2023 20:22:46 +0200
Subject: [PATCH 1492/2432] Add thumbnail to multi-GPU tutorial (#8064)

---
 .../_static/thumbnails/multi_gpu_vanilla.png  | Bin 0 -> 53490 bytes
 docs/source/conf.py                           |  1 +
 docs/source/tutorial/multi_gpu_vanilla.rst    | 13 ++++++-------
 3 files changed, 7 insertions(+), 7 deletions(-)
 create mode 100644 docs/source/_static/thumbnails/multi_gpu_vanilla.png

diff --git a/docs/source/_static/thumbnails/multi_gpu_vanilla.png b/docs/source/_static/thumbnails/multi_gpu_vanilla.png
new file mode 100644
index 0000000000000000000000000000000000000000..d0f3405d36e9cd61f97ee77f73a163c71b4f56b1
GIT binary patch
literal 53490
[base85-encoded binary image data omitted]

diff --git a/docs/source/tutorial/multi_gpu_vanilla.rst b/docs/source/tutorial/multi_gpu_vanilla.rst
code-block:: python @@ -51,11 +53,6 @@ With this, we can start to implement our spawnable runner function: os.environ['MASTER_PORT'] = '12345' dist.init_process_group('nccl', rank=rank, world_size=world_size) - data = dataset[0] - -The first step above is initializing :obj:`torch.distributed`. -More details can be found in `Writing Distributed Applications with PyTorch `_. - Next, we split training indices into :obj:`world_size` many chunks for each GPU, and initialize the :class:`~torch_geometric.loader.NeighborLoader` class to only operate on its specific chunk of the training set: .. code-block:: python @@ -65,6 +62,8 @@ Next, we split training indices into :obj:`world_size` many chunks for each GPU, def run(rank: int, world_size: int, dataset: Reddit): ... + data = dataset[0] + train_index = data.train_mask.nonzero().view(-1) train_index = train_index.split(train_index.size(0) // world_size)[rank] From 82572c3db05d0a566b17ad00357d495db2345e10 Mon Sep 17 00:00:00 2001 From: Viktor Stenby Date: Thu, 21 Sep 2023 12:08:53 +0200 Subject: [PATCH 1493/2432] [Documentation] `Node2Vec` and `MetaPath2Vec` (#7938) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Part of #7892 documentation sprint. I have added the tutorial to the documentation and started writing a bit. Will continue to fill out! 💪 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + .../_figures/shallow_node_embeddings.png | Bin 0 -> 63999 bytes docs/source/tutorial/application.rst | 1 + .../tutorial/shallow_node_embeddings.rst | 145 ++++++++++++++++++ 4 files changed, 147 insertions(+) create mode 100644 docs/source/_figures/shallow_node_embeddings.png create mode 100644 docs/source/tutorial/shallow_node_embeddings.rst diff --git a/CHANGELOG.md b/CHANGELOG.md index 0cb9d0e9ce66..2999faeffb03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) - Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) - Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057), [#8058](https://github.com/pyg-team/pytorch_geometric/pull/8058)) diff --git a/docs/source/_figures/shallow_node_embeddings.png b/docs/source/_figures/shallow_node_embeddings.png new file mode 100644 index 0000000000000000000000000000000000000000..c248e09a41e1154f52e2628cad479a821f50ccff GIT binary patch literal 63999 zcmZU*1zZ*F^9Kq?P(lGo>5xu|Bi-Fyhm@2t~NFXZNYunR#Zu^IbkG$ceu~!b5_Afq5kjHh zu!5wpFo}YlwTYR9F$~O`c*iJtYX1)eJ^Y3--qGvi`v4cUt|Q@@-#zac=Vc5F4FdFJ-5l7oO}<1GJ$s;I313gl@-(; z410(;SHD0JJ(GiFgl88M(LcsNrDst#JFs?f+@IaW?mi4&Yl>F*b|EPyNUn5HMUaV! 
zxWvIx61se9`i<%8*w*Gg;9mIp=3>2|r@7AN#kNWQ!m#^+>9wDOyI0}Mfh)7IXj{s} zW*yE_DdCw?%QiV-S9~LD%UcG^xtV9wEkRX-FU)=$kOgs^JOkvcW~?D;A}b3+3!Eds zzy_JYzyW8lzy}Zb0FNXl5atE&6CL=7WP<)&ea4vi?9Vx@!PAX`%EFS8z)xjEJ7Z%j z`;XQRhbg$}Kv$DyDjE(NvNBwT)|QMPjI0fe8C@)Go|?e$xNrfdmc|YrNL(x}tn9g5 zc**{`g9|u+`kIN1qd`HHIL_$KsV`pT-r6eNuUvuC$ zUb2r44mMm&OwP{EjLxi#)^?^$%$%H@Oz&8jSXdZi`oB1va;A@MN%eP(<}M0t!NFfalzk|KgC zF0gxPFZ3}dCVS+$5Me=|7Tx1fKz=Hsud(Q) z%caxs><^uR=y-Uvf`WqA7pKn)GjBnm&8BaPE= zd(dP&lFq9FA#nZ0@&2j9d@>MGb-l`KGu=Qq37`LSWX{8Vw54I^N|$)*=6{ERDqM5X zj!-koKcfKy0*Amg!n%>Ew3A7$k*V?Lk1i}iarX3cZiAob-9IjAy3WvcEs+cKuQQ#lWG;m3NxQ-92R3bS(G7azc9s>-09Ni2Ci zy&ilv@pU`6{QCO&7(1cA@`@0FvC_uv`;6|q_l01oS#cC&H7pc+kg&UJz<|?MAYg+h z0=q~yy(x*SxBwqgdo_ieAck`m*H|(aY+W+DmDWv2u7C^ls`6a)Lhyf&!4l zD5}1#Dp7oKef@XY>J<8*4X_QL@z9IF|1b#t5IFto7~Mb*Fq%!aY;M=Hr5T@(;S%!Wc&gTr?zz?-S$ zDSLWY`_Wrkh$ND}4>$^CtydGJZ(lVzB{o{lX`$Nt!_fn?(2zJv`=fI{a25#dSoCTO z?Zcn;;-=SL<0Y<`*~g_^kVSoSgx*y<$=pW}Qq+5jA6x&7^1Z+C+c&ybvbC**U$c?? zV?2A(BwDOK0VSFg$15LV65{liLMtw_5c4Uz$L6GUdUvF3f`&({tsk@IPJ*@2GFMlz z2L>f${xu{R#5fTW-4a@6anyl=1@7wipBKTby{+U%s!sFhsQwUUK?W^J1_rF96Y9WcKs@oi;%-49lmk0y>RxUr%(;cjI4s`oCkzk_d!qz2BRc-7)igli}l& zg?|z({WNFvfnLcc%m3$yOAruKiXpf*4EPa9OtVj8v9%0WSyh=mRIWIhX|FC=Asu3l z4m3*M?bnR{(+HC!3LA$iT8mxsY{fq&K3#2@nxBgo*75YTL~+dMt@(IW9gV~Gnf)r< z$?MG37}OX1f5)rG^DVAsljLy~5=x|xBJgC9xAg=OrKgWVdu^0ayX#D3r!vY@2BShk zQ|Q_9B>$7nfdmQP>KB50`<(3VE*9x8O`fo9J$}{#vIS$gCDlYMGCZCh^Wi=NLx~}w z0}M1&B7dZ+Ki^SnnULQ?isY}^F@KHkJ8^klM#JzNH9Awx@<-3oQkpHt`Fue=$N7ov z(O-3jM-##(2(A_D{1%oxK|~8fmofNgQO^>^qan z5M8#jX2W^K`zQbQh-U-$cU?J8j+j>b@kV&~J2*@T#U~h%O_nictu_0nkhN`*G+TqA3V)Qi!#`V-~C+3Jt#lTSzRrc z?)3e;Dh-4mgvg1V1KzP=5@wAOp6_08~xV|tmJbr`x;6XBmmj_HewqfYA|8MJ4*hA0m;#(=>zcEA7nJH_?a zy;Aat!RO{14!v>2NN`dFW!qJ55@~h;k%?f46_YynNyXfZdBgw^3s? zg4oa#n-0s0<0Wi;#5Hu;E0S+i=Gi4L4=`)h44mCruJu&$Avsugp_7_?IUA0LrzP7f zh%x!SE9RQ+UUeWaIk<#{|G82b5>*CZcEi!t*`H$YtKbnicwn<-4#*f@pkihCzyuMO zRJ{V6L{-U~oQHX?_TT966N>0h(n~srK>T^UNpP;MB!V;QTs?U*a{Lw08fPTj# z#OnOjp(mSCirLdUD#}g`rCmU@5CzGJwbz+2n6bNsjaw~t^obqRcN(vn_2}S7g#EW5 z?HGXw2&6-;Bztr~|7;;Nd_w^LXFK#M!szYmi^kAmS2VLd z57@jrdF>Ox>~Ws48l6V*%IyqWo~g{E=lbgz+;3VnH#Lhtu992<7L-?K9gm^){@17y zY#`5sB%(|3r!u{_RPU{AMiDu>%K>-S&1Hl6pT#bZ!l6HH+a>rER8(_0Tfm?Kig5b2 zVJe|((MyTQ+LScLR03T&dxI{||4dE*7a_Z=tM9?po}8PZ<)vu_4Prf|Z`JK(@5+X> zw!NKGIu;fV_cN2rKVuxgMHFk3X>J!*=Qq6!fSXT)p=bIPgwnf$C2i(PvzF+q+0~B_ zHuTq_O3CYA%2!cbpGnd(ihL6j2V2KbCn<1O0j!d1dtr7A(GD5de2jw{*{v<2GUvRvk7)}Y1{v160Cq9wLWP=-BTcfbM zhgeGCRE!=O;GijSQ9-Q z+a`tGsT(JL<5FeNp)26OkBa^#2?`%TrcQ^WFl+u;2SJ2@z}lx{-ikkHP%?|7ybgK= zn9uun_5V{fClCT9U#qddi_6Y810*1s$UzfuTNkEFkZ?mU`GTFPqD@bJCHH4DR$v4L zsWT?NT-1&aC6p)gT}3d;Gwn@(yeFag^{WePpJE7jPm=;p#s6fJ?*ZCE`P4u2>{jHX zMS(~CUNqd<7A-HQAWG3j06eWIFmrLgzYCp*lRKEDOxJBrqh=QXGM^lhD2L5z0?0)Q z;Gg2NYIfhRSmRCUXbt}{<2|Y6@r#leo0CsEW#5MZ)r<3i=mG|cb~o5i1?=!!6I=8D zDP(3U-012Wxxwl4DHclXydG&+=d2`g$M)6cn8!I#z-i99DNKUjxK!D$3rk6rph7xOqt+eHnw= zm;QINf*`1GCo6`)v(~TW$N`9pI>o9tHx^B3+n4HZtR@?j9>=08YwO@nxsts zUeiou<~5>9lz?3Ar?Lm-%F+3^iqEkUk-YR=xc;-d`e(3LqvFMSYbeSa1d{Cq2tX*; z|2-!(6*{;ZvI^}2VfDpM_wlQITR@8 zMO2$l4X3)_oSE5&G6gwmxz7o?ee z_XeGv>pyB4MyuudZ?!uG-Ez9R1hM@(|J_GzP(TV|?=7ZDmOM_w6=mM-45hFS{QN3x z{4-Nftf9TNw^w3&sx0Gdd#tCrhp+eduW!azM=LCb-C^^DH-}%C4cAAdnd0*@yk@DX znVFgGW{eY)rq=snWx1)}*3H|6az3`+9#iPX#a|uoz1pTpQ3TF5Qu!)1=weK$?XQ7_en@PHGfxK4F;k$ zG8re)e@t?@8LrlP$tv#i<&w8vR+e38ljr$_x>(=u>56Xz+)jfs2f)NwkopOo=SlPA z%$kFc;*uD&YTv+jkD^RQ939zk+N}qZlaqgOx}sucRvb>}HCJ^jXQH9$YtBp+(ASat z1)=$k)Bo964+*_u@*EcTl+Ni4=8 zikSK|xbH2#h=_>v7OEBv!U+orWg8D9pc4>Sq}il4$_v_@qOzAHgUum2T|1K{bdkIl 
zUpOZVl&N+(t4v3*<>lplS&Uwv?eFhfYPT=5JjY?7=26hUyStNN)zj0vKA+N~;N#PU zPL*0-tfW5PZSjj0biAQwaXHdoQ<`jaG#_B(bUDhLxwyQnYmD5Vtr?(}*&0s6I73Zh zHI1OxZJxGRa@|VXyl;EFhb-P)VGxFvd0dXKDV3?oa@sPAMHKC7lwIs+2PW1TnMb_~ zfRo5{Lj=sKw}4P9r>i}*(3!-kkqg*~eU0n>6$p^1Gn16lw|5->z>xm}Da^FP*^G!ka&tB2x3P zTptOEA;G^4=%1^4G_BogxrkvyDrEQoz1J-NP&KZm%l@)W1A4W-XyW4PCW(!S&zk}0 zzCJ(bD$NGE6T^!7Bz z@x-TR3Zk=YR^)vJG?karh)Ff8sNd>k-tb7=d3usF@G29jT~>!SIz}_!-$?m$)r@n+ zrYO!RPqDlhkPLV(Q3A?MLZZRXl%nhZrQWvY%1@NT8@jI@CE#Qd_yi#|$~Q|kpU~Hq zvd98yLS;s{|@$t4#{))*8R+T-C1}6)C!8)X(#Pu>cy**h z{$rHG=zTg-dUtYcsphoZJa4iXRzsc1Nc4XHAT2yN2Gp9s)b5T))x^ zr?h6cah3aO6#DyJqMtxlS587~>_6Gv78T6KI^f9?HwF?10LKN@u4}zJk=CrUN=qUD zjkurvCS`zBzuWeIo#N{mj4N1vT~fD53;63gzu!N<`tzI)vQCa9yb9guK1n*Purh1|r$$4UXu6W%*3nN&Kqn&B3A)tE0uH4nHzbf3a}1vulvDR^m8DiT)-EAJy3Bgt(5%272jamT#5+aQ+Z6VDCh& zqMhmq&*}9PC)pT)%H7)i{Kmg4DCtWkfz1n8K3Vo73ipQS?NoByn#U)0R~lU;ODyqRSL{vnQ4qo5LaPBEstpCq67mMh<2 zM%K_Ji>*O0@_mWyZ6}r7z_5Rik)B5VpyxGwfjYNBEfEKOTd5|Ku$x1yVvAdFSzY&( z20iL*WKoEsHj6!0n-&u;&DzbGN`R>6_e$1tUaeN03fhcOC25AwkvhC`vE(ke#v`f? zHoC^-nfz~q5*QOor{F%eFJ30GnZ;<^4}q_>8MT z$#Ps8tVLjJwBiHfQq37yBwfNGOlQSJxdy3FyYC}|fxIt_(kEY624_GBGTM$Ih-xOK zMg5v41DBQP5)phP2*_rv=4|+Fc2?Z zdaaeSb|Ed^caDP^9arse>}xC~R@1e{LnwkMmIk>7Eq{1|%vr~=p<2D@Z2(*d z8Coj@!-UP8@xgHyYvPC5g|e+}a?Yy2Q;6Jdi-%8!dG zUwl+-!7r*G2e&!u7#W`EYrQrQV{{|=f5F7G_>}odr?t?zWUN|E#zMsE`w%Vu=f$uW zAALA=6(A&!t;bxhAMZFMWhCR-90i3T8!an66auVm6F(0>x7CU&JT|or#&4w5Nzk?U zdT(E(WNX@ZV!ZVmACef?2bs%64j)I>4Hx85iX5GtpvfEiB5`gf&9IzCPQ)Z9;>tU^ zZZG9jY0q9C;t38jb4OGbBChAJ;-?8QIfl9j|V;4or< zFfy55mpUrAkqmH{UAT{jj}If`;<)@?&6)4WifQvhLB6702CBBg4k+9kpL*BuZ6C7_THo}Frjdot0;%lKTbj))O&Tl9xLDfihW+6#q~oz`+`Gk>STvMLbA9e z76LtugkZB#AG+^3JV>2MB|Rt$G{pn}iNSn!5-%?6xMRMBVSazKd4XK2Bb{8N3a|H; z>3)m9Zl2^(X<)Es2dc)M83>_b*~s^zLM4ujW5Q-s8xSo8TkofPR?4oKDA|63Jql~BkK7lk8~pU;df9Ft2MJZ1H$a^fmfit4?2Q(NSN5!B!Mh;*Bgn*v>ly= zAVJ5Q4kffX+xor67$C{Y>6;OL@nb-8RkWFn!`o(xaT8vaQaT>}eg|s#Wz%j|CQf^_ zDG|a@IRRZ^SSr5KaU$d?mV|-yUXJjXXpho&y^}=;rT&)4Iz@aH8po8#c&inzBKjN& z^1{Yqe)jO8X~VFq&wfZAVJZ@j_QMnVNy(b-s2VqLB|Lu5jiGLLK!K4J)G}e zn8WAUHuYrGH@~Y-zpk!UOZWIb0YyHZh!zWts--O#5aqMuqO#T)?!#u~*;-3;7S|de zZ-LknebdN??ZliSL94I|2QT(+o5bb0j31r9KW2{9wvW#?=f#vLT&yPKCN!>PYRyb> z?crzge)R8~1y{SP<02+jj#4wZeJca+gbNw%_oCo`mNYv#n0Hmnfe-06_J7{X5ymP% zI?76}NK8)h)wixOP8052cQJ)q4};!Cx}LJNUZE;hc2vfy_B5r=DRysc&$cd-fKqlB zUQ=$P2^oW<#=eMcZ(h{{Oqaomq|=pZJHcV{q<#5Dpb7{n;dby`owc>!&M#hi8eBa7 z<-LNpN-^3&P`{U`1_ogf2d61mc7vL&uJ{qm|VQldymmT=VuicbajvpPr zFE!*K#2wshy=&%iS(vaZ!0ta&U?3r-kbRi07517dmqa!us$CpE{`3C%j*02tan}E~yh&`m%l16$!v9*cJm;N4wK;t2W zM1Ki4AS9Hv6t73-DZRoXkIFp|u-_bvK5TnjJo{9vZ?n(kaaEov#Y_A3?e5fWSsSBt zB4a!*tBFQg?-eE;QsSJ%L^7%wVg(~R)4-?9-gzF6FV$A#?HbHz>->#+#P(xLEwbig z^lT@e<}X*M>_*}WSPc74v^PZKu`m?AG1!gQ?KQ=F_r`>48Ey9S%No1e8&Etv`!-TH zpx#&7i%_(q(%BZs<8~eo73^zq^p}cH+b30K13MHqI!sQ~6YI6jLpZ(o!0SgdtS6(g zQ_{JU{WBhC<%mZ?l_t@6Ua;t2OC7*$Sgv%k46?SwAjv#A32m1z%5q;y4dfau2-u#2 z%(yg|Vk!ynOJC!SdK2x&^%!XxSu9LD0#3W^#dRa8rB+Ybm&o)CHbA(G3Y#r39B7Vp z5Tc`Nmu+_6c(OXg-cMJzXaxPC-=A_p{Oyxa+6Gr1(;`6GprM;{d6NS%(l}CxIZiW2X$*8 zfgk6kBN|gJk$9=CKhnR9^WReW#hJ_e7CWgBYy>G38f#O2QB;()q2ap3elSZVS@>p# zma*+{n!!a7O(8O|ME}m^JAD2Jo2am|sUGUL+#Qj)IXfn4i4?8y_g<|YN!#8&-6b1` zeYNUVO|-uC!8V)~o<(f?3sVKTzrX5vcl~$V-NV9 zMLEB_A{=-hotmxU zEHfQ0x~72FH0&UO^Y<%{Nm=Pt8SYYd)g^huxVB)-OVVX^I*CTvRC2z;DDKdA-Cde$ zA#3D7y~G|cFPjKe0gKveYDakOS{JBd86U}8$e;^-F8bTj}{wa@vUT+ z$0JHqAoNQxJYz%drO2TJZQVMQ(u*#d>1k}oJUQ@r?XOM)tW847oHsZsUGiW3SenX{ zVbaSkV?3hg(tK92HjxFv3D7-V)qA%%EyA!|U}&|}m;;yJQRQ@N<$Bzshm;c^`d{`D}mmqBNTOH0jE6cl~UuBU?+mURo1 z`E7nO`sK9>5}72xPDn9Z&yU43Jd&@EnpiOpDhfCbp6KvOyuYR3bfdjA{WUAbfZOwJ 
zxWC69r@yG`f$DH5iSD-Moyk|+fnqAvVKFJUWZP$_lhZizstWX#4!iHGW||z$hr$vl z@FNJ~H_yR@l_l26Im}Bs$?eCl%)zA$oo1U?mvz%-frGwJMH!!4^zw7d3;;8ENx-3y zE1$vmlsWqPO~((XapS9(r&zbn?D^AataXPk;NY3+lEKc`XXlY8Ob;*M7r&ZuCjuGG z_$w`!OeSc&iu1O=kbi&q2q?zyd>OYjour0UXU**IXZ8TJAp>-DE(<5cNS3B*g!JiXT#3HM4FI~shQE(`tqLay1%pdKN zXH+9($INmE{++lzx=+Uw-t#T9`D|30@^nfVm6sgw$gLH%BI4pHpTQQ>KYx^RY|(S> z?2ttj9Eh0N+D1k^s?9w zP+uu|#PJUQ$A;_YnvJeO^pkPJDHlXh-+3D)d>$e`e3H&tbxs~eXWMs5ctANCJuLl( z?0-t=8Nk5@*k&DtkAAc0}I0_3ge{x;cN^@6! z(6Sw)c=B^X9n-bB5(NNMq?`FPgN*ZYY1I`{(FQYOqK&n?pk{Z-o5Hs8pnTc!o7LB& z^`_QAF9g(?gY?KyqJV5*0YTt{>smOwj86QVb=!graldq#Zi|dbIe^J(MrWcR$79o> zRK3Q>FQQ3TK0T}+qnd{yo9s=3pKNAz7s{`WD-TXD9M6H4yC+Zb^^KnOA>hS5@nQf% zr@D`^od!1KQc)Q8wkD_jO1YgxJ+ElT-AP1xT7>4ylam-!zN>a4B=>`ct>XRqV|Lhi zpSF|!qR|p*9KqSGLNtwbxAOyr!{+;3@^vgwamcRPCb&{naah8{L)w@{RIj|-;iFxH}X=2%ljk8XO;yJZ+8B8SOZF9;7Yjo0r}A)@)KC88^5~ zo|g;VF(-GYBhvc%nYbbVAiB^0hN`uL(0PGj$@?*_Sf@>w%i*^M1*_S3e)M26SekDP zf-BE+>=LhCRml`%vl-XOZ>;K6XSGC(x&0lo->RF&6;OFJR{v#%mx=lxZh<3OFdw8j zpq#5)~5Y)7=lE*LorcV&(bF z3TIQ&?(!r+@_g5E#TvCLi*CEJW+gPbMM`cLzex|h--)UfYtW4FUs|6N*_CSeULBPe z{@75Pf-04jYV%tJk1MLnYZVkEJy;Pi%iPpjn>VG*kt>wIDUCZaUz8itF})lvn=C_M z0^<8+E4TSz8$g3gTxkvrtlMnuTR zJ$kT94@2)%hwvUwtQm}E_gGfn9 zbC2Ho?KtmElYSEmX9*d|mW&Ncd7T^Tqd5n%WejY$8hgUR!|>VV@IFFkDWnsTppz*N>pze_4Aca=g~w*j|Fl+K87)#!XoDR*-_<~qTQ1N zdmU<;8x|>K6WN?D&&fqZQ#a@oN)REKff1#Njo=O7^jXm*BqgJRgAulYia@sc&bZY= zW72edu53zwt>qlt_xX>?3kk5MMp`z5(g}>QO^1!jRl$x4#&~+G2rZVO(KSI^fYhwMJB{90W_C~9YZ zLM!25v)=`Fuz20Md>*4+Z%qdO21)>NK+&TYz=z>g_elX$Z=IJQ<~8h2OnybDb7gg z*LZVjw0K-1V)O_T3 z2NV%1O@{Ec#|vUJL+$|)(02x*vTJs{3qR>DZHM}&oU$>Xe59im*;baQ#i52zf(8hH zEke(>C!}*nzI+H#hg{SEuuK>Y9jh|SIedZQx4^*+e!WI4MJkiwk+xp@5k7{+MZSiq z$##SK@5bgYKwcmxw~tO2h?2mC)C6I{6;wPJX_q*i(${dtnJq=rYo(j)^T);H5e*?98te?b`cKt|yh8 z>tHiL=n1(U3x-!PUH4zsATdv7N$~IpfUIOmxZf(HlfCU~v*Ha}1;E{}3I6^7xNCzA zioQW6Ed79HgeaVI0X+T`pWA*3)4Tn;rDXimcslj|wMc#|?X2eWz^fN6xCf=*AK>9b zegIVz8Nx4@i=I;id_d;aP6lV(L&oiVz?P5!WYqmF5W43xgJ|}rsguM>+}5o*nyr^b zr36%oVMG*qkC$4mTP5s+*RtcI$3??!UN-k0mdIZ94>!BURS>av5r`P23G7R5J(%Ow zs=t@j&d22K*AYuJPgjD7k){o_yZW?Mt4a2`s_iAD)q*Y>a9!0(ZlsAk2bqzTb15jX zgTC|tW<8(e!)L@*>s76Zvhzboy<#VO#2f$wI9~ax>v^g^MY zFXsX3HZ1NJ7P~|C#?%ZxN=;6dW|;|}@Xz~rS1Czc+CBzjSB*){!|Qzv6@e*-+_ggE z$CX3Hgp^iYkzj^}muk*7zM5$QdL=Y=37GMO-&yJm;}xobqs`)5RDNoV(iXY9385;F zK6JBRl{wfw^gJ%7dkt?`FV1%-lXg9M91A^8wzo{?)XcuufIy)2i#hAq4DV}IPB%P? 
zyyZ6*{28CQ=z2OL@HSET&&v0|Dp__yF1mNi2I`hPlnXC5%*N{_mF;Is7q|!P-QrVY zqrWInOs$9ll1Pp6X^b*4i;158CQbl7Qqd-Wa&o)@wkdHkef%~S(_sS5Hm?@jSD_&x zEbK%1c8{eH_P~4=MNK5>52sA#gcovzT)oO8t(o2qETO2S;}IEN7o5HnFHV-q+M+2S z0u>y%d6S7wv)($6r?gU0&u@Hh4`U~G#9mC5o0q1MOaW{?^6jOvvU%GrLW;`Niwy^J zsAj02g3=NPN?lVBv8znCj%`5WH-^})3~${=$9<@QVhH!+*a%;20^YZ?&6F~nDf*=f zplXfTMdN zc+sxpV#AX$oXp#g^!dO(TYswhmdYjEN8EUw$0tfRHk!`R>IH;5E(hFiz}7(8kw5`J ze6@&1j!P|dB;F}KhfUQsoJLoq{%D*G6uH%Oy{;!#lef5?4qGn!GeZM6=hTmc!*Y)T z!U*{0n-ecGrof$Zxv$J+D?h11;~SOze;ZamAG9BNXzFF`cuNN6+fwaQc|TXT&oSbs zI{sPEaXc$N53Tilm|GNFGmo?n_JV zbA>+N{s<&ves3^(e|*M}m0B}RckQl!uexF*XW8;yxa44zdQr|Uk zJ286xyls8NEgt(+0q3w<#PGz>|?G zC_yI*jkViTb2j_0ipsbmyMX#V`CGcR=KFwsm&g0dAFDC7j<<42m*!Thz8$mJb%*vt z&z%@|!puemb|AKigd#tfLLiN{RHvsyj<$4_9~NOXVu-w!VHo$5!a7u;ge_NBsOf{V^sj= z4GbctJc)S6R4HStBo>jeS_SQ=A!z`h;2?s9gTBot1a-mB)XvtJo33g$Lht-pdwt)E zBNJ}EIBe7V{m|i5`}0kN4ypS_hnFWg7_3UI~d~gduU#DaN7*`1wmGKEIaG_)<`IdSw_1ardumlnnI~g49}kz*OD+ z-W<+WLqw{>njP4d@e-eps(at!-72X`f7et+pjTqgmrK$vqTeD7ouPWX^j6}|Ke~ZE zK0eMZsFUALZwa7#%dcmRYd^D*Al-YI%mGtlK@$0qvbJL#pl?jBetCAYWuiw{tfP2K zf9@1DJ5$>#r?_8a=Pd>){Mat1sv9%)6fLTOl6_eYG`Ze&C~a~`b1&eV%DmTQ*0j+@gOSfZMb_Dr$hmPS@sFJqKO3_N~b zET<4vGmxNy9YzG*-%yZ#6p*S0!gVB7R_RnrCBNMCBR6~9sP@I1#TeG=W z80ykpsL}(J1LrNk7?&GmLpF3o#!B4Lh1Jafw89EbLGs~i5K#;eS0Jzi2zEyjGs*Lw z#}&MLJQ-k`+n>Go{RaEBG^}KO4gB`d)%lbT9dUkqHcL*N@-wH{zV-$pw&0drKqHwV(WZvc8F@T~g=IRq1>&PqDV;wr{w~NTZl?5;?)n&{J2z3t%=GolzJf z@zU|E*8dROGFd>HbP{w#j;y0OTL*F5@8d-nF9h>DHnvk=M?bM4La1!)rfQ+yZ-l#Y zuE48L6g(=js7&^q$ltFHt48?D#lou_%i10qX~?g?zaZu?x{vSgxLplpN}Iwm&_f~Q z$_6X;JBL16Z%k)vTA38T74O)1j9kHdRv=gAT0ky?k?(c;Q1V&3L7IT-&He2q*6*72NXZ=oCo-0--eQARyr5?| z#g!&Jo$+-C3hF`4(-B{VnHcTfMMpUZ=Jm9ma#Kdy%Zf8uT_I5+f^fW>> z`HrtJC~BFemoNF%B#ROlx(0gkmPt<=tXj0|O4cCinZ=ccx9@jpz|WqQ%)zr4pKC#{ zq^dR$KqYtkCu0rURA*0HKge_uMQ{x)mknvlAim zKKmeBc%&cQi8LJg4d8irMq7q({d{9(LW|IQIrY*$ppPpY(QotxF0;X>8bo>xs)=*U zc`U<{+mKlSv5IUS^;&!?T`iR==uCz&CQeUCLjg8gGc~|sO+qBk;C;KDoZQy<32=zf zTaHhWfJkOKpQCt=`@GM?*_b>f8ZluOUuYAu0e@-Z4ODXUONKbHYrXfW>7H5s7 zhSN;kmzPN=Jw+`ol)HMe2p$Yio&ix>!u<@yWsId3iVMRe1wbp+a6CVhBg*u2EZ3NH zW>%p1L`?tGsT$yp;O?3ZTUl$Q6$Z#jNZNl?TFh4W!7VpI?w*Qr6krO`2x+ z2iBS?U+FxhkO~eCe%Hkxo$fXsyuz}!`-r`0wzw!Q9*H?_0-KSv{-I`3Af5t0K0I za~x4Hot^h1>@#vDEK2L}Z@6H)Y>UNa%_tfDlXdTYWGI85K$5@Rc)6mmC z+3-k?l6KUFWn=dZNtAcLxN<0gxFdlChpW)V(5uTNi!uNi&%`_{=pbN_9asA z3DJ$ph66y5ps>^p1(=89a7xO`03LnVd@5&_rCDKv!LOY${&sWO@>NtfpE`E76C+JR zcJ{TfpukrtEnobkI$C;;&F!RJhSVHJ$kj_aHAz3_50+A6wfcHMQ1*m*Jdq>Mqw;j? 
z^d7xNo18P0Xy6hsrl1}Jj#IjC2~}YscXMR2{ZT2`gLchqhvE@L@$tzN^b{1D(5V6j z!ya8yfFfL-juc?^E33XS16#S5@*Y<$IdaNM(?wwLwoo{lDMNnhd_vCd6^t=#_ z-yIDxu{w4J&&BcBd!DGe9B_WzP6@!vSh^d}3~apO-)_MJPA7!Shw2*5$V*7=od#@H zjT@)73pKgEG>JgGuh;!i^@y;EC;!-mLX9>5aql;imrQL}ew{Q7Ci-V+WfzACF9blt zawN=!SIN2;$;4QRN-?6;XdYDK+kE3II2S8K^!}xdy7@# zN@r#^cF+)`d{FE+uSKFA;)Aud5_Zjp)iSRU9d74@_m;D9S>o>j2&kOGr#=X9E~E1@ zo&XAb0?=0_Oz&MgLWMwGuU!BVD^LSDh|bG!lQQu*cdTi`1lY)Pcj5KiWf8KrhGn&e zz>CuOZn|IKv;%!!jFOebt96R0Y{%u-WBT8&Z;w@a-a2vOdk#aA=4v_P6055nPD5fj zIKRzq4f8hT$d4?JDm~+*0jxa$qkT*_yW4WHx#6=hE{{vO?JMD~+X1FBw~Uz%@xGQ} zf#u2p#@G+ZSQZm{-hF}RyGBQ%6jK@%Q_WFk`?uC@zepYjjPAe@st%RQsnI{;Xhe~f zp5S<2C5)eX06o7m2aD)azLv8pqW@_qNsB1*-2P0RIty6!qbB@0Ii&U@V`reYg~!=| z8{b^44;_Y&)*IUSpg@3NCKf)}32W3Ul=jld!LG)QzO*?Zf`z>vNB8TwMuhng*4-kb zV-+r_LKYI)>kj!lCYj0K2nD?a^4fA4%JfShBqYkwkaH z51BD)eccEH--)*DhI1!W!OV8IOss(lmEyyWII(1{nnUaPrdI!gyr`Uql+MJ?#H$zp zOB?$dk3ATD%z&k7mJYntrB51_luk)wTagf{dLq}A1G70aSKI$xIH=#)tYRO@`wfbD zxX+Rrz3Fl*!yBt63f~;*M5&$#!f;i1nAV3I^Y7t{iubtRy^s)9;shdzc#VKRm{Cbb zF5-5%Q^45VfV)FGYc098CWtl4a=26{k3Ec{_~&l{U`P$JoPU(!%4>}L=7a(etAYJr ziUc1%3}cmcRyJukZ`Ht`PG;ey5E}w<*EFs4a4P597m-Az#Rt=bUR&AFh>q2EKfytQSDB!+_jUOOu*tI(+Y^XJc$_>1#;-tm;ODQbOS zI*-@1j96m5D8Q(3K7n=Z=hd_Tv*gu2VUzQ$M`5ZgwczfQ<-7P?x;*gZiaQmKg-$O^Q%(^ni#5Uu}* zrn6v+>U-Zkj5H`cv~-uWNOyO`&?Q~c-Q6V(N=o<8Al+Z-lNEiQYS`C33f zgvv+@)X=nm%gRR!+f0!^uv|DztB`s9t7iMMHrlVPuKw3R=#h3GTk`{pOI?*xzCnlA zREM`ahe}~+Wt5?Al~O7P73?F{`T6DM$diS>@bcU5xQjWdhBsK8CSY#>m$`O~_ z$nYMN{%gpPz)cA|NEv`L2-HWQKuAYC zzgw7)+m!`{i>hySTG=z$ggIEVk3q-c;gK=DU3 zu27%$cwOLlUjccY37bYf0r8`UFa};gg$3}T)j`Tlw_lAlrQclOR7an1;3;jFitR#y zZ#Rm3{a}kSSCz~EHi`fNotFVt!@5Jg<(j2;G#Z(d*p;i?45D+$keidCLOpNd*lBVz zAKwse9;EFTzN?Q<(#rE9=kRar zZyic(KSp9If9d_#A*f9{;&-#OoAAS7G{ z2*aERQlQUfuDl*>IHrQnY#F){XK84 zW>xDUgVkzlBXUfbbFUD;n;r`w4eJLV`Mo~7m;D@h$`=WiCp-fR6cE6%rFCe(2wQgE z=#%N5MY7F(kx4a&b>?!b63lxx#=Ly0AcdD9W;s|DqcQX=o28`IJ4+Y5_bjMb=u%pJ z??U~l+t8rHE~(+e2RB#FY(q9U{*Lyd*KNdTD;3`GQs}mmyn;e1>F!fuS>lQ5E-|l9 zNQ2evuOW@#Mp~8nIAG(I$AEb&%qYDjm6R=*0DcPWf6^kTS~)=BVFsH1o`Ag%+N!g3 zkpnj(w+JWg7`ze-tcbI-Cruiq#Ei!qGs6pdT~+BVYh(*}`DY@Azrpp*ElqR~;{ z&&z@Aydu*pi_1AXo}E$QW^O9=;xR)q9nabpkIUz$ljK5&Xz)E`_?IshvG6SR`FV<< z`?H$QF{C=7%EVJH`gejd(+8WT!jUw|SVEc6-SOxXFdH{uM{#qpDdTOW>DjZLppyzR zsg)>lv9T$etm}rkx3AmZ03S3f$e&U&<`auKP{K;NIawy+sW)LA3Z_bsf&oiKtAgY{ zRAIWk?8>hOvfn%Bk!tLA2hQbF zLD8$9Ouqk9D+(0_5n@m;KIE%85FVrSnjF+0Nn{sY1rm~>!~b9}3Y%+$dYnsa7QuIo zP?n}&0BwTd9}>}KfLV_P23s@%p)i{j&;Kl)o$D6e0X2M-YaU_>dNxiK;RoSlGx6yp zj&P9XZfV0|VPTn_ulE!qpi15VwltwRyc4ssE*-GvSILkGwPNd}&=O=aEFq1`Zs?^lY2ELu5=$3gAg(o?)U&f9aocN}vLYvQC4) z&0m8I8CKBw90foTro&?+bA=Mp(G~r&*H^F1+6?}sk2Taxtxk@<_v?S#`qoCd=f5A- zuE&f>=8s3n6&lZqDVEvF_+ba* z`5h_X800E*=!MU2hUNAksXwREc!3Wy&=7+=r|c$VV*}Cvsc$|gFvv4XH={8sCetWv zr)n9nAtNUxB~gjFII67#!*m`c0F6m}3GfA@vi(I)(VNYV2-~IG==!7m;9LAG%QBMy zrqa=gc?#G^3S7BJ(RCE85iBam{@B)S{W;I~aFVyW{*^)8p*4rzn0(0T-m(n5eil zuewi9O;L!-!R$_wy!%FL?kw5Ul9H<*`Wy&DcO9&{(TDUktlaY&X zpFMCgz$IHhIG|?re+;&r_J)gwLzqjMJbMJrtEgUaFNiX-6A5~P-zn@kT`$Ak|1vD{ z>Ncp}=EX!M--20F+N5F=Jdb!rxyKhCE}3~`(=8NY!Z+m`r^ZxR(xU z@Eu~Nd*7mPi*Zet$o8k#e477E;{|8{MJW}~{Igtxm&w>8MG_0%mMstLtJgJ9$iCIe z|8H?CaPFxw%dGtGyX_?@@iT{~ZP&^J)#>@cUk&REw!Y`6xW&a?d9MB@vYGr>&$pM8 zjI7Bl<5?s8v4J9P(;mKeHsVVA%dJIY`g_zFi-bgCP6VKg11p;L-I|S-IrSU>W?7J}Inrsk1Oki8 zkZ#zd$Nk+MK7w*ULL(YHnpFJ9lQDdV#j*5M0F6v-Jv6cQb$bQSoG9K3i|k*NGaADw`^sU?oxMCx2NeE_z>g&%$N1AS~#6 z2YWHoG&-YjoIQ85Kdn-qF^AGX!xzL(rNcYL1kF`*+BS-&O~}cx(%;(DOM5L{re`Ug zt~2RJ5j$>Lwoe9fkohbB98J-i>EVtYR-h_0irLzyo!L8mK=BvVpeaaw%p)@%luAngn#B4MPi943FISlz;jF+soSe_APn|1n`#X-1|6y?`kDX*kp!a@v1As 
zp2N^Ri|!-FOOt1t{IhFrF;`rpqLA{ImtnhI5?Y5aDm?f{yEc)gBP9!)#_5yeFF3fn z(Tb9f$;V?|91AD>#58JPmNYALMX(_WZ9f;1$Jex$zs-NC1s{yo3^u`l1-s(C4fvE( zL0_1Ge04ca5qvbe^S1O)C>aJJ8+pxdX!&~jwgjHOnE@C9TJQp#skQ;fj)TRVo-iA6 z{>zHdmN_OJ%g`YL9tU)t+!*xqmA)Tac;HjvhU716?G~@=L-Wofb=|1FfuF!TCeuT5 z)%!dI=Trn|o)QDgP2S+?A?u1J?Rz#;weRasCXdQwaAw1rolzzOgNN;6v8T&LO&MND z%FuEgak6)HYmn@yXZhAP-@jX#IE4pW@3DcMS;z{Nm`a9#chg4r9eAd?lu5*bJ(>vu z{jYBLmLH^S@zLj81pD2${r||5`%j`-JKq$AVVw4mAdtuy+GZ>ZbRF~-LF`U)1rC;3 z4_|81R4%cHhK3Tf3_Sb$G|0(=B-X1C-KGB*gh&zZ)T9$hundo$K+xJ zkYQyxL@>dz)E_xI_8JN@Qv&N)vKrE|v_vwaff#}yGZ;)v%%sdrz5eF&@|G6^I+lo8 zkkHI}k2~>iih`=Hn{0AqbujFc!3XQ~wPR`OH3;}KP?Yf(`(^xA2_wDc_5^w{=>sN=b__J+u% z%qwilx%WMfb|s?+N-T`CH&O*$f?CP=iota zHfTt|89+385L)p5BmcroLd(Sfk-~WkA3d8<@V6P|^8_|D6jGC@??J5}ro zbYEo%jIgOS^-&-QAK6Wn5VpBlSl%=d+8Y=NQjYUm{}`6F1Sou&!ag|ZPv^bpqkq&6 z6h#hoAgF0DviC^q!=xs9=b%Z(DyS$%RtUv+=`J)L{yxkeLYK`xWZo*z4P##sNe;Nj zW}r-G#w2!VSXd+}RAuH1gR1?j^Z7<^OBK<;Gn^)=T8ON6J0Ef$Vyp>CM> zj1;M_SxTDxI6<7RUXv{wtB{b88V0ruR;$~IHIC-WI$vA&Wyr5v47y=(!ITMkk2@e} zU1D^fVUWJU$SB3IP9E?N1|rAoPpf^5A!1eYf8oQ7Gl#mzl2BfQxH6PA=&>|hNk-xB z4ea2ixQgj{ycPw;9Lv59K?_3V7Jzw9ULG}9c?)6wZ=KtTuA0!hQ!7V2o5ERhdJQq7 z)mB$YV-Z+oxMIrp9FV~T`aueXOinhDSI2{+za4Edb1*!dQf&v#Bpd^22?<7ZwE{19 zTQt3dIN-kf_UD^rc~UzaNGG5Ys(i05Q##9n9(<2H7C)CQznrxSrV5~^s{-JVxZaL++>rY$bm3v(86*4fNP~O2FgB1_8*v zx-m&vg_gDdXJOj%KON)FEpKI;12IRNKaiL~#s2qL@7>d$U zxu4_yx0a|Oh=eljhe$Ajq?1T7EdvWYvc~G3t}gNekX7NSZfM{Pw${mU$alapkNs1C{wHe*JD87;gk>qC@JzQXm#Hf zQC1rAyNwhQD^bh7Z@h$Oa^m7bYo8k}8nczte^+2dagrdtPZekr+kPN&_yqpG^eg54 zA5dN_5x4Vap5`d$jeB5;%}S9e_#@iCo-#gnsQQ|&rvk1QBp|?g&@L=KDQwlnwCjY8 zvP2V-4;=LG^88<{7VfY2Z9g)1xWO?!fWRoMvD_>hoU@M3lFn zH%rL;WE|uJKxXWS1Z1S&N>Z0VtOq=w zGDw^agQE;}m5`Ji0mP3O)tCzNi0E3}K6Ub{VnE7f|N0-<|BwqF8=*AR^+hC1pRe$k zQx^EsIaw!db^bknv!_bJ-!aeQMZY`hgutJ72fm|OAg$Gu?%)TSCTdi5odW_oDd57q zl_oQwvwfNPNV`VU_*`c;I~l#bFpR{Tnn+gFAc`&j z(Ou?vYcl!LpgX}04FVaOsP+gV;u z{Dd~LGDuTrs*o(0PWQWyqKP63_&DL>7k>chKJ!Q7TWwOlb`+U#h_p0KB$|q3nVCM8 zCBZvv;gXx6A#cxn^$?n%068K6Xh9jU z36;j5YOtE7upz>A7mVM*`VQbzjPHghiIlICuWRe^BBr$=_Fd0zIxWscI(U#-xl? z%Gk_Cj5AP+bh4?UoT}=Nm)eWOm*-_%6fB-X`Z9V|h)IB`(zaqVMF0^Px+qF~U>sl1 z45}#e5DsiOtfILyn6X}xH;MabksTe)#KOe9R(+@TveMN1<)^f(&BT;Wdc%9s;_DflvhOdcEP9Enyhzi7k0tG3}4se!wg$$@hx>dvOgA_1Y zRSo|de1Z?%cW|BW`|);9<-XAS+w$2r@%rg##0VvXNKo)dZh85a>(oZ=QM-`Q-kVgzNHuzD{Qw)qIdxD453R zR@3S0*~Wf0H5p!V;WSI9+VhZY(t3ZeTqMzHafi|L8nFHKktUW%+DWL?crY%e7gPIzgnyQRCjaI z0osa=jZHnOYE#^u$g%MfIp<)=#irMnd@kGsb@}3?hZF%e#}L)-P#Sm{9tdlv+(+ng zgyOO^*p1rF(s}w65<6PhN##Ac5bGXgk~4=@vpWq(WR8dar(vbybjAmA8S`5Z2--Cbk`lHLLW-r ze+PQ^t_Zjvkx-Gybo4$6&arU@9>tVPs&Pa=y62bu9yBO=h*V(uQYT z+grdZmihX%6hJ{GW%yi;^U&~7;e4QTgn~bB!v2TK+D--xe5?x+os&>xu@%Zgh7EU? 
zIfQ1fT*uaB63=aCh*z=84p`(g2_qQ7FvF01%X_3>7J82nh+n$5UM?+#2Y}U$?;L85DXE^0tIE z)OdcmKpaR?AsP?(BhcE0DMH0`d>47a!=`N8Z+{HN6tN}56_*fh6;n;YDr|e1eB_&G zx4pG@U+!4$a9<0bUs#QF@R?}YRc0TR6{cDrr zf@7p4YL}~+^MxSvyRJY&Ei*wfKJ%&R_|cPh z4{`u>o~le&gST+9ef1F??!Sl=Wl{LRnoqNy>Z}(fg(VvI^@h!MKb1eTF$1}JROlCG zd&Cn9Q@~-9$Gv{@bbg1}f2hs6gaOSsL`FpKx#Fwn7~aMa4vb@xHpcK33#{|bs{uqOKH=+@q+I77X4hG%|^zojaGAaGyg^O z&5lNKJiO9>wKxX;X4PU!N{U&9OvPz1hh3uWHZaoSNmJE^7Zz6oF(eyo1K(XQ$JK5^ z8(iNFEXa=#O*IBxwEyA0aeZ&T3_xz*nAmthYwRtV}T9=N9)Ts{i> zUV6YfNtdiLfpmxpU+8j(@@@Vw9e$_P^v1{xv#m{-E7z)?Q7C*GzgW?WdB5?o+qApI zZ6z2o51cIJYxo1=$&{i-^djA6sU5y zn%C5q`+@{qUfH(ymd(d&^>Ti%PCwco7da1>eiqHS@=~`oJyG+`Zogi)oymwkdpR@U zT1C!iXlt5?sVHyCj08?C0vJ&-Mdh&!R!wa`F$>3%9+clsbQyFBW$@+%)VQia?9#Q` zxmuaddZn*4trXA*0d?{QSB}|27d?_Kn4PSpcgpfR=jiQ zyf5}_>Nx+68un=;tpD>@MCx=)a`8Y7zm*|nBQ;u2;oAPBfXn3qF+}Wle*oN|DO&+> zGYSah^edFwxc`Ex&*(qA5kV@($x)j zWfa<^9jkq3@^PZ0=c&fy!azn|p7Zt*aM*UeS|nV~M-wE)7LB0`Hms z0m!HXK<;~u_HCT~{)e*nWjoH(Y;SRZtH1S>J`7$CEhNS!@eA_1G}a1Ty32%sf2aN? zi4m^Ge%fx3P&cQx&CEC;LG-5K&#zkghJ}a{PGKX_JE!Ve)W~`!J)U)~VK_;bY#FZ; z%V7wDSfHY<*wXLQ1aev1O`oW#t!MufYfLv{r63W{^=EDo5@{%R(drY;@3_AGt5c14xRCAwRWAz{xrj5|7`CmyV(!)U79IZU^eOzB_o zQy`Gvc1A?uelWSdTs?AHG%DCMxgpDMew?Knm&A zK|ANvI;1sz-~H=y)qSl+ASS?3vswVTe0{<(;fwIS1K0Cxl9xlYX zCkWmG_Zg7=hD(u6cO0XZ>afm`PDP+YnGII(+@TZ8*Z6{OF@u4q?Fn4A>sp;>zG4sLQN218 z1DNb^XCBclJ;ByTH!B^!mQDx^+UN)JK1`(P+tM!T(Y{bTq*J`R@T>E^@JZV%Nss%a z69hG0MoXxE;jPJe@whN=*sS_fV`B4PFGl4s^T9+pB&u1JHE*swucR8#2395Gb?u?1 zq$Iu;@_e{(&-Z=KWfjN6nP1FKnXq&?@SE#Uo`P*Yl$?^5x`*S0ti^iE65g1rdjkt9 zy4o^$YLay7hjrea*mZ<3RBSMcR5qH5Hj!ihrQ}e}(1QMIQO(EmcYi1*dWoE-8cGDg z5`LR~+apVTZXT38e?5Nkj`2=rxj^EL1%ZHVRo04M*tbIPUk>ObIhF3YWCY`e=NR9p zt8t76Nfq;jY@D})*NHLw)i-qG77;S$1D%TO8z;NIFH+|MTh1XjJ&3mg71m?8?76xT zdmGO(KIvVBjxBJ=@$%m9pJwyy=G~Ewg}#Zq9j}%k8(hlVCs9*<`F#}>d*;>1tOLjC z*Q{*Z)tgYEZ&{b6U_D__$!czpi-Ehvp8ue!{qiX?CMxB3W^Q9*6BmL+Vs!r6^jG${L5JU^rg(q#8Xw)mNU3urs=eZ{!TQ*X9+%S76D zQQ0RR9#Y*T4(ECo_^A-gL?8F4dFV^(JTTXKJRQEm6YEgv>POLD~1&NXNxh zpK>6?13RdRq+DFmF0o)V2Wmmo8%qDvDKJp%hsRAYDjWmH?<>qA0Y=cD^^V}rb94?% z<9b5@r9l6tVTEAaxVzO%z%yYLO`bQ5UReTmHX4a*M zE!RjKtcI$`SryANf&;UK$y%jVy4<(8oQ&bW^%g7Cc?@ImdhF+0zFJh~Nm`^R_}hVT znrLDPWrwlUUlhSUX>1Nx%DbWA-rw77Stl-f9*EQgd-BK9N!kBt18-xhLlhTm4d^cG z_p|%9Eoy$a*PG)xfk7O6*6^L_UI=ru(9^kCSXBNrZ&Y(RU&7JNKNIrot(9j<9h&dN zq04&k^ZI;>B4Pvk6PfF$#1Y8Pnf(PEh@XUV2l<6KR1w6;p0gx~w&sUOEfsZGMUA@lCfV zndtPN3GG(-rVT{^#S_Akrd^A)jRTqqylnQ6fOxdT=JC@jB$#;xZ<%+wE^B6RUX#|F z%v7pb)-!|_g~BMq&BIwEoSS^$D!?FNCD)u|azbt_&uYA%QM--vpsra{%G=d!nJCpr z$-cY$d@xrgC!Gw|B353B#NP7WuJouQ{5(?Bd~)~jnO0bkW0H@FuoK^1()Y50qBgJ@ zOk5)pd0RMC5>9e8e3%V+9Q~?TWh#C4GCZa|b(HK2(DQ2MPrYN}J0mB7gNO=M6qyTQ zt^8Zg96+~c;EW_NuarCeG=uF$-gOsPOV9h6LWe6;O85BkGMzrY)bte5?bKL>*77v~ z&G*&Rs)Xux#!3$Igw$-egbVA75H<)^V%YT3b`!FchBR3tr04l%6tFQ_J==C15QBpK z?`EuoM%o-Y&84Hui>N#B0(njl581xodcvxd2aw7?X z3QadQcG6hjMdyK3ApT`ot*Wx9+WIzx6RkGWb-{fz&={Njblmr0idH<@4EZk-8)ZcK zY-}OA_KSX{B0-}>wyO2U-)|;gq1=kjX)}Z7Hx5;7LMyL*|GOxFX7uk+BIOPc!(%7t zcVu>9pEYmtb%>9z)*Z*fbLc?{*oydWt@$(a>HPQ;Z^7h;x1}W}92uQ%|KB2kT5BL) zJHF-9gG-uBG{+}{s)Td~u3tn#E|7jCgK-7(K7y<+zn>WE9|DO2u;mY{J8j!~f?=Ze zZF&upF-iTXmYeO>)^2{$Xu|F+V0Pm5aVh^S^0@HG;MsHcJd8BH?3{BRX57>jPZEu8 zp@*PLBfeRMu`M1D_d23o%_rq@^W%0NDYqdoqLkG!jvw!ilAKGFsx+c>N(*V?Ye`b~{ zGxLmXtqJB3J>2ENbK0vsJ&ga&K_axK?r!|fsddwHi1zvFQi7o6B#v(zI1eaz{`XjT z0b>iSWw^^$??w3agp&kkm_Wwr_Nl_*flKF5goh@(MM;)JA>mf{1BS}RT{%>RgXLeI zs0S+r!T+W!WPB%1nD=a4-9p6C#lp}oyix@ai?`-kvICq!sjK~!P&dUfM0h-M0{4B6 zfpk#WHyoc_{l7@%n-vpny5eLk&|6y|u;T|1aM7}iYr|V8s}?JeM)txj|53*Co7`Cb zBjx(akKE6LX>DarH3n#@>tojbil}{)GRtF)LlICBkbUa`4aZnm=BKb$?l=s_-;G5q 
zF+3qDF=n|)CCLKzyJHzvLANS50tv%>Q^l#mV!f!9xR4X)lYTl8cBASh0vWsnejJQ;~{6JftQonZ1BR*b+ zD7q49Ljtb&pxCDFOzSD)1+sdxy#~dgcuuZANCw$A4vav9k@dnGx6RQ>C2Y#m2zU$; zBwSyoutAr@K?7F{WvJW4N;P~TI9djsT2)ljyaV^+jTA?jv_0mPVSWyn>}JLD;Q1SX zX2g94S2BRM$3|O1R&wNaQv^O0W6x-5ohT9plK2jkZ#g-yjiQ)?U?6^$c5yPawZT9ddB#}3yzSLz< zDhw(T6}-e}SAMj)FaV#DQ7q$})XLC4O*?0xxkqx#VA*!$Fd+Aycp#-zlU#`^TrlT% z#@{&~r<1|-JaXrDF#yT;aqG)Y9NqeZWTQ6|gDaOSRO}_k$*7jBoEijcq6l3pN}DB2 z4%3Ko|B>AE`I_x%TpOwg0EM~jcy+cR@vDUyypN^uv%~yh@N*i(bwG|nH-3(DH@KY3 z-u$IvYx7H0>5qOYUH=L=8c^n`=tHv|J?WZD31qFj+b3V5(6nJ;1w}K@lIDc@9mB9ucI4JL=G&uxt15wS{b1z~tFoY9%~R5{x%W{S%WNRXc3A51QwN3b2M$GcyoTV*_Y+1j0q)Q# za_NBGkbVuHy)cfFIIP5wcqnM(karn65R)3DKC3J0qWXbfQBI>7E zs~UsKIeCIldd{rpK+t8tk6}v_A>|(}lBXeT^`(KhkuWY3uAp|3os0#<6xAq#%^$lm z5#FMSu8@On1RfUJo50WHDV9T7$z|i?x~Wzl>)=F$bu8@_^pGBwo-FZCAYHR@j7iU)WNA7V@AuJ5e%qm|0G(Hn!N&x3%HcAHncDfE8S6F{amk)(#c#%54$3mHi0j~LLXg*i`z(O8_&AEU<}ymt zRYc{T^mI4ClxeW$sqlzHA+#d3QUwDP0sje7 z!NTJm#qq4USlU7{z7TZmr)#6FAggG0U@kdra@N$4VdM>d^k$+KL0Tr$N7|B8Qcfd( zr>zk|uxaqlR9amVIeMD(3=hXcTEZ(NX6pd&hiqN~n-WMSxK9k9_y~Irvw|Y7v3I(q zYWV<_5zNLJlWa3|3l7?WpbktlBI3>C%vPP8FA`vGF0bClUlPlTZWy17Cw~zcda9+B7q!SoZnAGy z@f9ielpb?hO*SH#AqRdpxsZjhBt9MRm)FnKWF=2wqzgPa6FF#d$3uBn+_M}-@I8zt zEQ>By(b{d0C2(&+kO9XtnKSb}{pg!gBe4LS;)?Zp>tlLFN)8u_oX?C+75Vkfd3Y5p z89jp^Z7^$)?hpfi{vu@}c#h2e;VL&C!y=l-L;~G%vv*=%cW1LJT{+XEYk) zWd)3Bpi$_W6YAxSk9W32H-!=A5DI7`GmNiQ8=|S$xOayi*nx;H=LZ}d7^+HR+TCk7 zXU#~`8t(ATmb2ZF=(JdJ#R&TGl?gY0yfE>az)z5-HQAhDpj7?T#vNn^-A4bL%0rm1 zIR4rp5+Hm8^c_}-W!)$@KlzZm9?m4H{FKZnjV_0CMH#{yVKU=RVlrI# z-zeKN#4e4bD<0>6EHqZKez>%J(Ozy*!tCD54RAC;5q!Bb&1;H97dj2^bHJdptz;1l zo#(ay9<>J$k*Tc{$6?wwu3kNMfKxo!ynOArhzIFLB<7|v5~f&@1Y`+>6m;u@rV08BVaoe`mV6u zqRmF5q-mx+d;e9@4jl}ugjAZPO@(YVC1bI5FjHZ#BnCOpi64U9Ri{F97O=#{G0dB9 zveTQUDwE5>`PF&a+l2QS3FDIttO#xNG=QXiX8)Wdn9%s4fIFt6`3Y2SHv@d-(xJk( z2X$B)L%uX!>fuX5z99j_iX~Q^i6NU#mjlc^Bh&K+8$ajlIKY7wlo*CB)^kb|zcZ{N zQ0W=u%DMXT%E0JfNB*4Zw<4wP3`f8DMwZ6=JuWEpTqOL%0Mw%aGyjmQkwLmd?m-RG z`b6IF3LpoN`j5G--^T$N%^A|w1#V^#@F==a)Ub(hAx(Vdx$&Y<9;b;U#MFar!`fg$_t~(zY+!=oIj5; zj0T3z!_>tQb8npiM$L-zKMkOAg3B8|E-E_FufV5rWYAM9dkiD8KR+@ndsvmggRQwt z=ozCI-=9~s+X6n1aIRFo$&nkD&L{54tpscsaclYWUP{5#oWr1}myqGQ$L^l&{ka=s z%pr_G5N0A430~fYQzw5nvlFsbxm@KTm(QY-te*DA(~mysXhhudt*%EFC>;RsV9`v5 zLJ#W#hSVyN_m@_9l(Kr9hLhRsFO33rWG73;bxhDb^j}&aGGt+N#`=C)f0M?r-9)Bcnw2ZW6HiqeenWmSorz zFp%Ef+%kcs+00&tX;2u+jxVm%vPbcEx`aH|bVXdHwnGWzZ%AM<<*Ke1uVnV`oitV# za&=THBu^83XE>@O2yMc1gq&ZEibZx@7z-j{`Zj0A!);1vVe+`38h~Gs+6?;KMMe4?1={>G@1Q{neXP9gFb$Fww%$FwB4}k z^smHGvv%FS<-XmO0a-x#*Pf)JP|gEh`j3%vP=tN=lLZ-+{s)#5@{po1xkSQEt0?P) zmp4ZoaPru7nsuA)?WPd17hP7^a zdKv{)&}jX%PP^u^K=U9k)=p1hNS%}|Orv5cl429piqQw+&tz{Xsy8aLs6xtH1fR}w zkwDHqTdz!&IuK2DzISeAD3)>kRS+nH+eXEtG3(7i`5(FlU%etUkM@B|+ZY0bbqxR` zyt*wZ{cktvRzgxg{7;6$7utPLVvL|wZ>^%!anDM?aYDOT84a!WxylLI5%%p^2Mxz@ zH)BRHU|Fh?@BRBcMa_anQ)?n|uzCfd*Z5(UhJ>b>N=V2$t&h6v$%p}l47^>=_|`{u zsuyMZNizVl;q?M103Loi9w*B!2)QuoTpAZm;9r?CScqmku-Ri(rr}Zl1IlNh8@@yv zvcNU~sT}(R-OAH#k-D}|U;Ygzt_e+ZmH(kc(3~O8%)#*}W$};87b{s8= zK<^aR&gQ^7^gKZ~aRk;|#=n}*<7qs*K#~I|CtOPJB_QPJ^QXd&5(Wbw zxF`eys2DWG@Qh>`7tU(Z(QmohESwdLbO7+Tz3XVZ_O{D?6kgB69W{S$DB~X123KOm43$X5sf?OCH#3p(mwxJtzw135sFKrl#<`oJrDG zNzl0Ve_owFR*V+52Nd+u(wVN!!2|41I5dkdZpH9omM50wEjfMHjLS6Umm#kD{p-=T4o~|5Zb9f+|hP0y5LvA<#tj@Qp+VSEX)BaQ-Z(X8!xmDt?R&?k+A}_E(1QRkMaolCtiAu zU+QfAFQtj137ny?5$=n1=Fu`r%Yb<-W>PbO@h(A6Rb0W^Op>x(n+|tjjG6!ch9eN{5tmmvlEshje#Hb3nRF>Fx&U?(S}sl_cF z{7lDI{W5w?&p>)W_(e#b#I^ zXEY_7Gj4lDTc0C-+^Z3*n%8xbkPl46jah5j=%IVo6p(1pto*h*1M6EIL=SX#C{9F|hP#f>ZUMihS_W(GXJtC|D-Gj+`63UD6$Nk3T*TMc%~kw;&+KW#Z;WSXe@^}jdbF^QtoTj%Ny 
z4Jo2GMp^?ee9F5OIu!Uo$?8(*S$QFRzddtsg+Y|RRLWV>HjgBeI~S%^$gL;Xr2s4 z!cGEQ&8)aX!r2I?R=|t7oyMry=hYiZBFs}MR#M6qdIKeGeE`@Cu5%`*i^cEeN z@P5nr(n+!Pi}RG@V-21=h?>8%8f}&e3N{10&(`KIhf7bsAtbagEX&N;yjL+y&-*aQ z5B^tW=*J_=%Lsjh2BH?tR2bLH_Wx{sa#2mFLal+k#1n0Yn(M{?Zt21=XXLb7@vi;w z`Ojce$g@k;vk4l4EUf&Imv*jmh7O@lLrrLNBtd_X%~Av9r#ooEZ0Vnh$F~Qp%v@W8 zQAvO@Ia$?_Dg_4nH*kkZL+K?Xf}7twDR}EXt*6%e3}tD3y!9=J|GAHbvrTIqHvKOh zmVryPU0QX^eXnKB8~Fn)aAc7K5-1!59^vyQ@^>}%Sz96@s#m}(;N#Gb4ZrX#!ZhGj z6zvB4^0&nfzu+}{q{YdLW&hyOd*&+3^e$=b6WyIEf6MU4_Q`6s#IZH4n{XYm?C<^3 zO26mto4G*OMJk-=S!E#v^B@(Idr~YGxW@3|&9=z#FeZgX_ z`VuL=Xdq^bCAc`Z)##e`vI^4hF@8*qL0dARZ{$r(@yHVR*8^B%8~Wxeb7~?3dM#N4 z;w;g7a?B&Z5VCLvnY3dhDUUCD&vp=6fNUd!sMuGX7Lva?n7n2#;|lp+h5mFB!DtHn zwWD4t-K4{I%*{XsR>|x*r7cz}Q1K0)LhdD9hXYA>SlzZOBHm{gwc)h8@%0r}7y-7p z0jp})rrHf&ztWPp%2iD)ttJ<=U`LF(n4ul3`?E>f0(2prUKv6i9cUEYUS_gxI z2pLQ19A-*Yfq@uPd_m2AL9W-TisXO3ntr38CG-(p06+m@Z1t}JrqIuwW&iEOH=QJ? z1-6Uq?$?Q)EeJ_e;t6eWsT?;a@0GVRT?EtRKUh)^9K?O5cC7_m60@$Bz+6NoB%30uZ?g?{}#_!!@!1nH9juU8zEc zJ1zx)@44Z|Ca*|qQCgkfQ~+A3k*Q60nJgvx`Eq@#Y4fLeqxTLgc7l8KpgjHoP@~J4 z9!c}t1pxO9khy38BYiP0BlfqlFrZ$B9>;=c!J%}F`Ic1owPC(C_&Ci;oiFkNseIyQ z4BCvwW>2*DSNc*6w7B?$G=*-c&U zob(?SlHCF_Y9NTwg0N|D*Fjuio@;$7?&l;7_`C3?FB+M_N)+U78(xbb5RjB zYhk%12?F~8(C-fa#4G~L#4SO!bm|q>@fwG2(PoMNl7YB_QP~J7O~4m5Ss0ZzHMY?C zUJA8rn-4Ve0L^Pf9lQn{2is3Do-<_P8YFss_Az+N|No~Hr=ygv9#MT>k*jEk!v9xf z(K*zHRciWF#?7Mb^fu2d2sR{AHu80R_RZo114vU`h@Txjm1Fd9K|pZQye}w*8#@Vz z(Z2W0_M0*Qk`hU<7}@oqa5N~pu6V}}%j{yapJn%w+1v4e$N4I`Orx1aF?)!0>tDM^ zBb&ukAEsKlYH4Cf5aRAI5J&U8`&k$8+f{eLUg{kG174AS>q6NgVRLvz`ovuCj&`WQ zBt)DZ700|k#IlO<-Yyh0b6*~QUdgu&3jL%TJ56{523B!*sF0X!nHbisu3Pl+=DPBx z2nQpqI1|$laN00I@nqKjs)V7!Ws5&mIc)0lEIuQGuRj@SuBdDmEYe)5PA!7DvGz$S zyXkRs?EQ+>`v{usNWT2P?Ln_3c4$dE3!o^zaNFM%EnnQgvsiDNLw4^%q;hY;<8ft`&`DZlX3z=L)>!d zlNeSfgD3V^fWC8AYOz#-4=X9o?|Elm3lMDu{QD^|19Rg|>;KRkWMlz$S;geoi~wLi zpspi`CDGG>RZHRTsHcha{8%*;dU}zDZhYc!EYx3`H6t^)Oj&}&<8)`g*5;w)uHdsF zL{GGtCI6ceM>C_5#?h0?HhN61;cxkH#2uN5vg$T^+5{{@lA?*eKBp{ak<=|l3Bev zpTGWHu68V3TWtLJ`U@rmC)53qrZz{tQf~^lHeQ|I-BV!(0|85#k}Q5uDK_pLB_f1o zX+s+*m)`io&^67r-o`&!RwGcRG<+}S<1xy6>hR2$P5&((e7TeKRkd`;SY3~H!|=W= zfg+i)fUs2kzlWM94CU-O4Y(M1O(1^i^V(+TE?0GUW!g1}A8!zmG zyTy_Zx8Qn$Dn16$mn}fNhqX8*MPw+ zH-3+@x{1zCR;8`~iU~f2ob-@J^CE0Bp)$_4X2kJknExqNu&I|RKKoeuB zcG)PTv+8lHOQYLTNAokJR6**aEBPNDl>{q&BX^~v4Ml>hF284otAEVp{f=6QnAF6~ zsc4l|TEvodzN%J0gQZ~ZJ#Js7>*jlXWRmQ~T*i;E9~~Ni>!}&4@&@kWFUGL&!DHP4 z@H^)*y{X6#dD|$XOn<-qMgOgaM=P=itBFG0b!-5=wz7z$6>tJ*8*G^9I&tZ!9Eb(j z+J(1nu5T-RObZ3x#?ewq59bEuceMCDbJ=yc4G_*L!J@(arl^oCvlw`{R`{!61qTtK zPrWjy`ulJ_?pqrH0SZQ0@aE`OW~c|`l8qqhQ&bn-c))RpqY$=o0OWh(D^e7`gpbKzLQ`U5b*pcinjT% zDPq=DEa=Jfc-?v|l^cKTWVt3h0xQX`tFTqK1U>Y+#E8FIiIO)Qky~|U#h1SIv{@Fr zHyOEAlFYK`?11eF!nOcxUl&DP1RxbdDbF-gN%DV{1-`bTAV}+$90w2eO zh93d_Biv4kwYZy<;!Q^ypKv76wZ79M29Uj#logeLMW^lHS^Dl}~XS;_Kg;S6o9L%+0U zzSv$IT9%}zr)N=jteHR>ryYkl$JURE##L}D=FH=kYsn++xKxW@Qx@O>myEES1%O0< zG?2o1sMcUT*_nrf5QwD6hBD$ z;)r0W+R(7^3k{8sJ--qCnY6}F?=K=&RZnzL`d;}zZ1RxY@oJUSS1Rm|mb4Eo&iOCs z1a$w7f9AWf4ikzbLDl=hku8NJbho@Y1$h5b1iV^4eEK3J@cMMg*9dBLSrfS5JgR(h zj60D2OMyc@tmxQ4MS_G~y7yGDI@;I*EVjDX)7)8qg-70N)`pg^HFcxLghyW25n0JB zu$bnT3cM9zc$-$py>tS7RNA3(6<_?2`5F~({0sT6pqsT^NbBWai|WgZc~YH137NLb zNn!_zpzY0^QY>VkoKvc9pb#>~?N`VW;58FLaqP8=QF|OSGAiN7CRK2epOrinxfz4S`B8^ZnDpz*KUyg z_jC3gC#Txvz0`?(5c@lqTENaC4LG3ki?zInxpzjBIMwh$ZEuH3!5R$m7?g5siHS3g z!-%$RcUtwC`L=!p6F3ytcpR~dv$Rqg zFRwMb)eg0LqRURu^nyhX* zH>iJH@;mxu@zD(7O#Ym6n!J>-PjM7y@_DSNQg#o8>3fp}{WdlcTAEYhqFmH4s+lk5 zUm`_)LI^BooIejeBA1=^hOfC>Rt?AU*e)`Ae-SkGX=uF}f9Wj>A2FHWv&w1vtAKFM 
zZ31~b162Y#M(r#CZcQ+FV=xAwgb#B#UHCOAvc=!!L3_g*<5kPV1hvvw+TQ8ePAo&* zbGSOiLk@}Y@#KKnLrSXh=qhHqb8kHBjCHE#-?u5B40f4NY#Q2pQF_!2t2T%0L(C}= za1+*A8|l{EZqjPI`;h)+M-nrU54G2e%Wt+)6}1>Hvckxb`K<658wrJ>nuylC3DL$> z%LdQASGB1QhimC#ea>O-#g~Tvng_U~ie=KO#mmiUWv>?={2V(LaT{(EZ{XsKWbeE> ztkvxTN?V*`w^}|aGj=qM@wRIv+WsqY(#T~$e~DgREKH&y9Sjw$UaBk&UK+1jZCBM(=adae)??&Wc}EgE?BT*bLK#VI@aCky{;RzF zislR(iCG9y;Or0o*Wz1%mO}q!EMps{DdSrm+D0;95lAK4?Lv9;+Bg0s3Y|jwlM8Dw zx~(~~Aq$oHmf%36PHj1O?Ov3_3T}s==QbA)1rxEx^}wh-(c>je`UO<;eV46x>CByi z0ZtW-i$q5K$r#iBw+G4yO?xCwX|~~C2k)~A2!v{DJO1HUVFA@3+mLgGR>xwU^|k)v zP27LQq|^8Ua7@rq2kHE=6?rlcukHcSxs+7R9PUaya{Dul1ll~ zKbr!N9?cT8;_%f=Nk6h>Fbndv|JMYtK;tUj?$p`BKKzQC=xh#Sd%+Fvwn6At4QFf5 ziOv|Ab|NdCx34dQ&Wg+`F#X8~_JH}3bamv_$*lG+V=@!IYOz8_!$0L#>Ic*gyxCHz zOUiK&W%rw{P?a@_-C>l(RKR*#AiLF6U`biK5RS{bt)R=i)a+O&h1z9K&q4k{=uS2~ zf=nZ}>JOXUst$0}Rt$9Ny7zmoWMtVNFgS!p`N25)PY#)Z>0hE-Ng2J;@*3%rbqL z6Y*EhEA53NhcG$u_l={LX8zwajsetEHn+bZk?3=gLNLGCh#9rc zjz&tSS-B_omihGd9rxuP)*jN(qC6-TPE0hQuk0tvMWfU2bhBt*H*xl!A| z&T;|)2!irilpYDkt~CfkAEHBnxA`S5-JoU!+#ON0N}1eK6Gj;eCg;=30YKZX&AW`>4_m9(IW zo)z^LB{xy6f8P6D&J>xCZEdf(I9JVgZ$L&!*^exm@0L{IGAekYx{B|LsN?V z z|BNXo0`7I47fb=>Q9@UokoCh2w>~m8BQKlUc2!Fh;Y>oFvSlukx;j4MOaJ#o4F6>^ zuTL{)7JSXs8Q~^O9e240c>O6vo2FGAsN@}h)a_D~{$t7naVG+p&941A@t$Pkjmanf zaHn^qqRzDd`Ae$f9Q0W#$6^Tnm2hhD%j3-%lJ{or+Blg+RC>_RHwu#^{pCUH(WKY- zF#^`Fk;V1-z?4&Id(6L2lWL!K-_U7~TDf#sGc7}qIRDo*p7#I7VymR~ia30kJ}++f zKvB9=T$mY4tLw4P3vTH<;Qi5P*1cTf^gBBDVdClL;unQ{Ig=sYR_?B`U2D~#CQv4Z zc3J2~(9h>JiygSoCHL+11#SLB2z4t$!#T`db{J(~v7Di1l?WaIOsAyKVSo=HZpBUz z2~{9*@*6qwAH@~?h{pyji}SA3>qu;I0#4g>Fn8}PvJ2ZO1ELjVix!10g(cb(wCTFfpw}cj>0Jk0gf)IJ)m8LL8v>c5E)IH+rs$#x*ejG2>Cz#Y~R4ZR6&E@JCNcqx;=xur|Lm0NksI;P<6Ysu|D2#5q=X9+oKFB6K zb70GH(;pv24W}?Wr!X?Wloz*7&+A$hm%}<4Y_n-7&F&@{#&lE%i}bmfUH-JxVP_of88J#Yuml!*fm|XDrD$Xsl z_D{$}R}ZEhp2lWKz>hXgf@O5A#<<=?+6Iqao|Wx`g&hJLA7~>Z6K#kJORFI+8GSg1 zFf$5ij6ah-l0Gsi+w?PE4soBBfhLegl;L)l%cqde^$jR4(GaJRQn;%egvyhX|7mn$ z;ONyrr_z{e5cDwzjV1w?GZp;WHa2IglOAn76(7!97 zWH9Kv%(J}BtZ|-#U1)y~G3p0?7*I@I02NvByN|UFmO|;!|BAzziB8W4+0-QijCgL) zIg#u7o2}SDqp`ymK;oBThDH5v7kgx@JbPil`SfY)Ts-6S)c8~64+-KQTpVkcfae|7n?iYfB<>*qIbdLf zraAMUVvi%X05xmh1S`-L8WuIM+2c?A-YOcXa#Gh;pUf@WejHF*#kUY!UW_S) z)>QfLth(v%AxQ}=`!7h-6bzpFhy#>VNA{@=&`ZH@UEEhTnseDcU@V5m5Hd zsF!?rJ$olbkDYSrPW5sEe`_Ueb^W`aFtBdEtY-Dcv>Z(sevWTG(7+^CK_zAk`k)Ux zLFP(0mc}{Rmn9_cYHK}ZG%~Xy60F@_jwFNBSzW{Bun~lVDEJcn#Yv)au05$NOF;F; zcFvWL4*I_Ri4b3;DuI|d<>D3{HW`A1!ZE0TrCx1-T1LMeF&G}YSnpMYlJz?v5@;jc012F0mDM}hunlp~mQ?Ha&P4RhM}c@nx^&_O1KL@V)qqlV(V!Ooy^wIgcKT{xkkQ~ zg67M;xx>|Z2itqGn&}4qc~Vp@g!IJoWD%ueufl9+G{=e%;KLxs{MLb5b`Piwd@41n zMQq!z)u8Wn_!-9L@sp_UgUDj#nLpNOO&yC^&?B_{6tbVMHPRj2W$na26rB0lw!tU7V{S z9we8<)oj$l6RZK&B9Z8UxqYMv7L}C%3Mj{R3TU8`Crr)=w*Uyg2)Yq~G)m5HIrC-v zr%EZgVdKLmSEK=$9n>;R_w|RR>Kd(ukPXqdsuMYqv zK9tm03cR^GRD(;6UJ(}V#Mq=^tT!<1XX96C2a9OyOyGv(2a5T7Nb2l|x`vJjY-|v< zd^l35R5l_a<|!T2+u4GB!qS@;y$$5BgPew(bpZJlXt_jOCgtgQCLX68r_j_@GJSwY zwGt0+4tstWAOk2g6i8e_en}0p>|76Cmt6@e_tl6lIXehr5Q>3_AsuVnO!4^VaJDppWe2l9-+2;n^cW$aoCxW71JluXIQYX?XqS7S zJczU)Is;&W$mD9d5Q_n}^Z?P&le@dS_zpslDaSuROz|chrsAJx7BS)StA-r$jeNJ> z)JHP%gnM8K<^y#qO&{DS^BVK~8JS5zG$R*4EBS_q!)QJ6y!QG+^#ofJSc+Ao)tFi1 z{xoliHRsZ-3p^-CO$QUF5%wSJMM@jc^O-GjNWDoIt5XxPyIV9zTOzUWWe!2QA87i0q@EO5fUBYJ^)0HR4_9fZ>DG5UJ~qIIKX z9iXM@Ud_i+vbzoKpYSBL=U`m`y)dK&W@ZI6_}0o(^! 
z2!U{o(t94_sQh7XXQaS^w?x%xd`kq)yQx-CTaVMrhmy6Xh}lz@m@*}ZgTc~VHeM@X znRu5S!v1ZC(P)EGuCv1lZ|8Mf@5jfWrt>Os(BDlFU#L)!)`ZX4sXnF!JI46)&XDwX z;x3sM=W(sWDNM7W(U3T*8aC#|kg%{M`4isnPqMCwiwHmCLV?cm3XgLvZs!Z>BMQL~ z%R*R4NTQiss&vX!3eth!u&@{e5(y_T$}<_KZ9)IO*=P#mN6RwtBkG!zeT`j}E07SsUUhg@s> zvJRUA&l$~2SV7lpyd71C&a*sGhUQXM&IO7-zP2z$>r;iByhXXXpFT6o%F@?J?#JP z@$yYTX*8jy>@z0;{C&&|H&tvreQ-$cO%|Fb_mN0@o{@yX!NbtL)Tw@Om)%+g+JED_>L}L8eGDJ7)ceIbo^CL=0qU}jhILhya&}dx z`lg?;Vwhz@7z6u!QR&sfTGm@$OTgwxvkBu5OqqBsBWFcXo3gx7x4!F7BmIDnhyh|Y-BFGh{qsvbV<1 zAYw12+j|-AAne%XWF`Z>CXAQ)Q+326jDPndED7BXfZP}(08~IG6L2oDd?~Eh=r`OT zO*jel=IvOS@Yz4jo&O}}_`h_0T7?dff+=!td&e>gJTF zmRrX63OdcY4%g(H<*$)^`TL+)R$7rj{Yo7`t&&3XLCll=41 zRJ(!rgeMy#@HrwER06@wn-F*!5x+BlfK;Z_JazmC&CMXvAl%15my~0G4eM9HtBom2 z{E)eWO1`+Ovm0bu5;#yN4Apo0Vhpwe5H^u_^ruUJq7Gk4YW)FF_gM_=qb5bPRKdM} zBZyunq(dx4|ASeOYBj{_4Vp-Lc#4ksM1Ums{0F38j&d0;#7n{TMxQHehk@)pmPw*t zZGSl`42t{Nu_>+v9$mCxAxG@R1>&^xvIJBfAKauo!dHyY5X51J2-IF7R5l4LG(t6b%BQ`=`U?Ps@;@t<&-j7cB!6!U*70p za>UnI9>Pze_=EPTP2BJ^>S|q7`7+w6=PE!z3c*6U>`!FoZGtie&Na_klE3mxK-&4M zIi}SO@8mxLuZK?}UCqeDB_(+njpSwTB7*|Vzl<+!9 zoW^1Y)UNYK3`PV5?gD{|g)Jlw22*Ew2D|a>u(JRq?u@dq8j>Og%PRI z!*dnNNGF{o-(nMc8-D{Dohc*;O&*4DBm;rIt_WOnB$5PMB+jt0n`7K;l@%EYbfBdq z#>9`T-h5%nEvJoH44@oeq3g^&DdzHV>_g~6K?;08MR-%t7VIERt;cE97mQiUUmAb+ zvrzAOtoY)e_y6Q!|LcQztd zpE^AQ2&Z`h2G+Q!VCj%L7VfgPY`Z=Mn}Yq79gRLTp!}zk0E!=7*xhmUMqn=3t_8<_hCZ5#Mz0Ez{K*)4;F8nI#F?CZzH2U zR@eNanEUGhw}qW-H>_@~@ZkH7E0@;>umATO)P zw+GLw%_qd82x3J?#wQ(b^8oE5$AU)Uw>qkKh=7cGN~z(S@+Ahf@kj{xE~x2?Jyman zU$tZDG$LiQ$^t-{I6KBzg-_ttp(^s303_n;X zv#04f473nuB@%jn7Rk2mEr`vAtGUd*izwhu=^G;6lF8v`nOMjBdV?QW#-5i|m)D;p zxzzCAst{Ty`{D+{#4W#8v|q9OQw5GhzGEzZKixja?U1JRU_P0e@Y`!?qUINj=2hr? z8O7|(1G=tkSll|w|Dj9Y}&uvH>9JW8ht*6#}--z1qU4vO4| zy3`6gt)D5qsDeg|Zo;D#UZF~QAl%NZRM(^CzhwzXlBhtYbsf?Iy3A`ev#%HNr>Kkh z?4~U)N0j1q_B(+g7;GmFb*;U}4KQEV>khG@18Fjkmyhe;7IQRV#Y&t{NiL+DE&&F^ zrpT(78r#t28B$Iz3z#k%8}Vn1aCpfu;PM0MF8%!P*C7bN23yR`a=HQ4oukb;Sc|Ic zH+HEGpwjf@IGonO|8>H3gs{$|b{0Fzu9qF5)_9qw6@UF^k@b=fbdMM)l`ye#$!D?g z^*p6Nh$j(~__s8q25ZzL0~>+Y`?!e&7d4`rR1+tZe+&1F}v& zIH|3$4EREWs(TtdW_bkjM)|Q!Tzdu*gdzwDetvJ65~2j3x=e8KZ?=`)E^`CB%6g5B z@+N#`Opg4UYmD!e@SBK0^K8k5sH%C%Do5Z73I;|TW^L>wV?64~!>OMBu+?0LjVgY%K6`5D{#3VMNuZEP94btG4(sgw;aOP!D)SCGz5)+JQ7ChInRp*Fv*g>tW9ER$b?vE>|8tkfJUI~DjQ z=G(Vse)1(B4m95;>&)v?g84_)P9^BU5G*7>qft^f!_^oSD6!PUt-fAs3QoD1{_!z* z*;3D>BEtO=H<#6}8?XLhjA?lCg@1ISWkG3bK1KPb!^8?AP|*eqa_0jx(178ma{ zJi>v6gL;Jk%n!fnxnGF0qOj{_Q5+m7R19KIZBulPKgi*DzbI!jsmA)|5fqY>ljNk! 
z&hOdChI^=&;;@Hp-Borbf~097R`f6qN?B_ru21XFw|O#+r)Ft|uLBCn)RUIwah)$O z+K~hT^Z{pWqN(-YEhJH4zxM^BQ7W4NUHj=CC_Z|HeB~%OE2ODv(N-&~je7S8di@UBX%*jEFhTBAVwsO-)f}@l;-v0f*65ZEW0>;vkA5|!2N?`u>@#(0Vj)wz1H^_ zCd=9%)8Pyny8qK}`?dLY0pYsDGD!?^dzn7Mk%yLNZ*>23_kbsX0MJ8rtEs25nXdD1 zSzeR@@F3kSzcWHtsW`tDaS)sM`ytcyDTHdf0N?JUG`mz;LjcvvAtwMx<0%6)05qT1-yj?4kOnwpu|Q3x87_KuAzT2u%)@3> z?G!0;1ADFsY>dnHJ=*i~q;X2sDlDA~tb=77sbWWSsGn=u@hlqiD2!JT5RTKT>$)Yh2Cxj!*)xNe~Z0$fZSkZ{Q;?49AKcaTSUGC+t z9^cs6m#Z|_EO2`Fz%~y4i|7s$TA&;!w^&z2*KV;sm+y(>>q6sV0G@#ffd%8rZP6gl zu4a}tS&XF48FuT7n*0Hj1$DBYF#Xt}39i=`6)MHC;e@LhA5JhQ#p)|b4t02>tz1`3Vj*KB=Y3oG-^BlgcKO)|Ny834 zo-T~?Xl=hXSR?2>i0zbbcgva;0>GLg~x-F@qunXt`3c-HdgIQOjx&eyDkA6NU} zg(l&05j7D+KgnlXLoccCb6TWEMxvIs&Vb0uS73YV$G5_RkBw3}EQz1Ye^9W9>ma1+ zR2wx<(lb;9^x7O(c%0528z0GqhaCw#F8sF;G+^5#vfZ!olxj?*H0!9j$$l5IE;U-w zDHKt_Ews91)mhGJNeZ7y^{@?4kda=`a;_!AcrIq&%v}fAp3mr{o+Pjhd9SB%rL*x) zS|?u|$#UnPAt@nK&6e&RRMj@H7V-iVqaet8KtEnB{ZB9PCZly^Dt~l)8Q23nbN9yi z9Y?4HNMF5pMi$af&!n+NMrq;jAG^(zWG#cK4 zCd{M2dz>yTRKAnI^J8wzuzjlDXm8BG|96rsT9Oa@^hYqmORPwc@vQ2)b(XcywdvCT zyh}O$UDFEuHlUu2drU^1p+2~mf|LrDSVuN)>2e;YaequiZ+GDf-1`2PZ)AdU++}XH z1xjB6>TYS^v+n=2L}0&>mB{qk#jS1h#Wo3J*w$^2S7+PrQ~?+YPxbT4TA7@;F06aX zR0_=u@#%RqkJVx%h4NX_+)TewuqOJN33%Pc<;5% zL(~`jFUS1do1NC6QA)}3)#tptIj*`*eh}b7^TK*QH`{{XQBwBOM{VbmMG7i~`zV5YkEj28<44CKJV7p* ze%j}qI^K;krD=4}EUi52im_7hKb@1A;ecsO*tI2gD`ZEEdt+B7xEQ1po&zBXEDIu# z4bByZG|Vs={7wg@ax(Y{RDW7kT94VAFs4r0_d6q!{7I_gcI@r7zxC%7N<7p?zR!QP z>)64*G#6=hs7RZB*7?FSa9l`So6r^@)X#B}zY3Qm`Ccw3oyo^x@w|=IgIC!OwX6IK zZAa4#UuCDtk$2=H7KR<($kw2%oQ|ZQCq!j8RMre`X^TqUG6F_Mr^Y{nA8NKn(1j(& zOnZ5nMn#nD$76Wmm3cd%ugpoUKa`(@9M7W_276lsj55_%Jx^2{8C4Fh*^M8fXj+fv ziZdYGwM3^x4pHfaepkgi@r;(-YOhsa&h+$sY;~9z*sdMW#>01q(gBV((Qme(izB;^ zEo=G9R-d(wcTrXq9X#U#3iyyc1!tyF>u+WXCkrRt0YdSs#tFR6F)hzbiT)Q9)(_k3 z%|BiUiz{LSCB$X8$7R)DW9h76ygB~f@X&By!^rOlq2e)_@Ab!zH!n;>#Vvroo!EDD zdYu;*Q4?u(Vc87=S6?BWGu$*MW-EC_~{%m6#m;@>T*onL2#sc*<!@F}Pt^IMSW1y{?*LYdJu zHWwi7t4ZY2wZQ#rXU=jcSSm-{Dx_40o#OAW)~(})F_QB&ZOVX4>Yuw&INuI!?`z^5 zrvK;lsX}=A1v9zU)MP|C&RiUcfsURB>?25#7N#eSjdtUlf0tbfB?=)Qx!0`jh!yu1 zW&cG}{k@ibZ!RiTjm7j&dUik~E$R5#)@U#NxA*SB5IF<6CR4FDk3?QA-GVWF#@pkH zHeZ;vrOjC@X+kF=nU*`mkl==EeMpZ}hSbcQ(%E@b^Z({$V&$_aoU zH#8lMPt5J#e90ndy>|; za(C;yylGV3?_;El!HlYn9pt({nhUrp%#UjB4qAXe!64jp^blu!68(3n!QAGid9=;t z|5iFGE*fK{rLXr9=ZJ68NGY(A{zrlqpztU9QU#rJ>>m_ZB%FR z`>p6VEWTdc&UJ&v(@PhN_;w>e2KN*JdclZ(ck=gIubo47iyQMg!;@5Y8fIZ27)}0G z_p(2k8IU37s#L}trzU+{nt0h7j5}(?WHeEqBa}Tpdqo|pT&26vFnG}YK1D5Il~`<2 zP1*9Qh?uJot>A>jphfZxPv5JZyy1-eZ-0grp4cTqZVvEgB`Zgiyj)Kk2bI`n@Cd_Z z5tjT{U=De~KHQ!$0V-)hE8-=WV^kf_`{B^+ulyG?aWsA>jVsQ-==JKTdkX+VT-qic z^9_<5@3AR;==8Z-d3BBXJaWkVtc44@dT#uJY)|soNM`$^(!fnu07|~c1CZ;w)4ZoA zd;7_Sx6xXPaY{$!d8u3jc1o1E!Gy+%s8t6^^Cqe0BmPl)F>MNMx0?UkH&ggYQ&;md z#>IbID+3RDTCrZqqG>kK3z^XE!n{pQ_u-HRB#rt*HMwUqjsI_JGa`6$diw|B&zF=9 zeuzbGa#}JWtpTjWn$@1oGAO5l$R39lArTBF&WwP!9&p=jz6haccYfkkL$qKXlhQJ@Re1{FtjQ_9NZz8S|EoZQ6dkF2rigGzZ79Vj_ zLt@oh?H70L8zhnm`1w&+OX1bnvpN^DAwh%-0WCCN%c2clO-iqNoc|$l1Hvv%_MdM7 zPPuSnp^oz~g&8mIY$Cnp;ms!ne3^La0}J($8-4SM_$WbFecQH+f_R%%?|;>fN!x<8 zCaZ-mc6JK2fS~RJXrshf_4~B^p&px}W#-Z#taH}D?6j64XcPH800qL*u8(oY(W9Mp zPp8{BRI;MjLmR>bZ&0W;IulA|=uP4D7C1O&Xw`fEjsL$2BM=re$o6OHTVP5wCwm1H zu>lQl8VI(Y550J=g(U!?r$;8_p-0-CoG;T!GdW#uHa~H^g`a!|yh1X5tk=fNgmLcL zrD_a2ryi$&UBuG?4uRIeN@-G=UOmgTq4{&WQNQ#-v1UiAviUVJ&OOKsLp0Zn#biJ)y(HBzQAm_ z>3(uG-kMUrdqg;L<0Ave7GNQT#*&64qBk)n6cIb zGF{`=_MRADzqcd6QUFE*%Yup|J%-Z&C3dm}kP|NcA0J;G06gc=E5|9_)S|^z19-OF9#33M z`+}qLOfStK$E=e)t$Z&P@yXOa7A-q+_?+&111ph{guK%8r+Y(r7hBtzB)2jrEmBJN z;W*u!{W9A>g#+@01IXQUjtq%|NGH$6rV=`_~^y<2-G34obe`VlFiTV3`kec~k 
zgX6VX^X7Dg3zzs z!Ru^zB6kh%)SKT=#oL8)X&g1+-mj`E5Gg`JjN;^4(qC+vFn1yaU!z`R7wgZW6U>ge zvHd=vt+zf;hMLxxKl5_nFeB*f+s$`}b9MOE28CgtN8#?Nn-8xxTx<~pcxW6^O}1Mi3Tb5HvPWiv=c>-KjXC)=Y} zH!N;0J{xU}C8cy0$Uk>Ct)bx?OUd93oxBqy?(#}kz(LDp+s&Qu4V%q44kq~5Aa`TC zO78KxNG~x{{{e?JvQ}zjLI=i_)-dLq(iZ_e#H7liQ-g#+EuO@(yN!3(%5e)dO-+#@ zs&&nmd;ec|U;P)=_pMJ1B|RuDJ%DrxsB||-cejjyNDQ4Kl9EG6H%LoMhlF&4w4~$^ zL*Mg``?;@szyHB^emSq<#hi2Y-fOSD_OqY0o`J3Q2Q>p#w_l^Uu{z(_lGv5=uJN5j zBH4v6YX4mUtRDqMrx|Cf?$2np*s4;9TLilX4QZo`iIB5v<8x}mWPbH-jixg~p2KH> z%B>B=!UeiN8G)GioWBqtmM-GMc;`XWnpT_Q-EZC!PK)31tN!?6s8OZ%giA{e{7u6- zv9DY|cb)c!Tkf=zfw3{x$9;)1QqJi!f8z^>w|*+eEK{KoA0 z|77cs2DOGsU~o3V4x7GP=VC4ZJXOh&qebpn6!MGBgm*ius7>efMjQ?U-^$EisYOfp zoAAVpKVEf@e!f*qJ&mmCF3oy6|4rHBs|jQHyMZ z*~!QG>*)6K1I5LAf4i*jvkPzL8Wc&_4t3&AFosLEYd&vFfz|v{b@Y@YIoqWStp^Gw za!(sNoE$T@wS~9pJ=EcSHgD0HGg*kRL1;k|=5@r4Iv1+*_TBgsQZ+6fyT4N+c4sZ6 zosWT1P@n6U-bD&1mlKSLAcpIfPQmqptVD2hqGB0Fek}n$&*gV+%_a;JD=D+8sXcDi`dsS0olHPgTk$>oeSxsw60wPg+|k2p-|32@%10w5nm~6<&k?yid?; zDf3r2dA=1g4I&41laqz%&}6|yCq&NkqfiocrESGu0FhA{n^Rg^lr4<|M5IJY8WbpA zP;f5cKR&j^vNm)?R`h^%`2Cfr*XXdo{(WkRw~MZ~MlcH9jtCPM8XlKzj;M{2W!b%WyR`d~@I)ek$^&yiIQ5T^Z*jeNCmR z_*#DWL~$__5!@0qwpTYUlVxqR>oybi2AoAQ?VIH%4L_VM1kyYGojcLVwLh|;nOH6v&fY{CELU@|BCsbGn_?ApI$8T;4RGV1rd z{(EGDI;dlS!a%es9!su8ihiGlDjFF$nFR0m%UhQ7XGp@Z{%M5(F?MG`gnS0Zzm59O zr30IR=K;I^S=!@&Ug#h5{pZ#a=s`rrlrBAr|9<_SOB4edePh(RQH0?CbHo2nf8-Ed z3tQbB4Y+qp12l?G6LfWwXb|xJd_agxeIa2q;|2dOU&PPoms!W5EK}PFx+kE=w*Z5( z;q@RIF2|T~Y{I{W|Mx`+Ti-wO2FNb(HFT=(RZ`qAq#F}LL*O7x&%bzcDKxHsViyek zAbPpP@3$jOn^wotvXdJ*5%cO$KP7hTeeV-JrUl-?_gy$F`9KC|Ri9%{7^%_H@jrIFZ8=EhYWY+1a<(?}y!`zzZ~xS`>%XHLH-SNB9CrcGnXJjq#Li39 z3&`4$y>1{8Jrj4G1@~E#Nx#cqdJUHa+;}f!C0OLqnrHis$(>92M~r_> zOn}&&s~xCqH376)V`X(AL#mDn{|DesO@&~9B3orFmT2Q{VLGK-_>-MFjv?ot;t#hD zYF)}%^#1aMK`=EJitlVtA*GcH_lKI%!5oP^2C6X>sa`?((? zeP#F}9{WkG&(g+3nc3Z59Y8D{A$@tdShQKu(5&)SvX04aQmM`z8~Uj$meWW;*(8m zr1`im1$3GUDw@nnD{>tuRt%`=iukwh$a1G0pa>%E`NUTR+V)DzNV7hjoC71{=b;~P znvj)2YVv;Eve}xp0V{qHT>q31!-!)QAXyz| zsph+!!VrTfy}uyv6cwz5|b3)k?RvXWgF_Q4sR{eu^)394LM>KYA+i zXoVlIGvXS1P$tXP;`T|UmuzM?mP3Q;(9P|Eb3poCNQXJmz1C1?WkeeWp@O#1^|Y6l zgl}R++x1>(K*DMFu}KK{mgCMB$VE^a_1)s1oCF@ydELaruO+pfZR&RtX;wa?Z-t+m z_T(WyLC4u=ak2E(VPoE$+%EOBR_1A9;7sV?cC&uwpZD-h!5gX6h!M5>BBM7l9 z+pa_}LyXwb>o^I2g|`S#zp}K8p1II zU$FQ=aLJ{650UlYtG>|qkM|SD=F!W1KcSc;!{-zw`SK8FGNqr}pmG0MOlzgz!5buZ7yF+>wKKLyrepHSOLw4uy~UcF1P>D8d##VV_mHsmua`H#lD_eH}}agRHP7a4ZWRwWjbjovox{dD$s!=OcPpOg)5Qp>*%b`^`>X)D&lQKA5HNn$fg5&x+ zuMX<8Hqx9)XxY9r%H-w@&^C=~y|i0Rc#_ggey#lMOZ&z5BR}o{^ZRBpL2?PdG&{gI zxchlm0bWyRVn8W;xip|*-ZnksgV>*;vu(V#nl!ELZ&<6glkIytm8As2tATx<4D4J< z_9pM@t3hOYl5`C=6LQOWWF62~>%xDVo*;-xLJ4BH$v|S&PZ?v7ed@-H;0XStI@CqJ z@#VX1EJcGpwkTqlpMA^OXOy;fY@>v!5pXA2y9R-iHN~;wq%Z(&lQT0GJ1XK#>=(IP zzHDpI$0S=hiSg7PW;bkj2s4`lf=Qo=L=agh&KCK?;m}&f{xPda%J(A-vMmB1+MI1* zJ@DS35D5VB;~$~(J0TXV26OJgN9dE0fn&Wbox+T*$d_=Ierb^qvP7uQ0dH{1He*js zF1>Lew-grIr=R<=1`4-BX7`#-HTL{9=Ie;Wkol*u@EN!#At) zeG>Y{M5YPN?>yN&^O>!yx`S^j$6aN=&$HUrfXJ4KyVAe5AHY$kz-gGEkovQNeY_c! 
zc#ZbpA%MkVs@S-h>CJ@c_lx>Ef=r<}tMh|TvW$sYN^drkZQMB&(^FSe3F_P;M}b{f zFfjIrE1&(V?FD-yk9~wvQxUY>`kz39#}s7Vwb<8CzhBlU{S@#W|-e}qKbo%AOmmScv| zoX}Cs##N&YMU}t_VtMNE##R`_h{>;vzQ{O@0fTo z>9Gh+v37@uZcvOC3pLk4iO%yyRYN)YJZ8bat0EZEEe7PBoPS&SW&n(U(X9=Du)T@j z*waH-ol^*L@|@4ucdzs@iJ#73O2hVfMZ57Pp9$U8LuAVBuJXE-|G`4UZ&j6SY2VqZGp9saw>1MVT&oM1W?^n z@nqFQvw!MG9|b}tJU`pZOsoJgvL$%j9x|wHNt0!7dARw4-q&=^&r2Qx-V%(UlH4Fg zG;B2p9lL?~>;80GucWH-XIgn_B6*bSP{qQ;y-bLMscxe=I+(|ia~z`mmWR5R?Bo95 zF4zqk)hsP0e;mQ2-gcpPt$cjuvjEJh$TqPy+BMJn=}P}^uLW5|Vp*}{;YmV=fThqF zG(2?@I}Rcgm~agqP1P^AzV8dzKVu*47cWW5ugM_>Qwzc;;C-})VD+rqO%`_N4`FPN zeKaAJt~tg@*T+_I1lj{br5f98SIyr`$ZfU6@Ex_BZWHf;wX`mc{YvLa7< zHo3cU5w8-G2b>i_Cq_;f6qju)>*01=P_aH*X2^2Ld$whHT7lnyl57v8u3MQ{rO3H< z4@8xw3EF(0t#7QL{70$NdCmbhi|&*J9TVbIej{K*f+Mq9pfuzn)76|+M4BEBH2l1} zx=aS^>H9e5vRg;;`izVDUf0SK8jX-4iC_Akb9hqgeA=+u2yvWV&-MmQH?r#9PN>Te z4XmJ-u{wJGvSqtC^lWb#2SOSc?=?Iw4$~JC>6LOAl$Hs<&kzsH15VZbGlRf9m?}+4qwv6wsASCX+yGG zR=<`if+|CrnCZ~D!rsx>IR4m~UCVYc#2*Y2HeaBvpry;je(l3h`945`;5eU$ZQUIO z1jJV2{ZE-RUJUO`W2S;s)BGzVd2H{(B%X@N!B%b@*V$iGW1Kh-^#mxI9_uF1F2f z(R7a=Zh30*Y)#o|f1P}V>sw5)IO6i8#e!OcfpaBIwI5;iX+=&!PMY!arjHmNTpc%5 z5EJEl>Lcz4V1Cpv4-|^u=RxpQ8p!Ab5-`H80I#XxRhV-6eJ`@c{FdmEFRX3(@O0&ew3er?GBrda4M2@;T8?qDcej7heS> zkaX4Mi;Ihzv>9B?N@b0qF=d&E-f&L zpHjQn){e@UU;`uWU@>>NaN+U?V`KuvL7DkyxcJ-=cI(}VQ9JloG$iYO6SiJQ`8T}L z;YHkWJ9RT`QD#P&&J%L6k%u{H5p@=f>)?t~4{~1XR|!r=UYkF^gGssr< z*zZm$O3E6DKE2DJ(}4<1x%_}H%>5et z%{WxbN=&fXl-?t%ZC-4K(c`Nn2B+aKwyj8%rUfg(L~=}d+fPd6DYPRk>~YwmC~Nai zFQAlX88ym#&j*P+^Q*a|bhN;la1Gz&j?Wri@YoaOMn#IbcM8VoZ}t1TR=0_w+r*an zp-C}@D>Z&$f{Q+udTHG@Bx$20h5E&KUOWv&w#Vx%EXfz2wu7H7?#o>0ioC zM5A>>TrK&deur*X8E59Iv{S8DA8r#mzv)sv?8u8dijX+`g<8zCv6JY6qvJ`F0(ip` zIt!l@Ms!GlPUYI_NxC3It8Knvl~o*hhCCIM13p~W)h6JZj@`LqP+f= z$z2{eO7N(L&pf({2N zfQ-qXwu<#4Nb)VBL8iy$kcE#h%1cq+tO(4~w&5klU?Z+@zg-RsunylhtZ;6>UFG?V zG*1l%FLZU0@BsgjuoJZ!uh~^u-{q4G1VPV4F8m3JRM!q$P|WRP?M{qz8D)_SnZl|R%KldSyX^B1SW=by+!r}K#~bvfY0WNAiwDYgt*s{gnyx0-+O((;kT) zvvhL?cIx@?AKPzUv0SvXaQj}}xYGQMQ34yd6hMwAG0D8L66tg96jFv&?TMNc*rGj#N4u}WBJ*dix$7&;=6Mhnp3wVB*P(<{#;fF?{kyqRee#su7WG1ama#68 znOapi^Jk7gG#L&f8$(uPlHaPHWKC)wmu3|gQOVy&m5|f%Ef@ z5p@PBn6Cer(dbl{xl1tkaMV+dk{!LDg86!KfsX>-GgH&nlO0&nZIfc6YbN~-ieaof z#pB!|g4!uHklg*8VHdWy=UmYC!F=VpL@k>D4Vv@K0q+{vJyeJ3fvZo;e-r1g7y<aZz?ovb!r2P+Z?6D+%jaYhL~-bMjWEg0UvWDJjkxH_XW)s4(Y%f~ONKelXnMcI0Jw@ask_EIjWsva1LX5g)|SNF=G@9?Lr2y-|hwN z0fIW}c*r}A6x&Mnc9~#@IR$?5L4r$n(rZR^X5o;p6~Swf>sS-hx<+3|`Xv<$R=zRl zX?{tri;q_7gGue|#`>S-MK2XMLF{E1qU<&P21`R2Gz{VNe;nHp*Iwiol8eF$-ov2_ zDrP{v#$37AJh8(h5Ik$W(_y3_-}?-ntR&J6{Uyr^jYBpM5V2=iRZlxN~U70ylz6wJvVHrmu^U?)L;jm;L zD>9OPCy?;PLhdLq!AAhcs!gVd4jenuNTD#pdzoK}@%JaIv{HDAxH&yvOu(lCiiR!N z+~V`(orlg(=Y-QNG~$KehTvfGBqS4q2uU|M&`VQ0CHl=PI`fYErBYckW(_n7IMm@n z4XIBM8IW3@%$&^t(8DBz9N8T1IMN|GR;%W=`J;0m;rhT0q7 zQ{7N>Mb4==kok>e4cp|HmX}=;+|@KE_2=c4@LA;$fdTGVtlH=S_qa+TQ8V=Hi7vMQ z_qf@6%iktmw4_HI`tLfdh_XkMiUopH>W)>c2}4PD@)Okdc(a(+$-hZ>y* z-gunTMDbcR#BAJxt!)yq+cDkW;e?lkpH~XKn4of^G5Lg7a9OmT5~iYljAn|o(UPSq__@7% zNgYjj`{Xw0-k=Pd+!;i^^?D*20$TLuy}ax!Z!}BuF__YkoR&ARv1q81@J6nMFYbBA z5LbLRU<_hb65+<(aC{oZLl96NS>lgya_Q$QmTAi(`PL8(%m5Tnp-{3HpfH$)>|v?t z=BGp#;Q1rASD*=(iTL`qR(*%}X{3p`D7_AUpyjKN-LEuRG|NpePGE)W)P?f_)Pkir zP)pgYxX&i&bJ}%&DPQ#Z{?4tE6?~>(a_y|BZgOccv-8 z4Xu2VPa&dRE8V`OzT1ILSdKycy{{C#O^yxe^5qH6Fy=>4@B#(u$IeeXkb58V?2DCz zWKrVwP<&1tz0fYafA)78Ao9rX2IL!pkz#;&q7w~21^Nkk+#M@lLf(dO`=3HFC7DH> zued9L()<>eVYHX}MXnm_#^%g}eR=CE`9v>k44OzdHP#(_CZ)-3U1H0SwAFw1Av zhj(87aI-EI5-=S4w0StLB(_(Qw1R^dYao+lo=@)L>Cl2#-JzIu+fRO-exE;;|pXAkOqK>K`f$Hr5qFT$` zZ*)b6@+K?M4-6%y@N9E$BmA8O6P&-PD9ZAQOp=p~hZQr?+K&>fPGpy6iMJEy+dF!D 
literal 0
HcmV?d00001

diff --git a/docs/source/tutorial/application.rst b/docs/source/tutorial/application.rst
index f9b541e0d3bd..83128b409950 100644
--- a/docs/source/tutorial/application.rst
+++ b/docs/source/tutorial/application.rst
@@ -5,3 +5,4 @@ Use-Cases & Applications
    :name: rst-gallery
 
    explain
+   shallow_node_embeddings
diff --git a/docs/source/tutorial/shallow_node_embeddings.rst b/docs/source/tutorial/shallow_node_embeddings.rst
new file mode 100644
index 000000000000..76a7f7876555
--- /dev/null
+++ b/docs/source/tutorial/shallow_node_embeddings.rst
@@ -0,0 +1,145 @@
+Shallow Node Embeddings
+=======================
+
+In this tutorial, we will take a closer look at how to learn *shallow node embeddings* in an unsupervised fashion via :pyg:`PyG`.
+
+Introduction
+------------
+
+The key difference between *shallow* node embeddings (*e.g.,* :class:`~torch_geometric.nn.models.Node2Vec`) and *deep* node embeddings (*e.g.,* GNNs) is the choice of the encoder :math:`\textrm{ENC}(v, \mathcal{G}) = \mathbf{z}_v \in \mathbb{R}^d`.
+Specifically, shallow node embedding techniques rely on embedding nodes into low-dimensional vectorial representations :math:`\mathbf{z}_v` via a *shallow embedding lookup table* such that the likelihood of preserving neighborhoods is maximized, *i.e.*, nearby nodes should receive similar embeddings while distant nodes should receive distinct embeddings.
+These techniques generalize the famous `SkipGram `_ model for obtaining low-dimensional word embeddings, in which sequences of words are now interpreted as sequences of nodes, *e.g.*, given via randomly-generated walks:
+
+.. figure:: ../_figures/shallow_node_embeddings.png
+    :align: center
+    :width: 100%
+
+|
+
+Specifically, given a *random walk* :math:`\mathcal{W} = (v_{\pi(1)}, \ldots, v_{\pi(k)})` of length :math:`k` starting at node :math:`v \in \mathcal{V}`, the objective is to maximize the likelihood of observing node :math:`v_{\pi(i)}` given node :math:`v`.
+This objective can be efficiently trained via stochastic gradient descent in a contrastive learning scenario
+
+.. math::
+    \mathcal{L} = \sum_{w \in \mathcal{W}} - \log \left(\sigma(\mathbf{z}_v^{\top} \mathbf{z}_w) \right) + \sum_{w \sim \mathcal{V} \setminus \mathcal{W}} - \log \left( 1 - \sigma(\mathbf{z}_v^{\top} \mathbf{z}_w) \right),
+
+in which non-existent walks (so-called *negative examples*) are sampled and trained jointly, and :math:`\sigma` denotes the :math:`\textrm{sigmoid}` function.
+Notably, the dot-product :math:`\mathbf{z}_v^{\top} \mathbf{z}_w` between the embeddings is usually used to measure similarity, but other similarity measures are applicable as well.
+
+Importantly, shallow node embeddings are trained in an unsupervised fashion, and can eventually be used as input for a given downstream task, *e.g.*, in node-level tasks :math:`\mathbf{z}_v` can directly be used as input to a final classifier.
+For edge-level tasks, edge-level representations can be obtained via averaging :math:`\frac{1}{2} (\mathbf{z}_v + \mathbf{z}_w)` or via the Hadamard product :math:`\mathbf{z}_v \odot \mathbf{z}_w`.
+
+Despite the simplicity of node embedding techniques, they are also subject to certain shortcomings.
+In particular, they fail to incorporate rich feature information attached to nodes and edges, and cannot be trivially applied to unseen
+graphs, as learnable parameters are fixed to the nodes of a particular graph (making this approach transductive by nature and hard to scale due to the :math:`\mathcal{O}(|\mathcal{V}| \cdot d)` parameter complexity).
+However, it is still a commonly used technique to preserve structural graph information in fixed-size vectors, and is oftentimes also used to generate inputs to GNNs for further processing in case the initial set of node features is not rich.
+
+Node2Vec
+--------
+
+.. note::
+
+    In this section of the tutorial, we will learn node embeddings for **homogeneous graphs** using the :class:`~torch_geometric.nn.models.Node2Vec` module of :pyg:`PyG`.
+    The code is available in `examples/node2vec.py `_ and as a `Google Colab tutorial notebook `_.
+
+:class:`~torch_geometric.nn.models.Node2Vec` is a method for learning shallow node embeddings, which allows for flexible
+control of random walk procedures based on breadth-first or depth-first samplers.
+In particular, its parameter :obj:`p` dictates the likelihood of immediately revisiting a node in the walk, while its parameter :obj:`q` interpolates between breadth-first and depth-first strategies.
+
+To begin the example, let us load in the needed packages and the data that we will be working with:
+
+.. code-block:: python
+
+    from torch_geometric.datasets import Planetoid
+    from torch_geometric.nn import Node2Vec
+
+    data = Planetoid('./data/Planetoid', name='Cora')[0]
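+
+To build some intuition for what the model will later consume, the following is a minimal, self-contained sketch (not taken from the example script, and not using any :pyg:`PyG` internals; the :obj:`uniform_random_walk` helper is purely illustrative) of sampling a single uniform random walk on the loaded graph.
+:class:`~torch_geometric.nn.models.Node2Vec` performs a biased variant of this sampling internally, controlled by :obj:`p` and :obj:`q`:
+
+.. code-block:: python
+
+    import torch
+
+    def uniform_random_walk(edge_index, start, length):
+        # Repeatedly hop to a uniformly sampled neighbor of the current node.
+        walk = [start]
+        row, col = edge_index
+        for _ in range(length):
+            neighbors = col[row == walk[-1]]
+            if neighbors.numel() == 0:  # Dead end: stop the walk early.
+                break
+            walk.append(neighbors[torch.randint(neighbors.numel(), (1, ))].item())
+        return walk
+
+    print(uniform_random_walk(data.edge_index, start=0, length=5))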
+
+We are now ready to initialize our :class:`~torch_geometric.nn.models.Node2Vec` module:
+
+.. code-block:: python
+
+    import torch
+    from torch_geometric.nn import Node2Vec
+
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+    model = Node2Vec(
+        data.edge_index,
+        embedding_dim=128,
+        walks_per_node=10,
+        walk_length=20,
+        context_size=10,
+        p=1.0,
+        q=1.0,
+        num_negative_samples=1,
+    ).to(device)
+
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
+
+:class:`~torch_geometric.nn.models.Node2Vec` takes the graph structure :obj:`edge_index` as input (but none of its feature information), the :obj:`embedding_dim` of the shallow embeddings, and additional parameters to control the random walk and negative sampling procedures.
+In particular, :obj:`walks_per_node` and :obj:`walk_length` specify the number of walks to perform for each node and their length, respectively.
+The :obj:`context_size` then denotes how many nodes in the walk are actually used for gradient optimization, *i.e.*, :class:`~torch_geometric.nn.models.Node2Vec` slides over each sampled walk and splits it into windows of size :obj:`context_size`.
+As previously mentioned, :obj:`p` and :obj:`q` control how random walks are generated.
+Finally, :obj:`num_negative_samples` specifies how many negative walks we want to generate for each positive walk.
+
+After initializing, we can go ahead and train our :class:`~torch_geometric.nn.models.Node2Vec` model right away.
+We start this by creating a data loader that will generate positive and negative random walks for us:
+
+.. code-block:: python
+
+    loader = model.loader(batch_size=128, shuffle=True, num_workers=4)
+
+To generate random walks, we can simply iterate over the data loader, *e.g.*:
+
+.. code-block:: python
+
+    pos_rw, neg_rw = next(iter(loader))
+
+Here, :obj:`pos_rw` will contain the node indices of positive random walks and :obj:`neg_rw` will contain the node indices of negative walks.
+In particular, :obj:`pos_rw` is a two-dimensional matrix of shape :obj:`[batch_size * walks_per_node * (2 + walk_length - context_size), context_size]`, and :obj:`neg_rw` is a two-dimensional matrix of shape :obj:`[num_negative_samples * pos_rw.size(0), context_size]`.
+
+Using this :obj:`loader` and the built-in contrastive :meth:`~torch_geometric.nn.models.Node2Vec.loss` function, we can define our :meth:`train` function as follows:
+
+.. code-block:: python
+
+    def train():
+        model.train()
+        total_loss = 0
+        for pos_rw, neg_rw in loader:
+            optimizer.zero_grad()
+            loss = model.loss(pos_rw.to(device), neg_rw.to(device))
+            loss.backward()
+            optimizer.step()
+            total_loss += loss.item()
+        return total_loss / len(loader)
+
+After finishing training, we can obtain the final node embeddings from the model as follows:
+
+.. code-block:: python
+
+    z = model()  # Full node-level embeddings.
+    z = model(torch.tensor([0, 1, 2]))  # Embeddings of first three nodes.
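+
+These embeddings can now serve as input to any downstream model, as discussed in the introduction.
+As a minimal sketch of such a downstream evaluation (not part of the original example; it assumes :obj:`scikit-learn` is installed, and the number of epochs and classifier settings are arbitrary choices), we can train for a few epochs and then fit a logistic regression classifier on the frozen embeddings of the training nodes:
+
+.. code-block:: python
+
+    from sklearn.linear_model import LogisticRegression
+
+    for epoch in range(1, 101):
+        loss = train()
+        print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}')
+
+    with torch.no_grad():
+        model.eval()
+        z = model().cpu()
+
+    # Fit a classifier on the training nodes and evaluate it on the test nodes:
+    clf = LogisticRegression(max_iter=1000).fit(
+        z[data.train_mask].numpy(),
+        data.y[data.train_mask].numpy(),
+    )
+    acc = clf.score(z[data.test_mask].numpy(), data.y[data.test_mask].numpy())
+    print(f'Test accuracy: {acc:.4f}')
+
+Note that :class:`~torch_geometric.nn.models.Node2Vec` also provides a built-in :meth:`~torch_geometric.nn.models.Node2Vec.test` helper that performs a similar logistic regression evaluation internally.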

From 4265438da6ca6913901e46d37af3398d82c066c6 Mon Sep 17 00:00:00 2001
From: Matthias Fey
Date: Thu, 21 Sep 2023 12:30:12 +0200
Subject: [PATCH 1494/2432] Add thumbnail to "Shallow Node Embedding" Tutorial
 (#8065)

---
 .../thumbnails/shallow_node_embeddings.png | Bin 0 -> 39680 bytes
 docs/source/conf.py                        |   2 ++
 2 files changed, 2 insertions(+)
 create mode 100644 docs/source/_static/thumbnails/shallow_node_embeddings.png

diff --git a/docs/source/_static/thumbnails/shallow_node_embeddings.png b/docs/source/_static/thumbnails/shallow_node_embeddings.png
new file mode 100644
index 0000000000000000000000000000000000000000..fd26940743ad18ef55742d4eff7f0967194c858f
GIT binary patch
literal 39680
zcmY)W1z225(=`kaFu1$BI|O%^Ai*`bySok&+(Uri!9BQZ@ZfI2-Q6YMaL&1(_y6G<
zcF&%+>aMO`wQ3EK%AaJA5eN_f008nwSxHp@0MZ6rzJP-P|E;c;bp>C5mST!x06#RtEr2%lbf}p1KE4O#wL#L
z42eDX6gx;mX?mMVeP$ITQuz%`9p&cmq)z~M#wXM(oKcLX?rJz1yVZxeui|Y32_j$l zY^`seDq#;S!zptm$2JpezWk>_* zm}p?MCmfNuOXkPlx(NJ387(PKp5%^9&Jvf%pylLY>g%>Di7>U3w96gRU(-#wMpuXk zwM`Uh8yTe|oiZZ47!cGpagu7ss{g9i1rt!Tf=ROA0VqHEfrFmMS>&5pK@C-9XryAU zVr=C012o3Yhk}a2{GfpA9Q|K|3$b0x3@PoG`=A76gyXsBJXqwy3lbsvU}}e*&p)&6 zK`QbYv)j61AdH5T%>tSKPJtH`q_}c-)6qvbLsvy}=qyXgoLW?xi(`OBwVEh1;Z83+U@f`&ysTWy+6PvT$uR^N zd-{he2Ch6nO!ZZwmp;!i@QB-tPU^~6)T)FSr3Z7JsbHZ~O{5+Vyi&|za;WP6t&}9N z0*E|ar+PQ8uu>C?kc3i$6?xj2mibVN3Hn^dnZ<4-{Tt?xQc!M*y+?XkoZe!Q?ugF^ z3Hpo7VkTX)KV#?kL0s`dspQytKmMzW1;U0}0i z{?uAG4oQ(l10`1maG0v=>oTCgszAZ2slSLG37aj_qrgE{$So4|)Ep|x&881X-~VTW zyvQIm;ZaS}cWH<%k{WXmP)M<$d;+;MWgxs~rul((+`fIz+j5ccFj9TUxoKh&bYb?( z!L4pJS{B4m&(g1xbl?L?@I{J;;a~gZMFgopM23rrO;dgn^?`%V`&AddQY=$?I8{Pv zNSNB{l*9*(8!MFU(tL%R6y<(uKgN32|G-JW7_&AkMEw^GRyq@SdZE&u|C=5Xq_X}z zCM2*>aj7-&N++16it=TzYetKq>GGv+)#@f#8hJNy0FMjLikk-EO{4e{`5B8 zMn`-uoW*rVKDAIOm;5bLZ6fq4*Jqatenzf4d9*x82=@K-mahgov`b;X3zlMslO*UQHP;b8{@bsV`fWV}(^vu+GaYG%E@N zKUi)X^CZVsr1gGk{iE-w;@zEI*AxMMD>!mX^>KbLFZPIS0}`(5iF@_>?hSTb0}zq< z;Yh{J_;n}KT$j_g6u*ij$NY#d{17D`igq9HV>8 z4_|B1E@2uLSHOFR!qwx_A=X+XS?PW)6|ed4!9Oqq#&A;K4;l0Sy0g-_jqEKpE=QHs7N0~```l`V0zr+;m~-!?l(@9vdhxnhN zy%-=<`gy!(VB}UGuph}+(Ha>)UeBtCoVt@^W2^t@gb3m^8h@NM?UUsjoKgQFH~UEd z`vw4ZIl!Jr&?gy?M#k`GZk3}w_npB{P;%4!{*romPh?iCs%|i>6>$^Y$6FPFsn$v!n zJWuAfT+_8%RxrAiulg(J%{#y~K>j-}L^!3g(yVHueW7W2R{@A6@ni|U7{Z;%iFckv z{l_Ij|MO&js;Zk_=87R$&6`NYJN}hd5Ho07VLEA{Di;tZ_j8j+&^0E@MdJd&{ML(k zr4l)8D2U6?9hh}PSk?cZx0n?Vz)?y5Q_$;nQm&ysc*MvYC7OY@(Xumg zJs2Jt@L{6RlyJeBpo{^f%VGp`xrkrjkm?oN4dY_D;@^|m{Sg>bkWa<z!D_Fc2Lt7I7eHSW9w5GeshmlaI( zKYGixlr_9t5;z;=T{$NWv`GuwcJEcza zK%FhfxMz2netM09*SmjGhjH)sv5bq%0Kqq z7Y(hA(~x0oxG4YR+aQaN__8+akr3X&HxC3zGTVS2@(P&M3A0%HdA&b9qLI4A3Yh=7 zqO3BY(`m4aefY4DQ6k8bfoWZ?pPo7qfzsiZSW!d3d9DmetkaMQ_D1bc5uAjbYQToB zA4ak*|DSVPOlfp7J?(Lpd-D3+H`Epf_0>iN$d3V$4lCJD=?YI}fw`zQB6_e@c zWtiCLZgKvt#GU48x$&QA8h}75ub8m z>v7H08e(%=0bDv2+Y!+tzW?kmvjhlNn8{81LUn0GZ7z#BL^1wI5J+VQE24l!B%en$ zE+P*{JQFz%4VKN3#cJeV+bIYsou1-*1BYEqv>STdlLT7bX4PYztVCyHpRe`smnbDV zN9`Zojq7BiwjqvfZY=(1>zl{`)&#|242(m?+eG=lQwy=$m%T^kj@?`NAjRi@{h~MD z<`)-U9+%swk{lbOP4f6R%uAKZHPBCXWlQ|+@+rX)0NbRIw^!R+Nc=1e8J1aSww|vSyA4w^0gsg zFFHYjqw_fo(;(IJVawvQI6xD)90ItSgmHqr_d}V^tX)lM$*ozp@Lje#)1$T-`;q_r z@07?On_Cu~wk**Iuo9TAe+m2DnP;#_bY3_Jbq1@k?(XlanVT188tUrGSzBAHtEy70 zySlhoJ>Q)s_lTLRB=P=wD$^*ZlS6Fn z#BdR2u^iR~wFsO%rKYEI$t4Y@8#n6zXfXi=;Fo!Je&3|J(?Th@SUtg&FEaSRAUf8rA@VT?=W?J5D&3hmHNX+}?a`3RCZai-+gJ~-OF{2UL@w~f&^=x^5Pl?Ns5hZTJ`j=W{(^q7Y1Ay3NN+wOUA7aB@147~ z-dq=r!DUHVar=G}WyqcQU4k-r48qdz=`kA%i;HKb3+1h6iWGx~)46O*w0;`gEOfnT zP+9(coCvgAO2Um+;w2z5DA6qSO}9gwt-3t@W@d!wRgNoZeSv{QH@%R{c@f_3p=-{Ld$Khx4Bu3kOMj z6$-2qbeXlQK~)`hy5yqD`oW^eM7#-6(a}=G_#`Bs5)%`})1uO_g0!L84ujMYf9B@q zstuW#`VVmH5yIgW_%#dP5K=LL0jihpqGHqG_GG~&q-9g1qYC6-7vBa&-c**`&O4tA z9Gmyp&Q`*1kF8PV?+30LYS+GU3*J)|?(a-H8u(}7D1MieAJ0-Ww`*z)m=KOKhu`cY z=gy-mPpt6e=Bg$&W$-uivpr5_NDx$_5B7cSA9_ie5caDq-b=06JYFbV{vu6$ofJqL zvd5fvMk%cem*M(UB00v_Y*Puglp95GoHC=LL!vQ8<@}N|FeYe(konMA<@2f1!L|&E zgnT;`kR{zJn!Dt(_dhqR_Zzb*GaGkaim`S}__$`z+Ae)f6PNe=(@>l+t>AU%P-G^r zRPC^bu({CY6C?i9Ytby^D9D(gLAW%*k}a?&jT0{ric(RWw_W@ z6L@GDDZYjUumgzjQ?m+e^3Om3oPdP<0fZquyZ+q0kUqP$c5mQkw#Ag51Ck_`WN{RU zpjTx>6cWL-aiM#?QcP~kA1CZED?jgWZf3A%*2DZAA6ALhlRVYB8XPhgX77uRMn5U! 
zhQ_ZIPgb$F=guKOh)p0y$%m%h9Fp{6Gz#)3WKvL>O;3{FY!;2(OlJJebES>RU;n$_ zQ15V0(t1wnIIW1wRA9R|nny9vnh;4ij%+avX`S2^Pr9IGp4#{$c0i`4G}~j% zu|s~OYmFvL+iP4Qozn>*Ro(N1YZ$0#>rw4*=*!jAjMcb}sB9?^@# zlawzdEVFB})Su6qXk*r*-sK-Nss|MmW(Mr|APLNguiX^18!~c3{kD$C+Aiw3?%$Na zkx*IkRJ>0q6F2#Axh<7vVb^M6YAO|LUFrCz*|}uiV40G=Zr+x`uDNp4(^XG?jWFA{ z^;~i8yDx8I9a?C$J%MlW{8uBim@0h=Z66?Q6vGid_QzLWw|&m226YuRDYY<^BCP;_ zfiJMIDs=M6awGQHm9Jh%6ym{$vAc|ix2!2MBc6Zc$UJWV?j>8a8Fq1idUo6?X!^!`Se&>)oOv|yBs845$Sb6yfd2*eo|ar zbg(B~k4;C*5h`PAB$FzvSQ`^br}_j%@w#bWB+B*su+kifjQB&gYd2G)E$402;qS68 z-9#ehO#VcRIt0!L_A29y%77Dp0wNqSu?m_n@r&5qa_iIatef!;PZf1-M>*bXIqF?( z&NTQ>cwdEh>2r5&swQl%|TYGVzx-L!N+ z{1r?WaVCPUy-_Z(#$iG(dRp^_6CE@BtRYjn(xjRy{7T2hbdu~FlkWqL<5gsBgx=TV z0~p+(|3fJe6Eh_kv$-h+Ka7X;yzaG)d}-l#zv}s=L@Wu$)Ccc4znkM~TAsC#trI?9 zQc`lL?KbBdz@*K(ZqxFZFZ*t(gqY+0v?%>b^~#>=v6Xzd*~9Oy4h?nD^5dpG*#|OR zGV6qdx$0T_%kOGd;}12p#0BPOp*2{9b6;9(D5az{xKZ|dSeB2}&p3_R#d_`plFBib=U!|7#_L85@ODS`Z`l^`iQfKmYpJwRU6trI z$7iWVhdIO0X`n@xMjRO@P;VYxIbe`mN}t^5D$qZXW*vy;X7+bZdMwe(MjyNYlk{+D zQVZ>$1t3D+elG37W+$W?2DIQ^_d;QkM4h)^#{u2G+*4E8DBJ568qfPw;hRi0PN-{Ot`7m|mN)ywVB zZ@%lc>5ux``LFK-L`pUE3=RtBhiWB9g2lQvH0uQG@`B3zK`b*)`s+~HzgybS?cRov zt1mC>*0MPcZWr1J9WMLCoL}Zz4=DEvE`7e0|NLu7-PyeOWSnq3A4nVl*rT54cV2jn z5MN}OnS%NuSqU|$n(+=T50ZhT=9nswaT&KG;Wc)#we-kU9)7e+ieUwM=~7%ULUvHB z;dSyb6$O-=Rcx|9y;4CYFo_~3=lCUKh&(17F(uW0mEpU?jEGMBV2nbR#!_I8S0|`R zAJAzpU|x&7LY%7CM^bF#N_PlMl}8*6spd0z548r-|I>KsOWu7uP8vgtkik0%<2sJYd zq~ljj_8pf1iW~|;!0&-=EWLj%h)5XfNDd2)9q8)+xSL9?Al>A!N=1T~355jDal7R; zYr8gPxs>}iU8>vYn=8|IhUjQIHiq;gZTom5!(6sko|Z==24 z#n_P&-YM&Mj%XiA8F+Sq-$@lYZ-?Hk9%n?Y+gH)0B?Za97ca>(H41{@pnl!verg|W z_-+et;m#y(Q{R;9AUlE}KXQ1AZA<^NHdJn;?BMFr)%va^ar#{EE~(>h1J7aB^SK4K zWAjaA==Q>H?Vz2q5x^ZD8NGr22{K8y&sfE&5<+38-i-`WFr`zF6(No!kI19okk3XT z-?g|>XnGWAFZF3c4U8b6NOUu$SxiYBTVxX3M`nwg)x??WL z>Dc*b;fw^j6p13Yeu&{8Lj*s^P#|KEp=1kNT9Vr=aiJyM5)+6j%L`v+GFpr);iqYN zF>g*uB)VF+R9pS_kjOcO7PsB5B7SfdIFgc!Wau!rUYx;Ny`FUtJ%tE;`i^ud6&XuZ zrBs#JB&zF9Mfr7@43pPZ!u|p2nQykn?il6Csn-Xg;`RqeIC25$*mtSTilUzuZGwwM zKY$M|%j^Tq5g%6>4J-1pWa6g#mAVwdwVz-P2@GU7YOQ+wCzLaMdQkJG(b5n(KM6kMoXz}*DGUS#=xe7a#&ygPHJ}M1S>j^ri^1F%}dc& zleN4p(jH_{J7`Yt8znAh&X}1MMD3bmwi7zlw2=83 zlS-ET$WgvWhfM3(<2;}C@2ew0|K&^`W!oexYdbi3a?co{DW!p2xi-wF}-8j+Sq?JrK)jI)K!=oAQ~eY6%1Z6+IO{58RWF-ymFR#G!{jGKIQ~Sq74S{iB363S07#vRAzSfbeM0 z`zy_4%ZUoXugjHb*Xx53djV_K!I=y`2c&PVqfN-Z_iBP)+aOV_Vy-@;^gVA?^30gU zy;`)(n#&-*85;y^Qxi`V2pGP#W!BuwVvGr!A_(A`O_4Vimm6{IX|>%=YY}PVQ&Z7_ z)EqRTQcr+1?uMuJy8)*INn#~@(Y$CEReimj0l#?>ENelw)6IUS=Fw^!&+DmU;Mbuy z-f=3zu8UuYU`=434Z>2Od$2o7LVo%wfmUK}lIZ)8D~naF-Qv?jB-V_R&Gf-gd~t1L zkXBheqsDIVZgxzh0E&K4#|K+U0EMKe?d^`iQu09Bl{PL_zPxHju=xBXgXH-VnUkzB z+<)!+G557@XFhUW{u}Xn;)mmXTxJV;(iG#B^9{NcU6l-soc0ojt@9Hb!uYH^HfLE&73x*>c-t*_{itASW^bRN}-0@~?cyqJiUOAZ3E0T;|co z6rAVtFeMzc7>{7Q4=z9fa^Ot3p?yJ`!xU$dL?WnD=Vy80>7nzf zB>E8jec^1S?yJEk9t}hp9B> zVzE2hz%oWQp7uL!UHAcaA^PYdx+fR_8Wv8IT@6FBUyvxm;0lk6(<}nZDB3OCSrM(G zs+!ctDUsyL3*D_&S6!`~33M9IlZw8uU=7>(fpT4YK8p$l__d+p6`|ve@#SSU@ed>W z!{=hZodi9Oj)|mw^l(z@t*#_hsj(?zU)X4G_v zNGRvU(Uoj-@z~-)#~>2zYCFPM&hU$wKNTW#f7^U2KX9^vA>Mp&nh8VwVMLP3Agozv zoy9|W>04r4s{1xYIcq<;;qfXA=6kI1+JLdmK?;BgGzQEZje4fY+hu%d@(kjSlO4%r zZKp6)(oYkD*D(bizdRV(dp#M4dp((}z5oUgO_Ti}&@`N^P((DLugF?8X^AHZ@s4F= zLZ=?pXorcu=*}+IZ~8d*E8G5}2QWivwRRWtth80GH-ulYXv-?+Q*i70 z!l@(6pKb*4M-6PqCQi|Sg_EQWvV%?YL??eu0bvy!-Yv}&YOuzysinHQI;n4wf6`^=fFB7?#+>YYd|HLp@H_KMW)+Ms(gGu} zp&LFKT&eJ1ta}cza#pBa`L+_|YJUpRKGhjmLi(`Oi;uzk5jbI^L~UKgtxLQ_8W3ZA zzR9j-sAMeUQ{QT=;onH`K}Zb!f(49%#{iLntXNxiFrwn)ajlnU!6=f9$P+p_J--Vs zMeNr|Pncv$#lC&pOE%z91J4}MOnJJ-T2(rKS-#5Pc|oTAV1gQFl_ly&7}Uqer204# 
zcwAf5Mw^p|M1M9h{`ut-Vjo!dn#;&zNMkZPDM_5v{|0?`XD33=6$}`<9Tujl zauIS;lAVG^tj9{Ev%On>--Dyl)zFE@f{d zx*@sYBqP@ptdT+m(NT6k_`Txl>%mD6dR3oHij5vPBjccSjB~pAFT1x3hPWn;7Gng% zE#40o#$eu=);qH6VPYXP;Zp@GvDcBRDI&ul7e(*7fP%0As!G7I;=_h`LAhyiVxTd2 zjt%4p$mGhS;`&mHa)OPg`)MX(_ItOjWt>4Xg%bwEabB=r#)QaUL1w+?1eVSRlaB#l z!T~0YLIUOnCkWD|2r*G~^V{Dtxsj4EjT~(dnuqv1V~#TfaF%+ateTuI$7Ra+;Hj2_ z+XTG~>rP_@qqSNH2hLlQ>3H>oA)SB!{#|__G86sSZX|`7)Bnk>#f=xaudgo?GQ^oh zImrWo$>1Kk)EOi7E%jwCvHN(yfVJ5k?xC94k$UAZuJcYh>S*O>KWbaqlO5rjPYT6G z3LGf;2gnRxX;dyHYBXEu^U3Jl`MMGq^`I%0Zd@{%mdmT@N zQyjb(*KnqmXCWma!U#zc25>;~+8jncr&-^l&GDlk=R&K%vCcW`xHnj?x5h-q{~(Fh z&5a2))=M@u;tYgLldr(JpOrj`1IZpM1JlKG!|JrL3X7LKt{o5Eq#~CO(WYOcKp7N9 za0Ift5HbmY$CXV7DW4q@K@HJ3%y}{--l2PA8MIok!{3KHB9S8^i>5hwh#yc6xc4-@ zjfF9}N`#fVCf!6+>*1xkh0r#h*^hFmPynJYN|KJX~qCrwm=g z<8vv{!>6(+TXX9b^9W+mN_FK9CP3LkI0WImyIk0i7jG4N;hA|cH#k|vl9f_yhhkA6 zTM7^@heTrnJ=r(Y8#6OAS<4z?k}Ls_liaH-myc<}HnSya>gnJ<$DG}l4CdtR7!j^Y zQuZ^v>716{YzDC)a3ulpJd^%dg@vlj>-oE$>VH>kD%uDx8rjv|GAK4^;Iyf&;ELJZ z4wMfcYfJ~Jluvoq{Uo*r<5LO6BpOOG^zuArzMyq)1xpptF-F_)NhLWNm? z{egnmdAdP`FKiEiEpgg1i{g)cQj1LYjVZrVMMndQHdd&{{Q0Bj^Y``sFp9A%x6}W5 z8ky&hsEUe864C1V`g*ov?xr_FF~uYa@b}e~ec0q_spNzPupJ+Qn-Fww$r64fda<%6 zSn{pJ`&x3!)>8n8^f5#i(?-6)$LHWjxK%d@f-~^V&j*ZK^vhyXA@3lu5?PMBwjs`= zv{6BFY1rit{3dLSnB}!$;S*n`*OCFKHh`*~81`OrG%~SiKv|s+VXGyp#o)vvsp4tQ z0;LhP9yT_1z0DlM?|n`xIMp_z9!NQP`L7j_yyFBWP&*3n=?hAv;gCjqYQ!tWmpfan z_67qe_L7Whz;+0TPQ%{PTuy7eFSITGp`|Dg|2NXmCCocx5gkJGFj(wrr4F$6KrS9z$CV;BpKAjv$KM5}fB_UUgs4PfkmdeSNykFV6OfN?|qp6as^sv@JTF zi@-HFBK=wn$4e0O(xPJUyZmJVD(!uEiq!({BIu|WZ!5kyx8VIm{jV{>ACNw8KOV%>LEr0odr7A6M z+_HD>Npf71yY``%{$(OQrXy&mTWxANWDs_g{T#!i*hZItLW+$<~ExBmY6zhC1^ zI2p72pg7}nuX7I#q&7I=*yzjYS@R9PP82|JSVmVmOR#L!aNB5JeSxknCkMB8J_v8`pUVWjaN^-leVLQXFEPyN#8~e3VF66%fj1H2m`EZG8CbFk zPO*lrf@o>gGwUoT8hOg^EIsIMLdwa=Bu~|EW6IqWpMSg?ICu9GWi%pjDf;1L*B!Ke zBm#;e=y^ahiOlRMMFdf}K-Y8J-PtHxNU%e0{1PF+T^dCN`mMwMqx5-ZFsuh!z z!@2Q7Cgg1z47=WUPotJ{?gLj!oYOvOgm^qCTZLqQ0TftjQ1{RJ9&y&F*?PH?SmuX) zaq8VublVw1hqnh{%wv6o!qb+l5usy?I0e}pUSk`o60B5j3ZBg5!PUb;j53c>V*$%M zw&Mg2Rz24XS@c^cr}Aa`bpU0bh{7L>T&skvj$4R>K%;w4e5>fE)`U?Y9{lBVXA3QY?1vwHVknHHx<_SX zUFGHF%@^1kzxrMF;~svu%ucj-c6RoN|97zQsJsA=k3cph{aVvydhpW?z1wGVFtaf9 zIzSU^Wj05XJy8aYOOFbNmnq_z4;xBd-k!b$cE6~oXo`H?>ZcUC6*FX5uCcJDhDPM- zQf1Sc94rb^F4*W#gLCSKfq$P#zqmje5ZT5#+6(x!Q%rsbb1Drjg-lV`O*u#PjnH|Noe7W&Oo{N zkYN*sewWZ4Z%@aeoXQDT22MTD7y(eUMj{5fSP)I(05#41%FoE(J?0hBKrms7X37<| zA@6da;S9b6`wjjPX|CGJ!zi-}U1}Y6vgHxLgD4qS*%-P4C$Oh%e|FGJFTs*{iJIi; z2R!cHkr-EjX-t2xgTA8T@yD;aBQ>m7g@c&SE!lQS6mkjX2ejo^+(PFG_#f*MO+?4k z2ciooO+NjIyNYrVM`{%A@Qtoq8O~6nHLT`7Pl8MHPR-0z;Q8Yea^w6SyAcvHvf36` zD#^{CKIx!4Ap&`mL497T*?Ty>g;{Qxz}&>3r#iBDn{tL>RFb*d{?o|x7PNK&In^L3 z+)aKoaR{&~{IY^V-$J&YfyRySvSfEfb|n)M%6hwN>Y4(>tB0c;6IcSAX!Y4bzRPpD z^L#s55r-PS2$15_lDDM_V{?P`IMdHKvKQ<{HsH86Ka#z~JWfU5Kn&uC@g0gcj!K=O z=CiQTWo2pA4`KliWvs#0Ulh?=3JOS%?Au(>(G7_Q^wu0DUW0A!?M*eQ7ZRvKlHp6z z7?Q{yRz(}U$2Ashos5tU;h+SL#;l&aaaQK4cu}~O{@)T5L+SV;XN}5s5*uE=)V2j> z$6wH@0yRkfNRUJmq!i7$Bzf362j*GOkaj&kW>cxmN8629GF2_;`&Pcrz9$le?*F{I z6~z$|aKAk9!g(6u2(K(ebVR_arAfamh5~U7rd~|N@~p{PX1U{ZJnm(PPYZ5|-%?-& zS)g5*Pk6PDp8xnR;jTqX0U&8)<@+MHL^Tv2>8|KsF|e$Q0^PIz`eLVAFGph)@^q6Y zdnc(?J0A%ZR`lI-_Jeg5*ti+5`YsA=-$^3piT`2SIK(Hxp}`yraJPQ}<+coUHG7=u z2G;|Bf{(%a&DWflK7(z2&sC|*%gfmgUr&N}f(HTD@6HLiwV1?~C5yFF*kjlxvF;e+ zX_42bg|3uI!ZcbZtY)y5ukP{J=Yv_2=o`M3va`IrwCXkyB?CTRxV4ID`AHyR!dO`! 
zcCoQ*T?FGj9`$RJNbMoQB2ki zl{TnCn+JB(s}vSL(?e~Z0EIJVUppH^GusbphggBzV&VHN>p#JMN<%WPA;y-7|mUSLo- zJ<`xUnCXO%k6&*&K@!RBD1_1|w@`cJ_8z{Bi{O;VCvp#=@!Txj;O`%z_C;me`ZdN) z7z8j7gX3y9Bn+;YLU44mg2vVrNqMZPHW2r7n3YZ&J|kKYGDZc|MOtXFKjZ(`FBM0D zL}_7^KZ5}cKWKQxc0_Y3gHco2OHZHr?KUneaBT&HUR-axgB!2MQ*g z+E51VSv@^Hg`9vVdLb|a>77Hi6d$;t?@1iH-6^&eb|3Ki#3eUAnN7c!KI-Z3DGZ5x z8NN)2G5td#d|Hubm8x&$<`+NDD7U{B*ar=oJixp9V)|`2U>AAeRCr-X`p})LUO*^= z1<@Eo^Rt9qy7Wk+zl3A0rWF8@Jc$=-B7T3ohz`d_z2-Wzil>Ub6)&8BkClsvVw8tW zkAK%0qv7Sn@2Qs(b;ig*J0v(*5@Or(&mYv!g&!&W5&G;NqGtpj=ooki< z?m)S^9)jr;_27WM#(AZ|HdXeiniK^d5tpUl;K0gRPSl1xLHMT0nBAYp->%uPO8_M( zY#Z4ULRRt|95wpvB+5|fMJE<;3C=rrOs0cF7->LX%=x!3rKRQ63=ttX< zgKqO`%hFwH-7hjI^zY}1v!RTJ$y-3k+fs=#=C-xBp5;}Fix;~y^Iem5D~g&5_$+1nH-wR zr{m!G`fD)*ng{dPtj`im8eELksBLx@)e{`c6yC01{5J6dii*GuIasyDS?Ihnb?BRASp zrY`TO-)2)mr&)fEyWeMu78fA!o3V(1`cy$S7k$NwqQpBFa_+nk_c^ zFP~gdeH(E-~9}fajaX|AA0jI%^aKy_oLtRVwk(c;4ScEMF2Q& z_?l@3Kw#IgQN?B5y*vh3W4bpV^tZV-ftceZURw6mn0${+h*ov;EMy5jzzM!w^wtg; z_}|Xm{BfPAfh}Q%8E$voN1*?033cH{BMs)3Bf9A7BGjqp$+LE-3ktT?5@7TzoZZE^ zs`bIU=XcitSQmwUkmbFO@vJkIsTPn0dme&oQn9!00Ny-T?3<=GdP1n}hLY*ktokC* zEf(m{yMg^TJ?%Lz2?6N9QEezn*=ZwRxOfAsYm_7lp-&!1VE*I?I*4;DzK~_koZ28R z^w>anHl>>K#p;#cgX|%gf-;|;>O-c3peN3k+Ir-j@`^t+?1*=$M$FuUTGyBP%+ILP zYqMC(K5UUJ>HI-^v80U0!(9uGMS=b;nVk>Yy?Jt#CY#|^tmF89`MK%4dlTp#KoKBz3Bu39U5^63j36zQO+C+qg1Xo@&yzV^7biAAKP=el0LMPNkL7u9+j0JOv#Xbb3)7DaV#n*pn3umR!_Jccwzfcj`(e9s zqL!H@Y3OQx%CTRiDW0vF{go5Hp6C=Yf3_~~Y2ZS>TPNp+7%1X9D;s4N9_p+&*!5tg zqoyG7bmj!SSP9P3>}QTl*AXQVc!#c6j}|wtvrpNA zOP{8zr!t(cQXX>`mSBJnJXq5Wgu5jCi(y}Hlkdok<`HTrx6Oe^atGvN9SG0HIUaOe zAtZk%WUv<6XsK(jyOq-XpdTKlr*0I=j94fBR;-AgJ1cu~4}7~$Y{>-?AF&9VGC#7ETm!G3Ptz6ClpNj=$aI8h3@5WOt5e{o8C`Z{`uCUO@X1S=-1zx`JZRfMQGT($ zi*!EMB3Ts2j$6%3RcMrV_EEwtm{Xc@As&>BJav`G4smA>rtAs!vx}i@1zHUzhxxOu z2yL$F(N>{1S~<2~hVl9`8MJF>3bN6H60tp%njJb|q`GzBfWItKG&ue4w|tL>pCjs) zXMF7MZSihXwKS))^TVb5`QmzEAD>sQ+_&Ds61&sX=Vn!?2Ib1DN^>_Q!l!5jJf_xl zS}o*$K0t7?1(4lZX4IkKAc>MW4MZ4Y4djUEwq>JU7pgeF_6e7v+$%OJ9nVmvM}96T z$lsD0Q7=4?lLo<}4=*V?Grk)c$T(pzEmwS+rTs$OJ6th+$&saMQ=sTuOI9}{CNw#x z@~AkAq27j}#B}J}cic=wg$oTg?3{Bj2U#VSB5WlH3U-GhlTo2(-;1VztR!L=#Cqz8rhxe%S$8Cs5CTGHIJ~6?qC; z%pS(O>XR3ile&V&rJySN8!Z^B>{E$g;_v<4R70GPnAtEft##B91QG!f zv%0Ph@<*OG?3!C0jZJ z($aD?u(QVPCSyG%Bg>6TXDsJoeqxF!GO@gkTA4hKB(J-yfneNew%5e9hli(}YF@ue z>ACbBS`Z)iqoZTWwE2#cFcz2zTO_v%1|0b5TNGzhVHXPGmATkx&W;ST$nmv z?^g28?3h1tfW)~TCIa8SVqjuYJ8mCRx$FsQ`N9g*DGF_h&ZUA=IbilTraQfhrtrf? zh)diftkN{0W4nrap_6JvY%KO?o;uR=V%tHVigJG#mDe%H00}U3&HD~1Dw^!tyw1PT zL9&Dc&Uo6rl4~rsR~8IXI8`rHXSM`^WRJm$%i|I<2jNPukx#BJx1+yIRQT>=4#es0 zY@dJs3=mTbM=exsEsoE zO&gy-90C)en5pq$!EUv+Ztnl%=p5rRYr80(?Rv5`ak6b2ldU^Uwr$tc#L32FPBqy! 
zCfj!J>HXTL&hMQ2y7pdst^bB2jnnxL5xXrGU#@qbDd}IvsN5WqXOQ@AUN|vEQkVmF zP1k$dI(1d!471qlCT(O;+j=Xz#MD?kR*y582)k+@AWcPC(o)(*U^Y4J$vy*CbTT|F zB$V0@NMkltUXMb~1(X3;=okD|`{R=#ceX!=t19>V;s`X-hFdhuz!XOXRrt&%?qui> zNH;2Am7=@LHO1)1!YwXeBCDI90=O}Hr3oKMZRdXRa2rRtH;;W+3Cg?))>XP6X^VB; z{!IXnV{SV3X0pgGJo&H!5|%cd%JV=8UknpwKn_fg3IwBn@Mo3S&n!S+_bxINO9=)p z4=f3$YzUaoho}b#H)teTY=+_^LBl-dXaNt!y&W3lPFYDJ=+n}1NtBbX#D4`v#P6Xj zO)#VH`~GxbauVje8Hm6^JG27ydwcw53c}LpH@jgw%Dk$vR-M$kCk$-|PbSp_JvLh| zft!s(XukoK+nd!j%O;NP;J6*QI~$O|xy_$0AvQug4`!R{s<7l-r9-V5A#0GO0tEvj zT%%9_Zlnck*SSSJ|5itSs5RyBbeX=bx56q3P6}xBLAA9J;gg=&px}+#S0a?!i%1<< z71e^)8E>-jnzW^_tKvhQ6m?}7t`sYV^EX_ivX)GOX(ZY%3@q#sLp8{WBLxK@_d)gp z0|Y(7kKXvvXGlmH7IElRGRIy-V>w}~0NA0cL<%X&9|RGKy$+zZ z;2sr&AO|^YNmf_Xl)0i>sc+)!(z%oqgT+PXqfPF6VJ5Na$9)#vKL;%W5Pls2FVIeL zChzMYM`DN;L$Yq$i|_hJ6BS96(_J#Rz-awGSL%ZbppgYL~W}`_k*Lb@dPY1 z^aPGwSP3N71myY)7gvv!_x);upCqb0@N7YdJ8T%^Tw@x%7GtzbZS2K<+%Bw zZ)Z?4O8JbFVcN?=5pkjv@TJwg?Pl<|QUAl6D@#Bllb0fi!!v!Jy413N zN0Gl|?U!v4Za`XIsWV5kTdEZy#6o-jt7D`K7yMp7DX1%IMrwy+P@7hdt(Mk^%toy| z?AK{Wa3+*a^LDlK1=Wz+O8(|Fgu(d355a8sP*@Wd3Kn|yw!T=!koX(pr-6VF3M0*5 zMEiVrx;v9*C!W%ng@72GG94?^Qupbm3A=@CD}Krl%rN+YwjU1+za&sGB`oz!5#DR+D8>hn*(t0O3YU_Z2d7{p zaiMXC^w(mSN+s4{U9D~U5)ctF!XH#Qu2r4|bkw|CGZLA~;1B78%Zcz@a;`EuW-si1S7!|#k=uMzbuRDy$*aDdh5GMR2`X6w>$Q}{* zeK4~)Wm!NT$M{=o&|{Te zeMX!^X>9Ej>B>L|BSnp{s)&T!iPXXP&+mv!Lw$_Q-Qlc&yA3w?dtr3V6Zjom@H*M_ ztLK4Au0f{+#v)b^f05X;1mJN6T+IA>ID|@hAw?utz8L~g^U~$oV}$L=8fFaLmzY&S zrZx*Jyr|%&1=!OSB$$oSeUeqPJEeGkLmHHsmHp3K4^B6-<7+_!J*F zV!^)8>`Njus5r2NM`89MLjN8^gT454lf}d!c>#F=_@!QLaPj37K(5v##1LNjt9_f< zHy9EYHXV4DX{D?B_04J+`u48e{Py_8aH`O`Sgk>cXx^zMAS%gZrS;f!rOlB^|23F? z7-y9mY&T3H^YE`kG;LvE`Fumk#pf}=3mgLi2M%q=WN z^qBQMjwdHo<-egQ>gh|uOQ~7)^y8@1+B5!#^PlQccaD0056ST4kN3is{3On*4LN~< zTsd@bKa3EzfY_{?>l-UT`(TgU>HhgA{rOHGcwH-T0ZarZ2q6jf)bhd&ub!SE0g zE^ZuvkjwMlx1)0HgQ>o}U>+M0u*@I_+ac`veArfgh>qf?M_4pJU7BpkIV2e|I27?7 zf5659hkldj2*xS5^lxf)3UBl8#!;5@1iCKwZND|eUd(;RZ^ep21ZjUzxp4tfX?3T44tt9Ta(l~VLZQS+@0>K9Ht-tmV9QBxw$ikhr)@* za~pI4tNMkhm2TV~a!CLLNeam>ipfBlkuhvuFhK!+q<;m~9ciSE;!LP8>O?+xu)xLE zF26H&6G5$;DRP@0tHYmw^C3}|BA;!9?=xmhj*24h<#H>o#-sPg;(h_2R{1Xr&S^%q za74v7pN2ai7)Equ0NM`d*8e3XsXkSkGB5)P&R^MZOMboU;qklkP@}`c;k9m>l_k1p z-EVXeRG>1R*4w`Jk3@mr`sCc4Qa=k_>xjIL-{zd!{9L4QBFZ~w7qg<|tqm6wu|h+Gs9z^6xgd%UhyhOqGPv?(dmWaI zS&yxil_{J)O^IF;Z9)0z9n0S}UP|jQu%36g9^0+EX28MVwG+!V#=#cNXf7RK$U_!lDyi$y9xRyq6p@>WLX33|! 
z)8n=$;G=QT-~>x10}m_#>=qdX>N8l2W9XwIDoN^=nfU$_Pz`s`n0JwOFfg2~_aSZ} z?yYHjDIg<>_?Ay*(~@Cdrv8ZG)d^JVVoAU_l1|JqL=ket`e-A38|JEWnE1KC(N;ld zx{|>3{0TY&92M9Ym-R*{f<$vvQRUo9d8Ky8@RFJI3z*R0o6!smvr_TDeg|(KB^_C+ z2(GSFFI_PbjgAzy9plr{*8HkP)2+4+9fw3S`o3*LY3;+Km{uBi8Bj>}Hktc=`{lPx zW-lHX5Ub7x6kX(S3`J;~h~oHs435CO*>`Y%GV2~PPS*w`gpVEgm^Dk9VQo+K@eCDz zl*ja}+Dw$QkAP+;o$>@`y5Y&fj(iMW8)c+HT$3-MZTQsITk>=HdqBP&BKTbmY9%({ zi81t2OnGk8Sbh9AQfO&oyG2zuDOd?jLgO6fvdac!yc%@{pS^*L;M;o})$LU5qKdTg zuWD@=22_thds(N01;c2y|Ci>j2ljq91YTRAIFuq>L0N7;+Um<#*u{~_cpF+!ARxDQ z4mY>N)W@q2(mOb8OR%w*vx!?yIG=klCslh=h#Ox zTN-$vTlyXxsQ9f7_WI)7Ct0WK&bd~rd*{QoFhLA7{5t6tN$EV$%~T6~F^EIFU!%UH zF?#t<$5b2)0=euD8M|rJTW9Gz46Q6tP4BbNJ{;iI6gNba4o7xKbAH1oHkH<4@m}cx zfl(sEFKn%Yr12^3vh~MiNoS<=KkQNZDifUgGXZW;C@#ojlS28 zJ?i~!zAdGYZ~5`czPFF9%z+rTv;FMsRi>QbzszB4nXceRC{|_IwZZwqVPSpwOsJ7d zBr-UkX^~`LBSmWDSChw}fq0BBX1@Hz76V+vpBKunGF9WXmY42p-246LYN>tvc?q`; zPa%3$pE+89_)6|sjRHucTFJvkv#t+b90odUSwv4;S~|jq3-K2jh?{teE5~CS9-a;n zu$gutp(iEl?UGP()?19{#}aTMXcMqoC6un|14eap9(o z+{x9DjexS1+Fe7#o{Yui%zC~*lbco{%+5f>R_p8b+6;v4^W6N4$kZS=qnB8iH(49b zds0~+QYwN_3b3LMd#-+3C}t{-YF3$GSd40cKYbngNlR+OiSf-eo+;#FkMQd+A-P@Z z*jyu&`Ogl{fgEBz9HnHf?@e}wFofmYj`_*su*&^Ncj1xYtrwC(<%Bx$J3t_^Bm)x( z5fa+%=7EK#SSvlB1`JkUK8?l#jBlrED+4`7WIs!{u=tnaaVW8&v+NxiNp6AyFCfA|c%{rRP8mZ%lP3*m0b5pu7GBAsE?%TB5RJ|l;2 zjlVFA>2}=@R3%gGHk`G1QdANWA+#dSU`=23q2t>flBuxzQ3=lZ&?Gqk0f2r&fgQc1ik(0@2l+1$U%ike9pz8W1%rB;}zObWr#b#o= z?)P#w(+F|i{$oN4Y({4-C5}W$5wLu+k;RPIHLp_>^$Hdy1Ysf?kb;)Nt^p%Nh{Z&f zOxN>#jn&0Qr)v1}7g(U~1Ac@E5TxdpP3MA6SW@Gb;Uw>G)*vc~7`$_Dk*xRgV;;-d zNhUpc%XmC09--92MhrQ3zO2Dcxqp5ow=0Ts%#T__ZWqf69N6}4YH{Cta6B=Lh9$i$ zV9YJd8#iMng!<%PnH5=F?w2j)!fRU!(tWQbK3usWls)r>iDDsSENyKoeK=p355^Ez znd6Odo^EnK^x|Fs_J>l~eW^cbBGLbEb;5YC8Y-H+#A&PnM>HEc4i#@35y^-=AI??j ziLrd+x91rSXG<0Cl38b*JSm%+Z}Bx!0gGR{0)*ory=DZFy-&rF4UN)L|0Xe)GOhBF zF;5$%#Cxp;p5lTKo&54`O@dt+s{@5iOvv>C6$J|qPX_KT$Rs;A-EcqDH>{fxm^rYT z1gupVbmF(KnS7O%y374UQEWCx{Ig`1Yi+-cF436~8y4c{>dcrp0+3wI->M3z;ss?g zAE=d?nKm5grnZ-X#NN)J4!KROuHIFdcxEzwnuI6gDe3Pc?hTz%&amI^{Tn@e7J9RPJHrYier@i-_B4!&bsJ z?y(i(GhcHi(OC%Cl@<3+upkQY3;Ev`kf>n5K1dMqFlGmX?IRDE@eS^KU-rO)j_1nQ z9k<1gV#pjAkF*I(^h7LTfPO!c0%8I*{=^6piw96xfiQsW{YpS6WL74${#h*>9)ELo z=-XNv#OqsW(&DD-{bd%HIXR>MwF*Xl*qo=V3xUYEGbfpyI-_$OjT@!akJ2Z&G$+Em zIz@)USI2OOaEM**PiM4qLHYh+gN8ra2S%ED;l?jiv;TD-mzZjka^Sc(lU_RwVvhYORuuJxg?G0t5X3|9LoGrFjU=G-JWi_{t(c<8vXatH zs(@6KMvI$BrAvK()4$m85?7q3&vU-Pg#iK=5{&_>{8NLXiw!PqobTzE{+Z6YhV_aemg+0!cNk6td+wj4(-pLe z1%r0iEAO+r#l!u~@A&}AD2bJgEws>hi+zUvj@O#(2s-aqY+Qge!Hl+LHkDMfr6Rd^50l>&V%fpdL2ikUVxq;s3%maG`sX`W zg!=om#g+d7R&G3x5|N2b0HFm5i5nlY#_zRjcrkly-&J71q7Sm_62xFraJy4lcs6DjQk8iA>j;;}wv+05 z%LuEB|FS&ye{QE=4iMN+4U+gX%pbKh;b;IUZYvmY24MzT1T$KZ8u;X8&2g3q>+yy| z&GrOpI%d)Ul#=3UOWmfK>RuVqsqz){(YY$Hsfh)O?rXO9Y$z%BJ^tK(i`FZV8DzB5 zW&&BU%TYWjA~vUiQ~1Z-6&c9nC0F^6O9k=MpBVwbtdrvclZO$8Od#3yvVLKfCw?v| zw&VbYJw9Q;t?;tm<_cKG!XcjWKeD0%ST3akbovCmft=-8PRO>MhqH}CBUOK9Mm>gr0k*S(kv(Q6B0_z9OC6rNpbT%lb{9YdmYMfmuifqOpj zHoR+FkW4rZkYOR@%-VjwE1^O0;5PmX)pk8XPs_)r`(FFM$tbQ{tOz`9j7=RYlh6KRJpjlD1FjX=yh zCF$eSAuZeN1bhd;Plk1JMc3W?x#)lnf;b-R!|iJI#&R~}yjN;Fa-#xy_cs(IDpc*8 z(YmD9Q!-O&etE_OE=!7+Yb*vVgn@#9pUxpP@Q%8|@!cE{P3!B^!6D5}dpVA<^24&z zg|{l}vK$xKkEc7^V@oeND|SZ_m?wG!(JEM}L1Hk-#Xs35nh5+&`Qk)Kmw^OVhT!X^ z7_ZZA#1bN-ew+F=R~#-&&}UqS7qC>I@y^BbPNz+f+kQp}MnhupWyLnl3f`S@A#iO{ zlmwr|;obm4lVm#1(uU6P`4O*cFEZB@$D$s^QR)5ZGDUbW5@5f@0zV)%I?wGKQ22GJ z6<4ue)Wx;7C)^XH)t0NPC9c%*K;mWdBuhs_i>;tuRL-Bx2Cm}BtRd22Xmb3Qa z^-{95OPoCvO%yMiF(*#*ydnc4V`^$@pn(`X)(29M#0U)*8|=!e|6DMOxNf@hkVgP# zu~P2Wf37~O(X}}>a9PCQhE2mGD~-voVoO#T{_z{C4n^-i7iIe#Y!pnnUiGuxE{Y9) 
zR}J-64!H)~caa>L(GO$+jb(#F(^wM|P&CMk{3Hfh_-{p8I8_ZXF83>A`Gva$P&;1g z+oR=_g_+00J;t#0w`E*Xx#uHL6;%D~AYMp5W~g-iEh`0_ZR%AW6@f*PNo&jC*?fnf#yRo>%|9 zOIB!BDFdgAJj(H-l+jH$8=9{ zxD|r8q^JgTSvn8#1tXO*sh(ADY15?uJ$>}2jb|q6KYF-wm6VF=HrbVQ!0>@q4CY5J z)^kOZkigwW>N^KKtHPw zRgww!7it7iTHL=OJL5ccH|!t6wK0iYE8Xr4a}n8hfI%Id6>`o0<1I-NE(tri86A-U zjwo($6z$I}`{MTcAtI#*b>2g@e*5m)f}Um43~o$tCoitjFZQ9o?&drpe6FT}x|Nq$3nE9J~ExLf}y#XGEo>Sw65FoX}kvbxw_ zNZ8CnE+v}e-a%3cJ>TfmwOu4tfpiSU+6@rEWD-e=o#$va5uSe!4IW$&&10vHZs7uPNfQK`3qpSF3FsG2U%^RtS`-bNiM(sNB zq!O`X=+!1v!AIc?QFZUaYJ65RxX|AoMEzcWY^^RQPnj5Li#lQNqtj0M8Gcuon@)}! z+NTyIUPDqP^)J4?+!jn1NY*u1j2kGq9d^x%p{-$q`j!804K%&@c_nBkfx->u z8@P+5y28cXI0rZ{#o=XFsDN%kP$jQb<_hx85%E%AUZ^7zMV_g8ze(LQTp=?-c|NS~#iyT|d4J zZTg;*Wrfot1_WOuB7M#8IoxvCnzS{{tO(4I<6r>LW_t*_NaiCFJUpf2VGS)eF%yLb zi?7n;&@TmDPBoY>iQ!^_WWe0+ccD?jv>QmfETN&y7LL16P`J9x@L#56w^8g>kODp& z=TKte45x4L&3$(P-tfw2r4dbst;#v8bkJDNK@VXN1NP6KOrpa4ZnEb;L*<0bq7ScVIr7S9aC67&b7iMXf~ zD`ZOI=s5)9R*H>ae^=4{`wP`^E6%xl{=bTED&`tR(p$<*$5Ow$R{y{SM#IJbQoYyF zxtZ4~-~3WKoi$}JdV~)41pOT_x`YrSm6fVE6PWls>kzs_4dAJAj1izUel|%KlQ{|k z+>)dNoovs$Efuq$c5#V-3&=-bGO+KtGHPX{N6dWK>Z={XCl~EsS8mkWplR{5V3Hom zKH9!~{N-fZo9nVf&EU2o?RSYJwK5)ib{vU(?zce)JGk7LVo%H}DxfMF$6mF#5;ZxQ z$O<_O*WMk^N6-)dmh3Lh@nML1-%%EAA-L=*9 zwGb&u0`1>t|^nBQ*nGi43>-9{oZ^Uia|$aX$goQGR^<5t!!TW-g55k&eT>*ob67gcsn(Z8G=@L5hIXTCj*(K zhNS=8t#Z*5*T@WYa`hrF^Gy!79%T96UkbgPz6k;2>V=kB7h{nVCPf=W!m4=7F4swe z<>Ii-XqSQgCW*9>G{JWkJOiy?IWX4if5D?yvG*=PZlX{P<0Lg@KjuzzlGq#)Dgv&` zimfs4=Dv))mXtz)n*Ah%otR5Rg85a#6#gr0oD5Hc{u}l)mP+o&5Wt`~ucKGi735lX zNlwl=T_{^?yr{Bq`0qa%&r&uu%a%VG?pt~?6}MpZB)HN!$>L2I7cTx6Dth?RpoRZOFT^4$H%d6Ry|!vQrdbQ(B-P?$W-)uexW^9~L2VW7=g#*NB8inQTtJ`+suW5I@E0d1}Z-VTj*s)u69^aNY#sa0#P8n;bBk& z^r`#gl#H*-68(AQhfB`+XpQ2?!;p&eiOP8;17Mb_k(lNy=!u4=;=+9;K={OFH~Fuc(LE5)LE);p!{p4Afml#AQYDptp{}5dpI|Nl-6Oy z3J@OKUGdZlK0!P{=Y~hC^)ouqwI3QNAx=xl(kq+&8RY>N*@g>N12Ml6|5o#S{Q7Qq zo^1bBkM!GE1{tg?p^S#>Pw~{c-{rF*P*=*|4Z|Kq10yPRHqk1ZN$)f_ z|B5$C@q?Xjr*ol4dl@+#U=YLSP{K7e+hB7U+_$8D7Ef6@D}YQz!UT+SF5$8|;HH== z#OY*QeWE9_sD|;*E)3{y;YlzAH>CwMpfUJH??;Iq*60z>9|Lu{nHB6T1+UjHhFbFa z%fC*y%135(#D00V*_U@B^ftVfj7b{HA#42o1LGwkGJ`5+-XDhTFK}Y7Z&bGiD_+Of zv@B2}%KK-04MrDrSLFMZ%oC8e1y^hvviZ~>jnnSz0gjvRcF=-tZx?pG#C`@u7PC>- z0e;8DYk*U%CFss275>%c*oQRg;|TbE0fa^{-i3n-kxw0U zQV%9>+THV+*!mI_gYMpG(s0xG$l!rGO-fh(WH^5V@&WKuz z_&wX#!F_LWQu%mTv%oE3>M9?zAO{80F6C3dSaaw_RxMOr+UgDGE8S^4=cgZuBT?2R z^ZDodz(XXNL*+_7XRUvwVXe0ACTMW7hj8}c3AIfK?j4UXpp;-hRZt{}94)rFLU;M6 zPKPZ|H(%ZAU;oR;RP;wu&HFXi^-`B%a%BwC)8y05ZZSPmTA=I_ool~AEJ~Z^L=#<_ z(!;KZu#{9;Rb@g4#+{iuKeq{)#2k!$V8W3SLW-GwW9)5!q2OR< z_WF3Gs;9@$>HXkV%tx)1vqmn^GBAQslO2?56D+GC_i&yVWjaVGv%{~tcz#SC295pj z&%K6%9XB}?n1{u544AI|uG9C}3cc?CcxIAX`Z^C^FmL(t_B2+;aecv z=qjmyhVT8VeXo%yNEskPwCXIKmM%J=27$=lVquQBF=}7DeyZ~*$p2lJc-C0^J+s-? 
z@%~_mL~VO}?fbkX3f|7Bb81@{?V?S3XLl-2&v{Keej4BLa8UB3RjYsXv-Rkl)%C>8 zdY#w04x_*5q303X7vjmVTE`n|yh8B?vqw0ojUcUlb1L6Feu%3G;WzKpAK4TV^%IGW z@$UlSGe%0!eJT_bx`2NW8?O=2c&pjK$1$yTMttCQC{v z=~XH~si;p@Bc~TU@Mym}H8kYTH}jJ8aWg{az$x>i`o-sLYt73m*kHUJU+6vP=cQ=+ z$XCF6L`PT^iYz{^S|{5mNO!PEr+0@_6&K|0LL0%}b)@@{7p!5PrBIt})C&|^A}KlycThNU zObuF;4J~JQ-HI&P%dyN!&NcjiPW_cSRP~K12_5oTF87q-K!a3f+zxH>b|HhQs_XdE zCTcebZ@p1;ShVCb>D?qqikyDV?ekbmIC~^g9Q|7&oS*^-s)*Q{lexA3g`Bm| zeHybgoBAbkjxobEMC3iBmvGCjac`7$c}u}l>}gC7$3{TEp+CPF8t9`?KBwh{rmflc zesff^+p%mpYLt!PR#@|U$k8G0IRLLBD6OMKp6?NVSbauA#ZPO2_DOMkW-Zl&FC^8-t@V>|5{5zCLL7>`sS~t22EA$xHi_RLZ zqUBTi=jFV8;(@Y)%C5g~y)Q~UYt(e~D?Pp3%U{U5lJb z|C9fCZmyL^)5#zk$4i6P#S^&<45+(055S*R7@LRFReFomhaVA{7@c7f)YL?027Z_j z#cBg(QBo$JQ4(VJ5$3H~Jf)s3X?C93%i+JlDe;j9Y)_YdG zK6IiEHR5D3fu02o`0tQfrN24dkB+)twT7d9rEnwC{+8>0AQj*iaj_C3_@v5hoNuGc z*(9#hu1RK{ZFh|61P&r)0A$xcPviEy@YAt|Z&C8;+|x3hGmR8hilTj;O9op3GH8SJEyI)Jk zkBRbw1cO?FZD#hC!YKS#S9sz4XGulM3w6ksbhlG|IQFxy&9-LlFrU$KMC^s(*_e$J zahX>~o2t|0*=c;?tnSkLql%v6#(Zqta1k^v+WJ28#43jam3ksOvr(FCB@~XL*kYGx}bw5%|O_Fv#6 zeQfQgW&T~)qCm2mW5`ij0B-9)t1Yk)GEbv(wdX61HrY@pbuB=k`Ds|m&t%xo>Yr56 zH_M!|ik}UY0IuK%h2w5N7ULv3JPxl#511x44GE>qo8+BiiKi5sguG5+q#%K+Jf))a zyfhyn=4f;0gC_Q+{`&-i)M1b1!H-T=F01?)5)Spal<%p{hGnU9jxr{2Wj_^vmK(nM zd!K@2WV&gcvz@t;^GAm4YqZGTTmxm?seFF;c`uG09H1YL!A%K~ikxh;$0KF9R#yTb zZJB957MT|eYzQ+eAw9oJ(Q{wFve&!NwB4S}E;^odHgl6~>t3eDA;=sU$1M`M$NX#z;#b~W4vlZU!AzH^kYWZ2qv>f+-t8Zd71ikmI?M@o;v zB$~G<0uA68L80r-7d{7k*MO9}p+K@tN+`e#QFyqG$du%IvN(ET|EIdmvhjPZo2w=n z784g}{`ecNmmP&9lKaTsZ}qqa`ia!odH_c_hW3+hDtpSa^je-e^r-nG>*svJk$2a zqOA*6{x!zWvpJKI=XRqfVH}>qe^Cj!cZbT3vE=%%DDe!lzTjDVn!aqk)fw#Q33Q4l zrKGgJU?pY!CD&#?aFMB0b2s$WYs(>!vS zp+Yq44}KWV=zQ57{$`NoS#HQ#Sqp4BZFf_@lTNc=A$L|JVB#|L{CPV(u8AoxqgwSw zb_2KI4dN>7d9%?5_hE<^ri42j?Cz$Wo$r;DjNlHz*Q2Cm?~9EpU0y$w$otZ0+#A+H zWzG30ijuI;f7OQ>*U$khYEcZjkG1y^i;9ZUOJ(>9b;xe}>&SCWgoxKgIj(Fjfr+5rek3gyVj^NbZlO#eH!JcFqM}Ip0GllTVKTh4dXC?^(nxGDU-Y ziQw~LL6FEd#9&xdw;4Y9NK%k#KS<~;J2UOe3+k!6TQ7rQ`5yotib zDy#p*L~_T;?sN}>rMfAm_#qZbuD-(jlWMn#E=RxY@qCBI8DZ9!Xx{yJlP2~%GRG6F zRi-Z0j{R`%A#A-q)t>owH^D5w+U1@zD`Xn#W_qOE=63-=ZlE52m8#^)T&N95e^+s& zSq|$VZPNoTGfabd_x-M-4VM_d3+dYZ(*J!K1@ra?RX=7kz-Jg!W)_fB_KOXBShUjMl5CPo+g(JY zGGt@2`Is5)GZd>ROO021+qp(DX}kc_4IL7?dbyQ7S&Tn-yb9f_8wFt>8YWP ze`Xj|EQs`e9=MwYa8NH(mx9PbGwIx-{{_z9_~`lV(Hkbt^X^=owEI!B?HiJR%+f1S z#}b`%Al#mnd0F1^+7#MA0d4G$c=FB3ECx8?nMWff4Fh(Ek%j~*v^ zws~x&9%%$~FNtsGB2&!4fz4oW1m2>|Ied8^r$s0R_0HIX)yw%%eE$+(E)^$6M74th zJ+@RC)&h+a(gL91hY9w@04Uc3LZv?wYx$t&}4O1TlXZTF|f`anx7% zk(pY}WTPR*7gRkz=RefoF5u%w9OxlNy5v5E1N}1fps{E4biG&ouNBBmL&4)8<3Sa! 
zF~sD55SR4H-}W9=j^fCQScIJ}rv8HGT@9pMUFMo|@x1u^VwaWxC@)qNP*$t1D~k_8 zrsakAL*G*j#gkp-F*s7zf5Mmv$EO)(gE7$0T-d3_uday!q~LYLv+W`7`; z@@6!st#xAmh8k_!;uup+Mo=Ni@?mV;JD_mATdyRSlvHWt)4nkyuot*=LrA;;O_b22 zHDd$%!x$^|6jx^01o_NCLMTpZ`ZAA=nP;h3P3c~d9Q7Pu)Yr$W-T77#0^8>3JdI)a#k-q|(8aD|fmMA7dpHC|nY2x;UBl zmRZV=l&S5%M!NAuKUmN?QpU-wp~UAN8%~GNu<|I#Esx_uAr5t>@y0SZM*m~Nt)Unt zQ1XN&NJ)fW4aO0t;L}|HsMjYhJ%%_Q5 zS%yuuBS$+h^Vc^VNASUB%cGCkIQt^>G#r+ZOiU2?`j}mO6_zdZZGldkugyRH8A>BF zG^dioXy3m95~2gv8cRT7-gwL+gL7i)*EGVil=)mRP9jc-faW4Rt`*gi+%C$>6-hQU z(^o?)rV1rVA1l$B^od^AnXXNYO!}(> z4foO7v1aS1>3UD=a>M0>K1w5_sg=h80q6g6fL*~?Qtz}S6_lhZc6%YPS%M|Sp-3C7 zsWBn|5e!#&DyGnp#KU0&!V_jxm=Bd$m%r-)u7L`o$IbFA-fhJG`XoGIjG+y{l)*3iypThh%*xi%5fpU=&gK_N5e-?`uuAtal6DQ&&g-$M(iU0fl z^=*H}h8;R0qfraieg->ba+QyH6B2GU)BX?bVksQp)w55FX8N<_3WcbFm&L>ry|}T* z_w%|wjLWGH?qLtHtkyYkE*T{kg;?%QV2%-!EW?F-m@TO#CA2AaW38+}G0`mh+7361 zJ9k=aqRlFA?Y=5h)v;8*{5WYh`I|lkMHB)-aR7tRxtM6TTv#|<#D?>vzyT$^&n9!g z_)j(|+|rhTC|wtb-&az?4>#5A=SlzG&fVkYR5&&F1)p}46@O>A#g2$j2C@fv!1^bn zfa=g`gu0ejS-N`T*Cjq`#L(dc(>CktM4@x(4;1H67?AIMLQDw>lO@-r4CVesy)%LH z#S#j=Kkl64cGfxW4747}DNf~RV=)8LxqCJ=I0G6pNz0bGb0YbxRBJb>SkZezV*{b@ zKYs0KGzk&`Rj4dl6prN-F6$VibZ%M~Z(n~rkj{92yo^+Ah}}w9PqP3d6IZaHq`{LD zn?Ts=_xADDkh&VS=*1j!KJ6PPqC5;FpNy}Xb=|*R6Bx_bCZlPEuxVpdcG|A+OEL3u zWlaXlX_LRX9`i$HvsW>Nl(S&|;f}V}6s8cKwC>tBo_aK6s{D%b_E}Powg*4T$G?I}B?ADxvV@YpQ4ee0bkcm|2;b#8(XzgpoE{B=#E z8dgR$zWc93_ zg!WqcVRf25A+9i2Nx>Y6&t&fnby9igkswSe`EjJ=e_UsQ0I$yu{Nre+zt-SCn5>~r zf8teRXXT){>>s?|PNRo~`^l?Hcvm=Jp42LR+b=4uZvx1MlIA&xt)%6xPc znq4PYnPS1<)L<^ZrcW4X;1+tBO@NKtPVAmDW?xbZzUrCMrSv2wlo)<2h17McPkrWY zzA)|(LXE;mMiB0vzQ0_!tBG$$bbYgx2NLITQ&p-cZ$u}602iE<a$(%R4N7ei*#jzcnf}d1l;zR;T^O=Me){=Id5k*K3h3#XF}s670*c?_9Ee;BOzp**ZMNN`>JRHETC%?aUyd^Gj4?~>S2U+D4WN% zTwdd^MiHc8BP;tcluW%>hWgK&4%k$>ymWTQeED47D&ymZGOr-WOz1*g`D6@_<^%`% z`B!)}YD$}%gGqPDYfwq{{%vqVzDSM(WRuI0YU@@Q!CI&EG^yL69l3FnyUS*G;8y_{ zvtrfO@a|s%y0KAp$8p^pGvbTMmzTY$?IJJBmbtEj^5MS!tLiKKqUye{X$EN!rE3O| zMq=pB0Sr0uaYDJdDc(*QvcR5}I(0jU{EVrY=x_4)cdpZC50z&ZEa zb@w@Y?{(I~8QA(H!sWVy(s}_qqjfSzy?KD*yPdB*#Q`PkSKI<0BU(P)Jn=IHO~yzz z;W7E)b-x#nG>|v-zmXG=P*q*-#Hz}D=sk;B-T-JkaFe9vWFl1tF~bMQYyM!KUm}lR z&5eo$0ye0hIi%>n8V+YzJfXnFV#dl&o^|<=l@v6LX6)ERI?ieYDz@xzR@*GXuY7X( zHovSHGpp-7?RS9rX2+&PclQ;Zv491~5ID%gY<>{ssjjY?U0L(Myv93O2(C({ZXRJ%=#Vz|)rC9Z&>xiTpp zI7u9vF?2n$Ke5?us-poQBrdhSI}nZKc+zcG6y{7qyfyfiGT}PUo4iObCDH*+ma&6S zk#?*@gXgY;kK>d-On>Mu3}mDEbO)pr`wUk~odq=srWU8*8~wEP{v~vkR%H!*LsncY ziahT%tY!Fh{XRWp!re5%DUV=G$?x$~<dW4{`a%0v0_PmhVo(9(6UL zUmunWuh@f`Q+>%#hkmdqA|4MZlD^{c08t-2Cr~rz)-p_&EG460D}tOykaK#MYyy$g zzYi&0;$!$yeRlYD51b9f8pjeH|Lna*n5HWTyRM#)c*3KVKaZwu880~!v z_}Jd|qBSO&envpwHds#&)nf|2PiOWMsS-+AGjWVv(xfxiKD7lD>#YlV#&42PXb{69 zP3;4jTvh<#!{yHcki;0SKlH4XkomKGaG4%6VA^TfdV2n+wJtW6_4qtWzb7u%kxH=a zpcun!)#RKCZr9WKLzAH#OS`}1^geLc{vM5Yv}18&+&EJ|AEX}W-qMTbGkQd=UA<+s zFwMC67EYeGQ}DJkUB6;V^`wn`ro{p^zWr*-Vk4SXiY8%Jl*g!oW;^qPixX@&$zltd z|53z9N}2R*08wV*d?h@jzYTGpQ`?Ow*d^22KYQbI6mfbd_Oh7`*u0a&--exp&P_fb zj69OQE}mZ#lgU!_Vf|jqs>X5Xw~xuItT^(0&~Hznw+o8exjRb>p;V*Y5jnw)7R-gl znouH~geV)8UKF>7-ZHnPNZ>P!T9=$og4TP&_QFl%GlnxV-l6Cu$1$-lr&l#y<^NV$$){4KUF*>CwLKzfrG%^-Vzk)i*_M#H=dl9 z^D0K*)@y+^L)=HH2#2ILW!o!{A%ToWi2|uJE^nxQ$yjh4eMSInNGNgNo^7W#zFpHu zf1?tY)y{aDq9A*{DlDzK-tNF-)6z-7X$WfeU9bO*f#VY8P=Ef4mwrPFeNUm(L|4R)K><;nePp^ZwpZGbfP}E#<5|3T95MPt}b>5Hk)TiLCEdW(o$-aLfL&D7dr8h z19qv<_~^7_I_N0aBMxjVU#7wnqt}3hGLYNrv3v8-?kUpaZcg1L)?X0y<-e$o=vwK!xtILZ9_wXPz1pT&|+dr>L|6SWw96`&dD;_KnqL!Kyp8cz& z;dFHOL$iTk5x=Xsft%DoV|`3!hyQzgPFD{UfOqeb*Em! 
z>jDMzhis1kjb;V12=BeFn=eyoemvCJ$=WysbOg6Bt|LDjqa{iAsZ?8h>ibripYV8f zcPU>>V!zKdIev3DK(O6#9KV;Y#8A-Y4kTr2JMCK*jh0ZHsdHY9WuI?c!Ifn8p&Fmq zF_;M~oL1(U%I0}cq#qQMF+&`iRi+{+l6NDIL4gCa8qbI1%!9V%4^PyQ5@S2;1Vr2H zx)hG?@9iH20|ir$ST(s1qqhlf3W*OFL@;DG=5kw1}%aoi8x#ve|4YTk_Skv9A6L+$KxNBM$bCPkpyb!qKFPwdMG^XT4 ze8745;3$x!gNz8z*>a6WtkCneA8k^MpN+|Gk6wC&RgiwOUmayO-v5!?-NUB zmw8}q9R_iH*HaT9AL7SM{g?!V%Ssak7Uq30L3kDD7zsOfFp-3IbLJXs!c%l#d{6mhruS*%NXO=$ZyD zWCntsz=@W)V!{u#-Aal*Yqtt^EU%e zNYO@2`4ID%NpW+Rn5w4?c_2ck$fX^1oQzKrm9i6J<_Bdvt$V_kCf$i5fd1F75Qr_r zi*5=es=%8!XWtt5Z|GMr-ZEl^6-P3=^q$H?s5M_=q^_W(zr0rN+6nqoF8@Oym{RAK zJHCb};4T)kg2?xbi*I)m-yl^8ZK;XUxn7IDl5FZzGPV#~>ODXOr-p|;0DQ0YO)FJK zw5_T$sx4{|sU{Cr>!>c`#O~Y~HH=>TE<@)pves7)S zH7SdJ`sf5b2=zp&1pUQ!+!aDL;`Wm)O94#T_G=pV`x|8g-0q$tR42g7)uNJD^@lBK_XnT{ zY@cz15r7R2;S&aYxAMTssX zy2Pgig-6{hGHwD~Y;L|c~nG<#KRCVnbDjZ;Wl zF$94(Rt)0GSh7$nDI)H4zslW@hvBv{WQDI=#7rxBfF`Pt@N3L_Wp`7ZEZ^b>+G zUm1Er%)@N^6=ZFLX~k2wYA>w@0=AGj4!V4>Z$a3m)YkWtghPFg;;&w{mTo@DDPL|M z!q!U?2wIwFbh=AD=P-Mtx9@=}&%Qy2%D`qo!Z9SeB`ov>&#fzMIhyq+BUhD+U|;5! z0QTAcetCpi*C$=JokntMhL(n-$^M$wc_i3Mw!$f@1o`NUnUo#4>o}MzK(ZC+k=#Kf zoXM#jWws}80UsfYtuR%U@ffOY9B%{`lA6w; z*u$r*yw5H;C)zpD*sYSs@X-snqLF(LlEK&XGcb6Mo5kUl`>An#wRz<8`N^NtzG;Qj zsmsB(%VOk-*B$(~LPK6?eY=9Hp@F%<5wdU8XAblZk*aY80d<@dt|~SE1n(V?9~q#m zn#lNTw#XhgQhWDsBh4AtDO~z=&0mJXNRYx+z8k0e>em*9)h3{WNXsBC)7qU;RnoST zghthx-PGPS0naI~ADo$#cJWenU{Kcf#TEG5`)D2W7adN|uW?A^f08Ks zbm!am_mGymg{M0_ux~g)<>v^bx{1ZN?4>J9a?gLSZ$gp>q+#P#KFaAOO!zI~jmg?f zzk@m0N-@Q3W(66MZZbR0vlUQIavP0M&HoN8#sGBt}(c1InU`8Gdd1Y+cC=Ul9w z@O-W@`6X2Q)~H^wMrI$4iL9~w<4#Ci`Q_Gs*m-f4P+-oeR>gue#&bhz{i=L}+v}Wz zKwTQI??Q8UR&)9eK0ZYXBx79Q=rvbrI@>vJCqplLT2UE+d#AVaYbfOsN#hCaKOfkMoT(f}$DR^zNZoxohiz`R} z{hT0O?x&lIHYVU$|7~PLzaGsI7+J1oTX%B z*irez_V%SZDtw*U>C50^(RpOhyBDy?Yro%EX^pqxkP5}2tL;vu?(-@zqnOVbNExsT zQ^haidHr9=1(Mu$C1FiHH^rI$2!r2&-{o^(v4X-Loji;wdrULtUpSQ>?2Y&8AZ~MkLz{-pjv z@p?h^sFkcd;@T{+olKcvW~MdKXa#dkOA=yd+S7ssMNSWJ-Pkr-Mb*p$X|TBOwL zO%ZgOVg0qP*+B1A!BrK!IK;p>jXNw)bbP3yJ-B=v$<*i>Y{KX4^XB$xlIk2TzZCl+ z@vF*7UbTO<<&SHOG7dF;LWur^bm2#HAsZ{@3E(XyUVYX?H@TU|C=*m^p{;7R#N*uM zwEzz!pv!sE`!^UblxPtY{VY3Qck9~)h>y$IA8++%pz>$#7>Y`>g7l4<;do3!|6F59NUaU3m{MF4F!0d1eLD(Im9LJi_>d3G zO3jt~(dk)v!$o?fInK0POm-4|OO~mEmVm-CLvq0gY*6BLA(XTbTC((~Rle{SpvmCj zt2Y;)!h_m3@Bc1i7ZpvP z#{*1&_O|K!s?L&fSTf`wJE6B~9H`?lH%n#Ci)1cAK`09eHDlip{l`N}PRMJh3=FfabH$*C z^`YD?RsN#Mw34q`A@`iRpPYUTt^fC|%m^MH$5>TrywFdtGviR^Pj>~in18dMsCd6! 
z+);$D3)U?c3!hI_F^rIAW9?+m33cCt4w$~~cr2(8lWjI^+i`?jGI!-K1q|UhQ90Ar ztyRx)h6bM}Vxm5MF4tcFc?NI@A3F6WIouhfVgh<-|DIskcnRcrPl72-O_pk3c%zH= zmL_D3pJ0(zMwZp<2FSLV3fH-cT(IxYSF&V_6(%viX+f`UtMV`YzG=kp0)-DiK_IX7 ziUtc8B`L#m&=wX#qF*L9`UjgpH4Utk2yy2iYYSz0H>#qPTsNko!&K7?ivNxqfh056 z3BjH@ZYCww+N6;s?Ji&Lx_>V8O_|6lsV1ceMq5y-x$-9nxPE)KB>rDk@dqqeJ(3yq p#yW82+Wz6cZ}KM@7!IO8zI?8dH)Ue7tV#epFzwq~H5zu2{|8x9Vif=Y literal 0 HcmV?d00001 diff --git a/docs/source/conf.py b/docs/source/conf.py index 7aba0f19daa0..4d8d74b33e8c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -50,6 +50,8 @@ 'tutorial/create_dataset': '_static/thumbnails/create_dataset.png', 'tutorial/load_csv': '_static/thumbnails/load_csv.png', 'tutorial/explain': '_static/thumbnails/explain.png', + 'tutorial/shallow_node_embeddings': + '_static/thumbnails/shallow_node_embeddings.png', 'tutorial/multi_gpu_vanilla': '_static/thumbnails/multi_gpu_vanilla.png', } From 86660641f2f796fde360f30e7c3f208b7868dcfa Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 22 Sep 2023 08:07:08 +0200 Subject: [PATCH 1495/2432] Update `Node2Vec` example (#8067) --- examples/node2vec.py | 142 +++++++++++++------------- torch_geometric/nn/models/node2vec.py | 6 +- 2 files changed, 74 insertions(+), 74 deletions(-) diff --git a/examples/node2vec.py b/examples/node2vec.py index 7c2d4ee35433..90072d5e2414 100644 --- a/examples/node2vec.py +++ b/examples/node2vec.py @@ -8,75 +8,75 @@ from torch_geometric.datasets import Planetoid from torch_geometric.nn import Node2Vec +path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') +dataset = Planetoid(path, name='Cora') +data = dataset[0] -def main(): - dataset = 'Cora' - path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) - dataset = Planetoid(path, dataset) - data = dataset[0] - - device = 'cuda' if torch.cuda.is_available() else 'cpu' - model = Node2Vec( - data.edge_index, - embedding_dim=128, - walk_length=20, - context_size=10, - walks_per_node=10, - num_negative_samples=1, - p=1, - q=1, - sparse=True, - ).to(device) - - num_workers = 0 if sys.platform.startswith('win') else 4 - loader = model.loader(batch_size=128, shuffle=True, - num_workers=num_workers) - optimizer = torch.optim.SparseAdam(list(model.parameters()), lr=0.01) - - def train(): - model.train() - total_loss = 0 - for pos_rw, neg_rw in loader: - optimizer.zero_grad() - loss = model.loss(pos_rw.to(device), neg_rw.to(device)) - loss.backward() - optimizer.step() - total_loss += loss.item() - return total_loss / len(loader) - - @torch.no_grad() - def test(): - model.eval() - z = model() - acc = model.test(z[data.train_mask], data.y[data.train_mask], - z[data.test_mask], data.y[data.test_mask], - max_iter=150) - return acc - - for epoch in range(1, 101): - loss = train() - acc = test() - print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Acc: {acc:.4f}') - - @torch.no_grad() - def plot_points(colors): - model.eval() - z = model(torch.arange(data.num_nodes, device=device)) - z = TSNE(n_components=2).fit_transform(z.cpu().numpy()) - y = data.y.cpu().numpy() - - plt.figure(figsize=(8, 8)) - for i in range(dataset.num_classes): - plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i]) - plt.axis('off') - plt.show() - - colors = [ - '#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535', - '#ffd700' - ] - plot_points(colors) - - -if __name__ == "__main__": - main() +device = 'cuda' if torch.cuda.is_available() else 'cpu' +model = Node2Vec( + data.edge_index, + embedding_dim=128, + walk_length=20, + 
context_size=10, + walks_per_node=10, + num_negative_samples=1, + p=1.0, + q=1.0, + sparse=True, +).to(device) + +num_workers = 4 if sys.platform == 'linux' else 0 +loader = model.loader(batch_size=128, shuffle=True, num_workers=num_workers) +optimizer = torch.optim.SparseAdam(list(model.parameters()), lr=0.01) + + +def train(): + model.train() + total_loss = 0 + for pos_rw, neg_rw in loader: + optimizer.zero_grad() + loss = model.loss(pos_rw.to(device), neg_rw.to(device)) + loss.backward() + optimizer.step() + total_loss += loss.item() + return total_loss / len(loader) + + +@torch.no_grad() +def test(): + model.eval() + z = model() + acc = model.test( + train_z=z[data.train_mask], + train_y=data.y[data.train_mask], + test_z=z[data.test_mask], + test_y=data.y[data.test_mask], + max_iter=150, + ) + return acc + + +for epoch in range(1, 101): + loss = train() + acc = test() + print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Acc: {acc:.4f}') + + +@torch.no_grad() +def plot_points(colors): + model.eval() + z = model().cpu().numpy() + z = TSNE(n_components=2).fit_transform(z) + y = data.y.cpu().numpy() + + plt.figure(figsize=(8, 8)) + for i in range(dataset.num_classes): + plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i]) + plt.axis('off') + plt.show() + + +colors = [ + '#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535', '#ffd700' +] +plot_points(colors) diff --git a/torch_geometric/nn/models/node2vec.py b/torch_geometric/nn/models/node2vec.py index 789db1ca9bb0..691db24d34f2 100644 --- a/torch_geometric/nn/models/node2vec.py +++ b/torch_geometric/nn/models/node2vec.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple +from typing import List, Optional, Tuple, Union import torch from torch import Tensor @@ -99,7 +99,7 @@ def reset_parameters(self): def forward(self, batch: Optional[Tensor] = None) -> Tensor: """Returns the embeddings for the nodes in :obj:`batch`.""" emb = self.embedding.weight - return emb if batch is None else emb.index_select(0, batch) + return emb if batch is None else emb[batch] def loader(self, **kwargs) -> DataLoader: return DataLoader(range(self.num_nodes), collate_fn=self.sample, @@ -134,7 +134,7 @@ def neg_sample(self, batch: Tensor) -> Tensor: return torch.cat(walks, dim=0) @torch.jit.export - def sample(self, batch: Tensor) -> Tuple[Tensor, Tensor]: + def sample(self, batch: Union[List[int], Tensor]) -> Tuple[Tensor, Tensor]: if not isinstance(batch, Tensor): batch = torch.tensor(batch) return self.pos_sample(batch), self.neg_sample(batch) From 9460374eeaaed66d2eb777b4f416aaf13b87ab8d Mon Sep 17 00:00:00 2001 From: Jay Bhambhani Date: Fri, 22 Sep 2023 03:25:49 -0400 Subject: [PATCH 1496/2432] `OnDiskDataset` class with `Database` integration (#8066) Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_on_disk_dataset.py | 100 +++++++++++++ torch_geometric/data/__init__.py | 2 + torch_geometric/data/dataset.py | 2 +- torch_geometric/data/in_memory_dataset.py | 1 - torch_geometric/data/on_disk_dataset.py | 163 ++++++++++++++++++++++ 6 files changed, 267 insertions(+), 2 deletions(-) create mode 100644 test/data/test_on_disk_dataset.py create mode 100644 torch_geometric/data/on_disk_dataset.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 2999faeffb03..76e0eafc2a69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `OnDiskDataset` interface ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066)) - Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) - Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) diff --git a/test/data/test_on_disk_dataset.py b/test/data/test_on_disk_dataset.py new file mode 100644 index 000000000000..1a19c3213822 --- /dev/null +++ b/test/data/test_on_disk_dataset.py @@ -0,0 +1,100 @@ +import os.path as osp +from typing import Any, Dict + +import torch + +from torch_geometric.data import Data, OnDiskDataset +from torch_geometric.testing import withPackage + + +@withPackage('sqlite3') +def test_pickle(tmp_path): + dataset = OnDiskDataset(tmp_path) + assert len(dataset) == 0 + assert str(dataset) == 'OnDiskDataset(0)' + assert osp.exists(osp.join(tmp_path, 'processed', 'sqlite.db')) + + data_list = [ + Data( + x=torch.randn(5, 8), + edge_index=torch.randint(0, 5, (2, 16)), + num_nodes=5, + ) for _ in range(4) + ] + + dataset.append(data_list[0]) + assert len(dataset) == 1 + + dataset.extend(data_list[1:]) + assert len(dataset) == 4 + + out = dataset.get(0) + assert torch.equal(out.x, data_list[0].x) + assert torch.equal(out.edge_index, data_list[0].edge_index) + assert out.num_nodes == data_list[0].num_nodes + + out_list = dataset.multi_get([1, 2, 3]) + for out, data in zip(out_list, data_list[1:]): + assert torch.equal(out.x, data.x) + assert torch.equal(out.edge_index, data.edge_index) + assert out.num_nodes == data.num_nodes + + dataset.close() + + +@withPackage('sqlite3') +def test_custom_schema(tmp_path): + class CustomSchemaOnDiskDataset(OnDiskDataset): + def __init__(self, root: str): + schema = { + 'x': dict(dtype=torch.float, size=(-1, 8)), + 'edge_index': dict(dtype=torch.long, size=(2, -1)), + 'num_nodes': int, + } + self.serialize_count = 0 + self.deserialize_count = 0 + super().__init__(root, schema=schema) + + def serialize(self, data: Data) -> Dict[str, Any]: + self.serialize_count += 1 + return data.to_dict() + + def deserialize(self, mapping: Dict[str, Any]) -> Any: + self.deserialize_count += 1 + return Data.from_dict(mapping) + + dataset = CustomSchemaOnDiskDataset(tmp_path) + assert len(dataset) == 0 + assert str(dataset) == 'CustomSchemaOnDiskDataset(0)' + assert osp.exists(osp.join(tmp_path, 'processed', 'sqlite.db')) + + data_list = [ + Data( + x=torch.randn(5, 8), + edge_index=torch.randint(0, 5, (2, 16)), + num_nodes=5, + ) for _ in range(4) + ] + + dataset.append(data_list[0]) + assert dataset.serialize_count == 1 + assert len(dataset) == 1 + + dataset.extend(data_list[1:]) + assert dataset.serialize_count == 4 + assert len(dataset) == 4 + + out = dataset.get(0) + assert dataset.deserialize_count == 1 + assert torch.equal(out.x, data_list[0].x) + assert torch.equal(out.edge_index, data_list[0].edge_index) + assert out.num_nodes == data_list[0].num_nodes + + out_list = dataset.multi_get([1, 2, 3]) + assert dataset.deserialize_count == 4 + for out, data in zip(out_list, data_list[1:]): + assert torch.equal(out.x, data.x) + assert torch.equal(out.edge_index, data.edge_index) + assert out.num_nodes == data.num_nodes + + dataset.close() diff --git a/torch_geometric/data/__init__.py b/torch_geometric/data/__init__.py index 58c144f3da11..add215bd3e3b 100644 --- 
a/torch_geometric/data/__init__.py +++ b/torch_geometric/data/__init__.py @@ -9,6 +9,7 @@ from .database import Database, SQLiteDatabase, RocksDatabase from .dataset import Dataset from .in_memory_dataset import InMemoryDataset +from .on_disk_dataset import OnDiskDataset from .makedirs import makedirs from .download import download_url from .extract import extract_tar, extract_zip, extract_bz2, extract_gz @@ -22,6 +23,7 @@ 'TemporalData', 'Dataset', 'InMemoryDataset', + 'OnDiskDataset', ] remote_backend_classes = [ diff --git a/torch_geometric/data/dataset.py b/torch_geometric/data/dataset.py index 2b4b44c30dba..e62a78a5f8f9 100644 --- a/torch_geometric/data/dataset.py +++ b/torch_geometric/data/dataset.py @@ -67,7 +67,7 @@ def process(self): @abstractmethod def len(self) -> int: - r"""Returns the number of graphs stored in the dataset.""" + r"""Returns the number of data objects stored in the dataset.""" raise NotImplementedError @abstractmethod diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index f8b5b1733c1b..81940e4eaf19 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -27,7 +27,6 @@ class InMemoryDataset(Dataset, ABC): r"""Dataset base class for creating graph datasets which easily fit into CPU memory. - Inherits from :class:`torch_geometric.data.Dataset`. See `here `__ for the accompanying tutorial. diff --git a/torch_geometric/data/on_disk_dataset.py b/torch_geometric/data/on_disk_dataset.py new file mode 100644 index 000000000000..b011573f74ec --- /dev/null +++ b/torch_geometric/data/on_disk_dataset.py @@ -0,0 +1,163 @@ +import os +from abc import ABC +from typing import Any, Callable, Iterable, List, Optional, Union + +from torch import Tensor + +from torch_geometric.data import Database, RocksDatabase, SQLiteDatabase +from torch_geometric.data.data import BaseData +from torch_geometric.data.database import Schema +from torch_geometric.data.dataset import Dataset + + +class OnDiskDataset(Dataset, ABC): + r"""Dataset base class for creating large graph datasets which do not + easily fit into CPU memory at once by leveraging a :class:`Database` + backend for on-disk storage and access of data objects. + + Args: + root (str): Root directory where the dataset should be saved. + transform (callable, optional): A function/transform that takes in a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + transformed version. + The data object will be transformed before every access. + (default: :obj:`None`) + pre_filter (callable, optional): A function that takes in a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object and returns a + boolean value, indicating whether the data object should be + included in the final dataset. (default: :obj:`None`) + backend (str): The :class:`Database` backend to use + (one of :obj:`"sqlite"` or :obj:`"rocksdb"`). + (default: :obj:`"sqlite"`) + schema (Any or Tuple[Any] or Dict[str, Any], optional): The schema of + the input data. + Can take :obj:`int`, :obj:`float`, :obj:`str`, :obj:`object`, or a + dictionary with :obj:`dtype` and :obj:`size` keys (for specifying + tensor data) as input, and can be nested as a tuple or dictionary. + Specifying the schema will improve efficiency, since by default the + database will use python pickling for serializing and + deserializing. 
If specified to anything different than + :obj:`object`, implementations of :class:`OnDiskDataset` need to + override :meth:`serialize` and :meth:`deserialize` methods. + (default: :obj:`object`) + log (bool, optional): Whether to print any console output while + downloading and processing the dataset. (default: :obj:`True`) + """ + BACKENDS = { + 'sqlite': SQLiteDatabase, + 'rocksdb': RocksDatabase, + } + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + *, + pre_filter: Optional[Callable] = None, + backend: str = 'sqlite', + schema: Schema = object, + log: bool = True, + ): + if backend not in self.BACKENDS: + raise ValueError(f"Database backend must be one of " + f"{set(self.BACKENDS.keys())} " + f"(got '{backend}')") + + self.backend = backend + self.schema = schema + + self._db: Optional[Database] = None + self._numel: Optional[int] = None + + super().__init__(root, transform, pre_filter=pre_filter, log=log) + + @property + def processed_file_names(self) -> str: + return f'{self.backend}.db' + + @property + def db(self) -> Database: + r"""Returns the underlying :class:`Database`.""" + if self._db is not None: + return self._db + + kwargs = {} + cls = self.BACKENDS[self.backend] + if issubclass(cls, SQLiteDatabase): + kwargs['name'] = self.__class__.__name__ + + os.makedirs(self.processed_dir, exist_ok=True) + path = self.processed_paths[0] + self._db = cls(path=path, schema=self.schema, **kwargs) + self._numel = len(self._db) + return self._db + + def close(self): + r"""Closes the connection to the underlying database.""" + if self._db is not None: + self._db.close() + + def serialize(self, data: BaseData) -> Any: + r"""Serializes the :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object into the expected DB + schema.""" + if self.schema == object: + return data + raise NotImplementedError(f"`{self.__class__.__name__}.serialize()` " + f"needs to be overridden in case a " + f"non-default schema was passed") + + def deserialize(self, data: Any) -> BaseData: + r"""Deserializes the DB entry into a + :class:`~torch_geometric.data.Data` or + :class:`~torch_geometric.data.HeteroData` object.""" + if self.schema == object: + return data + raise NotImplementedError(f"`{self.__class__.__name__}.deserialize()` " + f"needs to be overridden in case a " + f"non-default schema was passed") + + def append(self, data: BaseData): + r"""Appends the data object to the dataset.""" + index = len(self) + self.db.insert(index, self.serialize(data)) + self._numel += 1 + + def extend( + self, + data_list: List[BaseData], + batch_size: Optional[int] = None, + ): + r"""Extends the dataset by a list of data objects.""" + start = len(self) + end = start + len(data_list) + data_list = [self.serialize(data) for data in data_list] + self.db.multi_insert(range(start, end), data_list, batch_size) + self._numel += (end - start) + + def get(self, idx: int) -> BaseData: + r"""Gets the data object at index :obj:`idx`.""" + return self.deserialize(self.db.get(idx)) + + def multi_get( + self, + indices: Union[Iterable[int], Tensor, slice, range], + batch_size: Optional[int] = None, + ) -> List[BaseData]: + r"""Gets a list of data objects from the specified indices.""" + if len(indices) == 1: + data_list = [self.db.get(indices[0])] + else: + data_list = self.db.multi_get(indices, batch_size) + + return [self.deserialize(data) for data in data_list] + + def len(self) -> int: + if self._numel is None: + self._numel = len(self.db) + return self._numel + + def 
__repr__(self) -> str: + return f'{self.__class__.__name__}({len(self)})' From 99f704708ffb1db10f141f19fcee55fb4340a52a Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 22 Sep 2023 09:47:12 +0200 Subject: [PATCH 1497/2432] Add persistence test for `OnDiskDataset` (#8068) --- test/data/test_on_disk_dataset.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/data/test_on_disk_dataset.py b/test/data/test_on_disk_dataset.py index 1a19c3213822..679a961e7df4 100644 --- a/test/data/test_on_disk_dataset.py +++ b/test/data/test_on_disk_dataset.py @@ -41,6 +41,17 @@ def test_pickle(tmp_path): dataset.close() + # Test persistence of datasets: + dataset = OnDiskDataset(tmp_path) + assert len(dataset) == 4 + + out = dataset.get(0) + assert torch.equal(out.x, data_list[0].x) + assert torch.equal(out.edge_index, data_list[0].edge_index) + assert out.num_nodes == data_list[0].num_nodes + + dataset.close() + @withPackage('sqlite3') def test_custom_schema(tmp_path): From 4a0272eb6996e23f3ad2c3c803e4ddfa3acd6372 Mon Sep 17 00:00:00 2001 From: xnuohz Date: Sun, 24 Sep 2023 22:42:18 +0800 Subject: [PATCH 1498/2432] [Code Coverage] `data/data.py` & `data/hetero_data.py` & `nn/conv/eg_conv.py` (#8047) - Part of https://github.com/pyg-team/pytorch_geometric/issues/6528 - Fix `AttributeError: 'function' object has no attribute 'pop'` when calling `remove_edge_index` Not sure if I am misunderstanding, please take a look:) --------- Co-authored-by: rusty1s --- test/data/test_data.py | 16 ++++++++ test/data/test_hetero_data.py | 7 ++++ test/nn/conv/test_eg_conv.py | 57 +++++++++++++++------------- test/nn/conv/test_hypergraph_conv.py | 2 +- torch_geometric/data/data.py | 6 +-- torch_geometric/data/hetero_data.py | 6 +-- torch_geometric/nn/conv/eg_conv.py | 27 +++++++++---- 7 files changed, 81 insertions(+), 40 deletions(-) diff --git a/test/data/test_data.py b/test/data/test_data.py index 2271fb59d7da..e52f5f8c6d3a 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -29,11 +29,20 @@ def test_data(): assert data.get('x').tolist() == x.tolist() assert data.get('y', 2) == 2 assert data.get('y', None) is None + assert data.num_edge_types == 1 + assert data.num_node_types == 1 + assert next(data('x')) == ('x', x) assert sorted(data.keys()) == ['edge_index', 'x'] assert len(data) == 2 assert 'x' in data and 'edge_index' in data and 'pos' not in data + data.apply_(lambda x: x.mul_(2), 'x') + assert torch.allclose(data.x, x) + + data.requires_grad_('x') + assert data.x.requires_grad is True + D = data.to_dict() assert len(D) == 2 assert 'x' in D and 'edge_index' in D @@ -453,6 +462,13 @@ def assert_equal_tensor_tuple(expected, actual): edge_attrs = data.get_all_edge_attrs() assert len(edge_attrs) == 3 + # Remove: + coo, csr, csc = edge_attrs + data.remove_edge_index(coo) + data.remove_edge_index(csr) + data.remove_edge_index(csc) + assert len(data.get_all_edge_attrs()) == 0 + def test_data_generate_ids(): x = torch.randn(3, 8) diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index b820d0815c49..7d5b78fb967b 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -639,6 +639,13 @@ def assert_equal_tensor_tuple(expected, actual): edge_attrs = data.get_all_edge_attrs() assert len(edge_attrs) == 3 + # Remove: + coo, csr, csc = edge_attrs + data.remove_edge_index(coo) + data.remove_edge_index(csr) + data.remove_edge_index(csc) + assert len(data.get_all_edge_attrs()) == 0 + def test_data_generate_ids(): data = HeteroData() diff --git 
a/test/nn/conv/test_eg_conv.py b/test/nn/conv/test_eg_conv.py index 665e1bcf05c0..cdc86edc89fc 100644 --- a/test/nn/conv/test_eg_conv.py +++ b/test/nn/conv/test_eg_conv.py @@ -1,3 +1,4 @@ +import pytest import torch import torch_geometric.typing @@ -7,55 +8,59 @@ from torch_geometric.utils import to_torch_csc_tensor -def test_eg_conv(): +def test_eg_conv_with_error(): + with pytest.raises(ValueError, match="must be divisible by the number of"): + EGConv(16, 30, num_heads=8) + + with pytest.raises(ValueError, match="Unsupported aggregator"): + EGConv(16, 32, aggregators=['xxx']) + + +@pytest.mark.parametrize('aggregators', [ + ['symnorm'], + ['sum', 'symnorm', 'max', 'std'], +]) +@pytest.mark.parametrize('add_self_loops', [True, False]) +def test_eg_conv(aggregators, add_self_loops): x = torch.randn(4, 16) edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) adj1 = to_torch_csc_tensor(edge_index, size=(4, 4)) - conv = EGConv(16, 32) - assert str(conv) == "EGConv(16, 32, aggregators=['symnorm'])" + conv = EGConv( + in_channels=16, + out_channels=32, + aggregators=aggregators, + add_self_loops=add_self_loops, + ) + assert str(conv) == f"EGConv(16, 32, aggregators={aggregators})" out = conv(x, edge_index) assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) + assert torch.allclose(conv(x, adj1.t()), out, atol=1e-2) if torch_geometric.typing.WITH_TORCH_SPARSE: adj2 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-2) conv.cached = True - assert torch.allclose(conv(x, edge_index), out, atol=1e-6) + assert torch.allclose(conv(x, edge_index), out, atol=1e-2) assert conv._cached_edge_index is not None - assert torch.allclose(conv(x, edge_index), out, atol=1e-6) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) + assert torch.allclose(conv(x, edge_index), out, atol=1e-2) + assert torch.allclose(conv(x, adj1.t()), out, atol=1e-2) if torch_geometric.typing.WITH_TORCH_SPARSE: - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-2) assert conv._cached_adj_t is not None - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) + assert torch.allclose(conv(x, adj2.t()), out, atol=1e-2) if is_full_test(): t = '(Tensor, Tensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index), out, atol=1e-6) + assert torch.allclose(jit(x, edge_index), out, atol=1e-2) if is_full_test() and torch_geometric.typing.WITH_TORCH_SPARSE: t = '(Tensor, SparseTensor) -> Tensor' jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj2.t()), out, atol=1e-6) - - -def test_eg_conv_multiple_aggregators(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = EGConv(16, 32, aggregators=["max", "min"]) - assert str(conv) == "EGConv(16, 32, aggregators=['max', 'min'])" - out = conv(x, edge_index) - assert out.size() == (4, 32) - - if torch_geometric.typing.WITH_TORCH_SPARSE: - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) + assert torch.allclose(jit(x, adj2.t()), out, atol=1e-2) def test_eg_conv_with_sparse_input_feature(): diff --git a/test/nn/conv/test_hypergraph_conv.py b/test/nn/conv/test_hypergraph_conv.py index a510e0cd9aff..483796721f27 100644 --- a/test/nn/conv/test_hypergraph_conv.py +++ 
b/test/nn/conv/test_hypergraph_conv.py @@ -6,10 +6,10 @@ def test_hypergraph_conv_with_more_nodes_than_edges(): in_channels, out_channels = (16, 32) hyperedge_index = torch.tensor([[0, 0, 1, 1, 2, 3], [0, 1, 0, 1, 0, 1]]) - hyperedge_weight = torch.tensor([1.0, 0.5]) num_nodes = hyperedge_index[0].max().item() + 1 num_edges = hyperedge_index[1].max().item() + 1 x = torch.randn((num_nodes, in_channels)) + hyperedge_weight = torch.tensor([1.0, 0.5]) hyperedge_attr = torch.randn((num_edges, in_channels)) conv = HypergraphConv(in_channels, out_channels) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index d618fb4ded97..935c3b43ab13 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -954,17 +954,17 @@ def _remove_edge_index(self, edge_attr: EdgeAttr) -> bool: if edge_attr.layout == EdgeLayout.COO and 'edge_index' in self: del self.edge_index if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop(EdgeLayout.COO, None) + self._edge_attrs.pop(EdgeLayout.COO, None) return True elif edge_attr.layout == EdgeLayout.CSR and 'adj' in self: del self.adj if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop(EdgeLayout.CSR, None) + self._edge_attrs.pop(EdgeLayout.CSR, None) return True elif edge_attr.layout == EdgeLayout.CSC and 'adj_t' in self: del self.adj_t if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop(EdgeLayout.CSC, None) + self._edge_attrs.pop(EdgeLayout.CSC, None) return True return False diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index 4df5ce69ab50..b83157bb4349 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -1050,17 +1050,17 @@ def _remove_edge_index(self, edge_attr: EdgeAttr) -> bool: if edge_attr.layout == EdgeLayout.COO and 'edge_index' in store: del store.edge_index if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop((edge_type, EdgeLayout.COO), None) + self._edge_attrs.pop((edge_type, EdgeLayout.COO), None) return True elif edge_attr.layout == EdgeLayout.CSR and 'adj' in store: del store.adj if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop((edge_type, EdgeLayout.CSR), None) + self._edge_attrs.pop((edge_type, EdgeLayout.CSR), None) return True elif edge_attr.layout == EdgeLayout.CSC and 'adj_t' in store: del store.adj_t if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop((edge_type, EdgeLayout.CSC), None) + self._edge_attrs.pop((edge_type, EdgeLayout.CSC), None) return True return False diff --git a/torch_geometric/nn/conv/eg_conv.py b/torch_geometric/nn/conv/eg_conv.py index 964a74361940..b1af61f4020e 100644 --- a/torch_geometric/nn/conv/eg_conv.py +++ b/torch_geometric/nn/conv/eg_conv.py @@ -77,15 +77,24 @@ class EGConv(MessagePassing): _cached_edge_index: Optional[Tuple[Tensor, OptTensor]] _cached_adj_t: Optional[SparseTensor] - def __init__(self, in_channels: int, out_channels: int, - aggregators: List[str] = ["symnorm"], num_heads: int = 8, - num_bases: int = 4, cached: bool = False, - add_self_loops: bool = True, bias: bool = True, **kwargs): + def __init__( + self, + in_channels: int, + out_channels: int, + aggregators: List[str] = ['symnorm'], + num_heads: int = 8, + num_bases: int = 4, + cached: bool = False, + add_self_loops: bool = True, + bias: bool = True, + **kwargs, + ): super().__init__(node_dim=0, **kwargs) if out_channels % num_heads != 0: - raise ValueError( - 'out_channels must be divisible by the number of heads') + raise ValueError(f"'out_channels' (got {out_channels}) 
must be " + f"divisible by the number of heads " + f"(got {num_heads})") for a in aggregators: if a not in ['sum', 'mean', 'symnorm', 'min', 'max', 'var', 'std']: @@ -223,7 +232,11 @@ def aggregate(self, inputs: Tensor, index: Tensor, def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor: adj_t_2 = adj_t if len(self.aggregators) > 1 and 'symnorm' in self.aggregators: - adj_t_2 = adj_t.set_value(None) + if isinstance(adj_t, SparseTensor): + adj_t_2 = adj_t.set_value(None) + else: + adj_t_2 = adj_t.clone() + adj_t_2.values().fill_(1.0) outs = [] for aggr in self.aggregators: From 4a7658f22cc16dfc4fc776195fb91d4257507c34 Mon Sep 17 00:00:00 2001 From: ArchieGertsman Date: Sun, 24 Sep 2023 10:26:34 -0500 Subject: [PATCH 1499/2432] Added an Example for `LCMAggregation`: Second Minimum Task (#8020) I added the first benchmark task from the paper [Learnable Commutative Monoids for Graph Neural Networks](https://arxiv.org/pdf/2212.08541.pdf), which is the task of identifying the second smallest integer from a multiset. In short, when using an encoder-aggregator-decoder architecture (as in GNN's), fixed aggregators like `sum` or `max` fail to "align" with this task, such that no realistic encoder or decoder architecture can compensate. Learnable aggregations, like `LCMAggregation`, offer more expressivity and are much more effective at such tasks. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- examples/lcm_aggr_2nd_min.py | 143 +++++++++++++++++++++++++++++++++++ test/nn/conv/test_eg_conv.py | 2 +- 3 files changed, 145 insertions(+), 2 deletions(-) create mode 100644 examples/lcm_aggr_2nd_min.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 76e0eafc2a69..9bda88fda1cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) -- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023), [#8026](https://github.com/pyg-team/pytorch_geometric/pull/8026)) +- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids, along with an example ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8020](https://github.com/pyg-team/pytorch_geometric/pull/8020), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023), [#8026](https://github.com/pyg-team/pytorch_geometric/pull/8026)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) diff --git a/examples/lcm_aggr_2nd_min.py b/examples/lcm_aggr_2nd_min.py new file mode 100644 index 000000000000..0ce2b854def3 --- /dev/null +++ b/examples/lcm_aggr_2nd_min.py @@ -0,0 +1,143 @@ +# Final validation accuracy: ~95% +import argparse + +import torch +import torch.nn.functional as F +from torch import Tensor + +from torch_geometric.data import Data, InMemoryDataset +from torch_geometric.loader import DataLoader +from torch_geometric.nn import LCMAggregation + +parser = argparse.ArgumentParser() +parser.add_argument('--num_bits', type=int, default=8) +args = parser.parse_args() + + +class Random2ndMinimumDataset(InMemoryDataset): + r""""A labeled dataset, where each sample is a multiset of integers + encoded as bit-vectors, and the label is the second smallest integer + in the multiset.""" + def __init__( + self, + num_examples: int, + num_bits: int, + min_num_elems: int, + max_num_elems: int, + ): + super().__init__(None) + + self.data, self.slices = self.collate([ + self.get_data(num_bits, min_num_elems, max_num_elems) + for _ in range(num_examples) + ]) + + def get_data( + self, + num_bits: int, + min_num_elems: int, + max_num_elems: int, + ) -> Data: + + num_elems = int(torch.randint(min_num_elems, max_num_elems + 1, (1, ))) + + x = torch.randint(0, 2, (num_elems, num_bits)) + + power = torch.pow(2, torch.arange(num_bits)).flip([0]) + ints = (x * power.view(1, -1)).sum(dim=-1) + y = x[ints.topk(k=2, largest=False).indices[-1:]].to(torch.float) + + return Data(x=x, y=y) + + +train_dataset = Random2ndMinimumDataset( + num_examples=2**16, # 65,536 + num_bits=args.num_bits, + min_num_elems=2, + max_num_elems=16, +) +# Validate on multi sets of size 32, larger than observed during training: +val_dataset = Random2ndMinimumDataset( + num_examples=2**10, # 1024 + num_bits=args.num_bits, + min_num_elems=32, + max_num_elems=32, +) + +train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True) +val_loader = DataLoader(val_dataset, batch_size=128) + + +class BitwiseEmbedding(torch.nn.Module): + def __init__(self, emb_dim: int): + super().__init__() + self.embs = 
torch.nn.ModuleList( + [torch.nn.Embedding(2, emb_dim) for _ in range(args.num_bits)]) + + def forward(self, x: Tensor) -> Tensor: + xs = [emb(b) for emb, b in zip(self.embs, x.t())] + return torch.stack(xs, dim=0).sum(0) + + +class LCM(torch.nn.Module): + def __init__(self, emb_dim: int, dropout: float = 0.25): + super().__init__() + + self.encoder = torch.nn.Sequential( + BitwiseEmbedding(emb_dim), + torch.nn.Linear(emb_dim, emb_dim), + torch.nn.Dropout(), + torch.nn.GELU(), + ) + + self.aggr = LCMAggregation(emb_dim, emb_dim, project=False) + + self.decoder = torch.nn.Sequential( + torch.nn.Linear(emb_dim, emb_dim), + torch.nn.Dropout(dropout), + torch.nn.GELU(), + torch.nn.Linear(emb_dim, args.num_bits), + ) + + def forward(self, x: Tensor, batch: Tensor) -> Tensor: + x = self.encoder(x) + x = self.aggr(x, batch) + x = self.decoder(x) + return x + + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +model = LCM(emb_dim=128).to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) + + +def train(): + total_loss = total_examples = 0 + for batch in train_loader: + batch = batch.to(device) + optimizer.zero_grad() + out = model(batch.x, batch.batch) + loss = F.binary_cross_entropy_with_logits(out, batch.y) + loss.backward() + optimizer.step() + total_loss += batch.num_graphs * float(loss) + total_examples += batch.num_graphs + return total_loss / total_examples + + +@torch.no_grad() +def test(loader): + total_correct = total_examples = 0 + for batch in loader: + batch = batch.to(device) + pred = model(batch.x, batch.batch).sigmoid().round() + num_mistakes = (pred != batch.y).sum(dim=-1) + total_correct += int((num_mistakes == 0).sum()) + total_examples += batch.num_graphs + return total_correct / total_examples + + +for epoch in range(1, 1001): + loss = train() + val_acc = test(val_loader) + print(f'Epoch: {epoch:04d}, Loss: {loss:.4f}, Val Acc: {val_acc:.4f}') diff --git a/test/nn/conv/test_eg_conv.py b/test/nn/conv/test_eg_conv.py index cdc86edc89fc..b9c9c58e9051 100644 --- a/test/nn/conv/test_eg_conv.py +++ b/test/nn/conv/test_eg_conv.py @@ -18,7 +18,7 @@ def test_eg_conv_with_error(): @pytest.mark.parametrize('aggregators', [ ['symnorm'], - ['sum', 'symnorm', 'max', 'std'], + ['sum', 'symnorm', 'std'], ]) @pytest.mark.parametrize('add_self_loops', [True, False]) def test_eg_conv(aggregators, add_self_loops): From 6f10a61ebcef4ff40ee478cc6db698f1d37e5669 Mon Sep 17 00:00:00 2001 From: ArchieGertsman Date: Wed, 27 Sep 2023 09:48:05 -0500 Subject: [PATCH 1500/2432] Added a random permutation transformation to the LCM 2nd minimum example (#8075) In the LCM paper, it is suggested to randomly permute the data as an augmentation technique for better generalization. I have included this as a dataset transform in the 2nd minimum example. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- examples/lcm_aggr_2nd_min.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bda88fda1cc..6c159c264e4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230)) - Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919)) - Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013)) -- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids, along with an example ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8020](https://github.com/pyg-team/pytorch_geometric/pull/8020), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023), [#8026](https://github.com/pyg-team/pytorch_geometric/pull/8026)) +- Added `LCMAggregation`, an implementation of Learnable Communitive Monoids, along with an example ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8020](https://github.com/pyg-team/pytorch_geometric/pull/8020), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023), [#8026](https://github.com/pyg-team/pytorch_geometric/pull/8026), [#8075](https://github.com/pyg-team/pytorch_geometric/pull/8075)) - Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995)) - Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994)) - Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905)) diff --git a/examples/lcm_aggr_2nd_min.py b/examples/lcm_aggr_2nd_min.py index 0ce2b854def3..ad92f977f08b 100644 --- a/examples/lcm_aggr_2nd_min.py +++ b/examples/lcm_aggr_2nd_min.py @@ -8,12 +8,19 @@ from torch_geometric.data import Data, InMemoryDataset from torch_geometric.loader import DataLoader from torch_geometric.nn import LCMAggregation +from torch_geometric.transforms import BaseTransform parser = argparse.ArgumentParser() parser.add_argument('--num_bits', type=int, default=8) args = parser.parse_args() +class RandomPermutation(BaseTransform): + def forward(self, data: Data) -> Data: + data.x = data.x[torch.randperm(data.x.size(0))] + return data + + class Random2ndMinimumDataset(InMemoryDataset): r""""A labeled dataset, where each sample is a multiset of integers encoded as bit-vectors, and the label is the second smallest integer in the multiset.""" def __init__( self, num_examples: int, num_bits: int, min_num_elems: int, max_num_elems: int, ): - super().__init__(None) + super().__init__(transform=RandomPermutation()) self.data, self.slices = self.collate([ self.get_data(num_bits, min_num_elems, max_num_elems) for _ in range(num_examples) ]) From 18f0a19510046202fc9812c992d521021d0570cd Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 28 Sep 2023 09:59:38 +0200 Subject: [PATCH 1501/2432] Test permutation invariance of `AddLaplacianEigenvectorsPE` (#8087) --- test/transforms/test_add_metapaths.py | 66 +++++++++---------- .../test_add_positional_encoding.py | 45 +++++++++---- test/transforms/test_fixed_points.py | 14 ++-- test/transforms/test_mask_transform.py | 20 +++--- test/transforms/test_pad.py | 35 ++++------ .../transforms/add_positional_encoding.py | 1 + 6 files changed, 94 insertions(+), 87 deletions(-) diff --git a/test/transforms/test_add_metapaths.py b/test/transforms/test_add_metapaths.py index e16bd4ae322a..994ad3527760 100644 --- a/test/transforms/test_add_metapaths.py +++ b/test/transforms/test_add_metapaths.py @@ -1,5 +1,3 @@ -import copy - import torch from torch import tensor
@@ -30,22 +28,22 @@ def test_add_metapaths(): transform = AddMetaPaths(metapaths) assert str(transform) == 'AddMetaPaths()' - meta1 = transform(copy.copy(data)) + meta1 = transform(data) transform = AddMetaPaths(metapaths, drop_orig_edge_types=True) assert str(transform) == 'AddMetaPaths()' - meta2 = transform(copy.copy(data)) + meta2 = transform(data) transform = AddMetaPaths(metapaths, drop_orig_edge_types=True, keep_same_node_type=True) assert str(transform) == 'AddMetaPaths()' - meta3 = transform(copy.copy(data)) + meta3 = transform(data) transform = AddMetaPaths(metapaths, drop_orig_edge_types=True, keep_same_node_type=True, drop_unconnected_node_types=True) assert str(transform) == 'AddMetaPaths()' - meta4 = transform(copy.copy(data)) + meta4 = transform(data) assert meta1['metapath_0'].edge_index.size() == (2, 9) assert meta2['metapath_0'].edge_index.size() == (2, 9) @@ -66,7 +64,7 @@ def test_add_metapaths(): [('a', 'p'), ('p', 'c'), ('c', 'p'), ('p', 'a')], ] transform = AddMetaPaths(metapaths) - meta = transform(copy.copy(data)) + meta = transform(data) new_edge_types = [('a', 'metapath_0', 'c'), ('a', 'metapath_1', 'a')] assert meta['metapath_0'].edge_index.size() == (2, 4) assert meta['metapath_1'].edge_index.size() == (2, 4) @@ -112,7 +110,7 @@ def test_add_weighted_metapaths(): ('b', 'a')], ] transform = AddMetaPaths(metapaths, weighted=True) - out = transform(copy.copy(data)) + out = transform(data) # Make sure manually added metapaths compute the correct number of edges: edge_index = out['a', 'a'].edge_index @@ -135,7 +133,7 @@ def test_add_weighted_metapaths(): # Compute intra-table metapaths efficiently: metapaths = [[('a', 'b'), ('b', 'c'), ('c', 'd')]] - out = AddMetaPaths(metapaths, weighted=True)(copy.copy(data)) + out = AddMetaPaths(metapaths, weighted=True)(data) out['d', 'a'].edge_index = out['a', 'd'].edge_index.flip([0]) out['d', 'a'].edge_weight = out['a', 'd'].edge_weight metapaths = [[('a', 'd'), ('d', 'a')]] @@ -157,43 +155,43 @@ def test_add_random_metapaths(): torch.manual_seed(12345) transform = AddRandomMetaPaths(metapaths) - assert str(transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[1])' - meta1 = transform(copy.copy(data)) + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[1])') + meta1 = transform(data) transform = AddRandomMetaPaths(metapaths, drop_orig_edge_types=True) - assert str(transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[1])' - meta2 = transform(copy.copy(data)) + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[1])') + meta2 = transform(data) transform = AddRandomMetaPaths(metapaths, drop_orig_edge_types=True, keep_same_node_type=True) - assert str(transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[1])' - meta3 = transform(copy.copy(data)) + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[1])') + meta3 = transform(data) transform = AddRandomMetaPaths(metapaths, drop_orig_edge_types=True, keep_same_node_type=True, drop_unconnected_node_types=True) - assert str(transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[1])' - meta4 = transform(copy.copy(data)) + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[1])') + meta4 = transform(data) transform = AddRandomMetaPaths(metapaths, sample_ratio=0.8, drop_orig_edge_types=True, keep_same_node_type=True, drop_unconnected_node_types=True) - assert str(transform - ) == 
'AddRandomMetaPaths(sample_ratio=0.8, walks_per_node=[1])' - meta5 = transform(copy.copy(data)) + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=0.8, ' + 'walks_per_node=[1])') + meta5 = transform(data) transform = AddRandomMetaPaths(metapaths, walks_per_node=5, drop_orig_edge_types=True, keep_same_node_type=True, drop_unconnected_node_types=True) - assert str(transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[5])' - meta6 = transform(copy.copy(data)) + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[5])') + meta6 = transform(data) assert meta1['metapath_0'].edge_index.size() == (2, 5) assert meta2['metapath_0'].edge_index.size() == (2, 5) @@ -216,11 +214,10 @@ def test_add_random_metapaths(): [('a', 'p'), ('p', 'c'), ('c', 'p'), ('p', 'a')], ] transform = AddRandomMetaPaths(metapaths) - assert str( - transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[1, 1])' + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[1, 1])') - meta1 = transform(copy.copy(data)) + meta1 = transform(data) new_edge_types = [('a', 'metapath_0', 'c'), ('a', 'metapath_1', 'a')] assert meta1['metapath_0'].edge_index.size() == (2, 2) assert meta1['metapath_1'].edge_index.size() == (2, 2) @@ -230,11 +227,10 @@ def test_add_random_metapaths(): assert list(meta1.metapath_dict.keys()) == new_edge_types transform = AddRandomMetaPaths(metapaths, walks_per_node=[2, 5]) - assert str( - transform - ) == 'AddRandomMetaPaths(sample_ratio=1.0, walks_per_node=[2, 5])' + assert str(transform) == ('AddRandomMetaPaths(sample_ratio=1.0, ' + 'walks_per_node=[2, 5])') - meta2 = transform(copy.copy(data)) + meta2 = transform(data) new_edge_types = [('a', 'metapath_0', 'c'), ('a', 'metapath_1', 'a')] assert meta2['metapath_0'].edge_index.size() == (2, 2) assert meta2['metapath_1'].edge_index.size() == (2, 3) diff --git a/test/transforms/test_add_positional_encoding.py b/test/transforms/test_add_positional_encoding.py index 414e615526a5..a3cd16ce97d4 100644 --- a/test/transforms/test_add_positional_encoding.py +++ b/test/transforms/test_add_positional_encoding.py @@ -1,5 +1,3 @@ -import copy - import torch from torch_geometric.data import Data @@ -18,15 +16,15 @@ def test_add_laplacian_eigenvector_pe(): transform = AddLaplacianEigenvectorPE(k=3) assert str(transform) == 'AddLaplacianEigenvectorPE()' - out = transform(copy.copy(data)) + out = transform(data) assert out.laplacian_eigenvector_pe.size() == (6, 3) transform = AddLaplacianEigenvectorPE(k=3, attr_name=None) - out = transform(copy.copy(data)) + out = transform(data) assert out.x.size() == (6, 4 + 3) transform = AddLaplacianEigenvectorPE(k=3, attr_name='x') - out = transform(copy.copy(data)) + out = transform(data) assert out.x.size() == (6, 3) # Output tests: @@ -38,14 +36,14 @@ def test_add_laplacian_eigenvector_pe(): transform2 = AddLaplacianEigenvectorPE(k=1, is_undirected=False) # Clustering test with first non-trivial eigenvector (Fiedler vector) - pe = transform1(copy.copy(data)).laplacian_eigenvector_pe + pe = transform1(data).laplacian_eigenvector_pe pe_cluster_1 = pe[[0, 1, 4]] pe_cluster_2 = pe[[2, 3, 5]] assert not torch.allclose(pe_cluster_1, pe_cluster_2) assert torch.allclose(pe_cluster_1, pe_cluster_1.mean()) assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) - pe = transform2(copy.copy(data)).laplacian_eigenvector_pe + pe = transform2(data).laplacian_eigenvector_pe pe_cluster_1 = pe[[0, 1, 4]] pe_cluster_2 = pe[[2, 3, 5]] assert not 
torch.allclose(pe_cluster_1, pe_cluster_2) @@ -53,6 +51,31 @@ def test_add_laplacian_eigenvector_pe(): assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) +def test_eigenvector_permutation_invariance(): + edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5], + [1, 0, 4, 0, 4, 1, 3, 2, 5, 3]]) + data = Data(edge_index=edge_index, num_nodes=6) + + perm = torch.tensor([5, 4, 3, 2, 1, 0]) + transform = AddLaplacianEigenvectorPE( + k=1, + is_undirected=True, + attr_name='x', + v0=torch.arange(data.num_nodes), + ) + out1 = transform(data) + + transform = AddLaplacianEigenvectorPE( + k=1, + is_undirected=True, + attr_name='x', + v0=perm, + ) + out2 = transform(data.subgraph(perm)) + + assert torch.allclose(out1.x[perm].abs(), out2.x.abs(), atol=1e-1) + + @onlyLinux # TODO (matthias) Investigate CSR @ CSR support on Windows. def test_add_random_walk_pe(): x = torch.randn(6, 4) @@ -62,15 +85,15 @@ def test_add_random_walk_pe(): transform = AddRandomWalkPE(walk_length=3) assert str(transform) == 'AddRandomWalkPE()' - out = transform(copy.copy(data)) + out = transform(data) assert out.random_walk_pe.size() == (6, 3) transform = AddRandomWalkPE(walk_length=3, attr_name=None) - out = transform(copy.copy(data)) + out = transform(data) assert out.x.size() == (6, 4 + 3) transform = AddRandomWalkPE(walk_length=3, attr_name='x') - out = transform(copy.copy(data)) + out = transform(data) assert out.x.size() == (6, 3) # Output tests: @@ -85,7 +108,7 @@ def test_add_random_walk_pe(): edge_index = torch.tensor([[0, 1, 2], [0, 1, 2]]) data = Data(edge_index=edge_index, num_nodes=4) - out = transform(copy.copy(data)) + out = transform(data) assert out.x.tolist() == [ [1.0, 1.0, 1.0], diff --git a/test/transforms/test_fixed_points.py b/test/transforms/test_fixed_points.py index 2f0c8917ac9b..321ea7562b06 100644 --- a/test/transforms/test_fixed_points.py +++ b/test/transforms/test_fixed_points.py @@ -1,5 +1,3 @@ -from copy import copy - import torch from torch_geometric.data import Data @@ -17,7 +15,7 @@ def test_fixed_points(): num_nodes=100, ) - out = FixedPoints(50, replace=True)(copy(data)) + out = FixedPoints(50, replace=True)(data) assert len(out) == 5 assert out.pos.size() == (50, 3) assert out.x.size() == (50, 16) @@ -25,7 +23,7 @@ def test_fixed_points(): assert out.edge_attr.size() == (100, 3) assert out.num_nodes == 50 - out = FixedPoints(200, replace=True)(copy(data)) + out = FixedPoints(200, replace=True)(data) assert len(out) == 5 assert out.pos.size() == (200, 3) assert out.x.size() == (200, 16) @@ -33,7 +31,7 @@ def test_fixed_points(): assert out.edge_attr.size() == (100, 3) assert out.num_nodes == 200 - out = FixedPoints(50, replace=False, allow_duplicates=False)(copy(data)) + out = FixedPoints(50, replace=False, allow_duplicates=False)(data) assert len(out) == 5 assert out.pos.size() == (50, 3) assert out.x.size() == (50, 16) @@ -41,7 +39,7 @@ def test_fixed_points(): assert out.edge_attr.size() == (100, 3) assert out.num_nodes == 50 - out = FixedPoints(200, replace=False, allow_duplicates=False)(copy(data)) + out = FixedPoints(200, replace=False, allow_duplicates=False)(data) assert len(out) == 5 assert out.pos.size() == (100, 3) assert out.x.size() == (100, 16) @@ -49,7 +47,7 @@ def test_fixed_points(): assert out.edge_attr.size() == (100, 3) assert out.num_nodes == 100 - out = FixedPoints(50, replace=False, allow_duplicates=True)(copy(data)) + out = FixedPoints(50, replace=False, allow_duplicates=True)(data) assert len(out) == 5 assert out.pos.size() == (50, 3) assert out.x.size() == 
(50, 16) @@ -57,7 +55,7 @@ def test_fixed_points(): assert out.edge_attr.size() == (100, 3) assert out.num_nodes == 50 - out = FixedPoints(200, replace=False, allow_duplicates=True)(copy(data)) + out = FixedPoints(200, replace=False, allow_duplicates=True)(data) assert len(out) == 5 assert out.pos.size() == (200, 3) assert out.x.size() == (200, 16) diff --git a/test/transforms/test_mask_transform.py b/test/transforms/test_mask_transform.py index c36df3630e55..a50050cdd38d 100644 --- a/test/transforms/test_mask_transform.py +++ b/test/transforms/test_mask_transform.py @@ -1,5 +1,3 @@ -import copy - import torch from torch_geometric.data import Data, HeteroData @@ -17,19 +15,19 @@ def test_index_to_mask(): data = Data(edge_index=edge_index, train_index=train_index, test_index=test_index, num_nodes=5) - out = IndexToMask(replace=True)(copy.copy(data)) + out = IndexToMask(replace=True)(data) assert len(out) == len(data) assert out.train_mask.tolist() == [True, True, True, False, False] assert out.test_mask.tolist() == [False, False, False, True, True] - out = IndexToMask(replace=False)(copy.copy(data)) + out = IndexToMask(replace=False)(data) assert len(out) == len(data) + 2 - out = IndexToMask(sizes=6, replace=True)(copy.copy(data)) + out = IndexToMask(sizes=6, replace=True)(data) assert out.train_mask.tolist() == [True, True, True, False, False, False] assert out.test_mask.tolist() == [False, False, False, True, True, False] - out = IndexToMask(attrs='train_index')(copy.copy(data)) + out = IndexToMask(attrs='train_index')(data) assert len(out) == len(data) + 1 assert 'train_index' in out assert 'train_mask' in out @@ -44,15 +42,15 @@ def test_mask_to_index(): test_mask = torch.tensor([False, False, False, True, True]) data = Data(train_mask=train_mask, test_mask=test_mask) - out = MaskToIndex(replace=True)(copy.copy(data)) + out = MaskToIndex(replace=True)(data) assert len(out) == len(data) assert out.train_index.tolist() == [0, 1, 2] assert out.test_index.tolist() == [3, 4] - out = MaskToIndex(replace=False)(copy.copy(data)) + out = MaskToIndex(replace=False)(data) assert len(out) == len(data) + 2 - out = MaskToIndex(attrs='train_mask')(copy.copy(data)) + out = MaskToIndex(attrs='train_mask')(data) assert len(out) == len(data) + 1 assert 'train_mask' in out assert 'train_index' in out @@ -70,7 +68,7 @@ def test_hetero_index_to_mask(): data['v'].test_index = torch.arange(3, 5) data['v'].num_nodes = 5 - out = IndexToMask()(copy.copy(data)) + out = IndexToMask()(data) assert len(out) == len(data) + 2 assert 'train_mask' in out['u'] assert 'test_mask' in out['u'] @@ -86,7 +84,7 @@ def test_hetero_mask_to_index(): data['v'].train_mask = torch.tensor([True, True, True, False, False]) data['v'].test_mask = torch.tensor([False, False, False, True, True]) - out = MaskToIndex()(copy.copy(data)) + out = MaskToIndex()(data) assert len(out) == len(data) + 2 assert 'train_index' in out['u'] assert 'test_index' in out['u'] diff --git a/test/transforms/test_pad.py b/test/transforms/test_pad.py index 8deb63949346..944ed9e56478 100644 --- a/test/transforms/test_pad.py +++ b/test/transforms/test_pad.py @@ -1,5 +1,4 @@ import numbers -from copy import deepcopy from typing import Dict, Generator, List, Optional, Tuple, Union import pytest @@ -329,12 +328,10 @@ def test_pad_repr(): @pytest.mark.parametrize('num_nodes', [32, 64]) @pytest.mark.parametrize('add_pad_mask', [True, False]) def test_pad_auto_edges(data, num_nodes, add_pad_mask): - original = data - data = deepcopy(data) transform = 
Pad(max_num_nodes=num_nodes, add_pad_mask=add_pad_mask) - padded = transform(data) - _check_data(original, padded, num_nodes, is_mask_available=add_pad_mask) + out = transform(data) + _check_data(data, out, num_nodes, is_mask_available=add_pad_mask) @pytest.mark.parametrize('num_nodes', [32, 64]) @@ -342,12 +339,11 @@ def test_pad_auto_edges(data, num_nodes, add_pad_mask): @pytest.mark.parametrize('add_pad_mask', [True, False]) def test_pad_data_explicit_edges(num_nodes, num_edges, add_pad_mask): data = fake_data() - original = deepcopy(data) transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges, add_pad_mask=add_pad_mask) - padded = transform(data) - _check_data(original, padded, num_nodes, num_edges, + out = transform(data) + _check_data(data, out, num_nodes, num_edges, is_mask_available=add_pad_mask) @@ -356,12 +352,11 @@ def test_pad_data_explicit_edges(num_nodes, num_edges, add_pad_mask): @pytest.mark.parametrize('add_pad_mask', [True, False]) def test_pad_heterodata_explicit_edges(num_nodes, num_edges, add_pad_mask): data = fake_hetero_data() - original = deepcopy(data) transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges, add_pad_mask=add_pad_mask) - padded = transform(data) - _check_data(original, padded, num_nodes, num_edges, + out = transform(data) + _check_data(data, out, num_nodes, num_edges, is_mask_available=add_pad_mask) @@ -370,12 +365,11 @@ def test_pad_heterodata_explicit_edges(num_nodes, num_edges, add_pad_mask): [11, AttrNamePadding({'edge_attr': 2.0})]) def test_pad_data_pad_values(node_pad_value, edge_pad_value): data = fake_data() - original = deepcopy(data) num_nodes = 32 transform = Pad(max_num_nodes=num_nodes, node_pad_value=node_pad_value, edge_pad_value=edge_pad_value) - padded = transform(data) - _check_data(original, padded, num_nodes, node_pad_value=node_pad_value, + out = transform(data) + _check_data(data, out, num_nodes, node_pad_value=node_pad_value, edge_pad_value=edge_pad_value) @@ -398,13 +392,12 @@ def test_pad_data_pad_values(node_pad_value, edge_pad_value): ]) def test_pad_heterodata_pad_values(node_pad_value, edge_pad_value): data = fake_hetero_data() - original = deepcopy(data) num_nodes = 32 transform = Pad(max_num_nodes=num_nodes, node_pad_value=node_pad_value, edge_pad_value=edge_pad_value) - padded = transform(data) - _check_data(original, padded, num_nodes, node_pad_value=node_pad_value, + out = transform(data) + _check_data(data, out, num_nodes, node_pad_value=node_pad_value, edge_pad_value=edge_pad_value) @@ -416,14 +409,12 @@ def test_pad_heterodata_pad_values(node_pad_value, edge_pad_value): ['y', 'edge_attr'], ]) def test_pad_data_exclude_keys(data, add_pad_mask, exclude_keys): - original = data - data = deepcopy(data) num_nodes = 32 transform = Pad(max_num_nodes=num_nodes, add_pad_mask=add_pad_mask, exclude_keys=exclude_keys) - padded = transform(data) - _check_data(original, padded, num_nodes, is_mask_available=add_pad_mask, + out = transform(data) + _check_data(data, out, num_nodes, is_mask_available=add_pad_mask, exclude_keys=exclude_keys) @@ -470,7 +461,7 @@ def test_pad_invalid_padding_type(): def test_pad_data_non_tensor_attr(): - data = deepcopy(fake_data()) + data = fake_data() batch_size = 13 data.batch_size = batch_size diff --git a/torch_geometric/transforms/add_positional_encoding.py b/torch_geometric/transforms/add_positional_encoding.py index c66f60421ab7..706cf6621406 100644 --- a/torch_geometric/transforms/add_positional_encoding.py +++ b/torch_geometric/transforms/add_positional_encoding.py @@ 
-77,6 +77,7 @@ def forward(self, data: Data) -> Data: ) L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes) + L = L.tocsr() eig_vals, eig_vecs = eig_fn( L, From 0c45a888f356fb5dc7b919561e93c289246ff5ad Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 28 Sep 2023 23:06:20 +0200 Subject: [PATCH 1502/2432] Save memory in case of redundant `value` in PyTorch sparse tensors (#8089) --- torch_geometric/utils/sparse.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py index 770169855615..47189a68d554 100644 --- a/torch_geometric/utils/sparse.py +++ b/torch_geometric/utils/sparse.py @@ -129,7 +129,14 @@ def to_torch_coo_tensor( edge_index, edge_attr = coalesce(edge_index, edge_attr, max(size)) if edge_attr is None: - edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) + if torch_geometric.typing.WITH_PT20: + edge_attr = torch.ones(1, device=edge_index.device) + edge_attr = edge_attr.expand(edge_index.size(1)) + else: + edge_attr = torch.ones( + edge_index.size(1), + device=edge_index.device, + ) adj = torch.sparse_coo_tensor( indices=edge_index, @@ -187,7 +194,14 @@ def to_torch_csr_tensor( edge_index, edge_attr = coalesce(edge_index, edge_attr, max(size)) if edge_attr is None: - edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) + if torch_geometric.typing.WITH_PT20: + edge_attr = torch.ones(1, device=edge_index.device) + edge_attr = edge_attr.expand(edge_index.size(1)) + else: + edge_attr = torch.ones( + edge_index.size(1), + device=edge_index.device, + ) adj = torch.sparse_csr_tensor( crow_indices=index2ptr(edge_index[0], size[0]), @@ -250,7 +264,14 @@ def to_torch_csc_tensor( sort_by_row=False) if edge_attr is None: - edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) + if torch_geometric.typing.WITH_PT20: + edge_attr = torch.ones(1, device=edge_index.device) + edge_attr = edge_attr.expand(edge_index.size(1)) + else: + edge_attr = torch.ones( + edge_index.size(1), + device=edge_index.device, + ) adj = torch.sparse_csc_tensor( ccol_indices=index2ptr(edge_index[1], size[1]), From b3b3d78e933bb449b4ec3ae3b4346d874adcd242 Mon Sep 17 00:00:00 2001 From: Jay Bhambhani Date: Fri, 29 Sep 2023 02:30:18 -0400 Subject: [PATCH 1503/2432] `OnDiskDataset`: `DataLoader` integration (#8088) Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/loader/test_dataloader.py | 18 ++++++++++++++- torch_geometric/loader/dataloader.py | 34 ++++++++++++++++++++-------- 3 files changed, 43 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c159c264e4b..3aacfdc34270 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added -- Added `OnDiskDataset` interface ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066)) +- Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088)) - Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) - Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index ebe2bed755ae..2e86ce7b6472 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -5,7 +5,7 @@ import pytest import torch -from torch_geometric.data import Data, HeteroData +from torch_geometric.data import Data, HeteroData, OnDiskDataset from torch_geometric.loader import DataLoader from torch_geometric.testing import get_random_edge_index, withCUDA @@ -66,6 +66,22 @@ def test_dataloader(num_workers, device): assert batch.edge_index_batch.tolist() == [0, 0, 0, 0, 1, 1, 1, 1] +def test_dataloader_on_disk_dataset(tmp_path): + dataset = OnDiskDataset(tmp_path) + data1 = Data(x=torch.randn(3, 8)) + data2 = Data(x=torch.randn(4, 8)) + dataset.extend([data1, data2]) + + loader = DataLoader(dataset, batch_size=2) + assert len(loader) == 1 + batch = next(iter(loader)) + assert batch.num_nodes == 7 + assert torch.equal(batch.x, torch.cat([data1.x, data2.x], dim=0)) + assert batch.batch.tolist() == [0, 0, 0, 1, 1, 1, 1] + + dataset.close() + + def test_dataloader_fallbacks(): # Test inputs of type List[torch.Tensor]: data_list = [torch.ones(3) for _ in range(4)] diff --git a/torch_geometric/loader/dataloader.py b/torch_geometric/loader/dataloader.py index f6c9e5b9ce53..c803dbba8711 100644 --- a/torch_geometric/loader/dataloader.py +++ b/torch_geometric/loader/dataloader.py @@ -1,5 +1,5 @@ from collections.abc import Mapping -from typing import List, Optional, Sequence, Union +from typing import Any, List, Optional, Sequence, Union import torch.utils.data from torch.utils.data.dataloader import default_collate @@ -7,18 +7,28 @@ from torch_geometric.data import Batch, Dataset from torch_geometric.data.data import BaseData from torch_geometric.data.datapipes import DatasetAdapter +from torch_geometric.data.on_disk_dataset import OnDiskDataset class Collater: - def __init__(self, follow_batch, exclude_keys): + def __init__( + self, + dataset: Union[Dataset, Sequence[BaseData], DatasetAdapter], + follow_batch: Optional[List[str]] = None, + exclude_keys: Optional[List[str]] = None, + ): + self.dataset = dataset self.follow_batch = follow_batch self.exclude_keys = exclude_keys - def __call__(self, batch): + def __call__(self, batch: List[Any]) -> Any: elem = batch[0] if isinstance(elem, BaseData): - return Batch.from_data_list(batch, self.follow_batch, - self.exclude_keys) + return Batch.from_data_list( + batch, + follow_batch=self.follow_batch, + exclude_keys=self.exclude_keys, + ) elif isinstance(elem, torch.Tensor): return default_collate(batch) elif isinstance(elem, float): @@ -34,10 +44,11 @@ def __call__(self, batch): elif isinstance(elem, Sequence) and not isinstance(elem, str): return [self(s) for s in zip(*batch)] - raise TypeError(f'DataLoader found invalid type: {type(elem)}') + raise TypeError(f"DataLoader found invalid 
type: '{type(elem)}'") - def collate(self, batch): # pragma: no cover - # TODO Deprecated, remove soon. + def collate_fn(self, batch: List[Any]) -> Any: + if isinstance(self.dataset, OnDiskDataset): + return self(self.dataset.multi_get(batch)) return self(batch) @@ -76,10 +87,15 @@ def __init__( self.follow_batch = follow_batch self.exclude_keys = exclude_keys + self.collator = Collater(dataset, follow_batch, exclude_keys) + + if isinstance(dataset, OnDiskDataset): + dataset = range(len(dataset)) + super().__init__( dataset, batch_size, shuffle, - collate_fn=Collater(follow_batch, exclude_keys), + collate_fn=self.collator.collate_fn, **kwargs, ) From 68552e7bd655abb7b8631a2a521b2ce11d814da9 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 29 Sep 2023 11:12:59 +0200 Subject: [PATCH 1504/2432] Ensure `OnDiskDataset` can operate on `DataLoader(num_workers>0)` (#8092) --- CHANGELOG.md | 2 +- test/loader/test_dataloader.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3aacfdc34270..c54a41c005f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added -- Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088)) +- Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092)) - Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) - Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index 2e86ce7b6472..458a91acff02 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -66,13 +66,14 @@ def test_dataloader(num_workers, device): assert batch.edge_index_batch.tolist() == [0, 0, 0, 0, 1, 1, 1, 1] -def test_dataloader_on_disk_dataset(tmp_path): +@pytest.mark.parametrize('num_workers', num_workers_list) +def test_dataloader_on_disk_dataset(tmp_path, num_workers): dataset = OnDiskDataset(tmp_path) data1 = Data(x=torch.randn(3, 8)) data2 = Data(x=torch.randn(4, 8)) dataset.extend([data1, data2]) - loader = DataLoader(dataset, batch_size=2) + loader = DataLoader(dataset, batch_size=2, num_workers=num_workers) assert len(loader) == 1 batch = next(iter(loader)) assert batch.num_nodes == 7 From 1e12d41c28b1fb9793f17646b018071b508864d7 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Fri, 29 Sep 2023 11:57:55 +0200 Subject: [PATCH 1505/2432] Add `module_headers` property to `nn.Sequential` models (#8093) Fixes https://github.com/pyg-team/pytorch_geometric/issues/8082. 
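For illustration only (editor's note, not part of the diff below): a minimal usage sketch of the new `module_headers` property, assuming the patched `Sequential` API from this change; the `GCNConv` layers are placeholder modules chosen for the sketch.

    import torch
    from torch_geometric.nn import GCNConv, Sequential

    # Build a small two-step model; headers mirror each submodule's signature.
    model = Sequential('x, edge_index', [
        (GCNConv(16, 64), 'x, edge_index -> x'),
        torch.nn.ReLU(inplace=True),
    ])
    # Each entry is a named tuple with `args` and `output` fields:
    assert model.module_headers[0].args == ['x', 'edge_index']
    assert model.module_headers[0].output == ['x']
    assert model.module_headers[1].args == ['x']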
--- CHANGELOG.md | 1 + test/nn/test_sequential.py | 16 +++++++++++----- torch_geometric/nn/sequential.jinja | 9 +++++++-- torch_geometric/nn/sequential.py | 10 +++++++++- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c54a41c005f2..190cceba0cc5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093) - Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092)) - Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) - Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) diff --git a/test/nn/test_sequential.py b/test/nn/test_sequential.py index 43adf1b27a8a..6f61fab47a36 100644 --- a/test/nn/test_sequential.py +++ b/test/nn/test_sequential.py @@ -34,11 +34,11 @@ def test_sequential(): assert len(model) == 5 assert str(model) == ( 'Sequential(\n' - ' (0): GCNConv(16, 64)\n' - ' (1): ReLU(inplace=True)\n' - ' (2): GCNConv(64, 64)\n' - ' (3): ReLU(inplace=True)\n' - ' (4): Linear(in_features=64, out_features=7, bias=True)\n' + ' (0) - GCNConv(16, 64): x, edge_index -> x\n' + ' (1) - ReLU(inplace=True): x -> x\n' + ' (2) - GCNConv(64, 64): x, edge_index -> x\n' + ' (3) - ReLU(inplace=True): x -> x\n' + ' (4) - Linear(in_features=64, out_features=7, bias=True): x -> x\n' ')') assert isinstance(model[0], GCNConv) @@ -47,6 +47,12 @@ def test_sequential(): assert isinstance(model[3], ReLU) assert isinstance(model[4], Linear) + assert model.module_headers[0] == (['x', 'edge_index'], ['x']) + assert model.module_headers[1] == (['x'], ['x']) + assert model.module_headers[2] == (['x', 'edge_index'], ['x']) + assert model.module_headers[3] == (['x'], ['x']) + assert model.module_headers[4] == (['x'], ['x']) + out = model(x, edge_index) assert out.size() == (4, 7) diff --git a/torch_geometric/nn/sequential.jinja b/torch_geometric/nn/sequential.jinja index 013d6f46cb05..16bbfaea60de 100644 --- a/torch_geometric/nn/sequential.jinja +++ b/torch_geometric/nn/sequential.jinja @@ -24,5 +24,10 @@ class {{cls_name}}(torch.nn.Module): return {{calls|length}} def __repr__(self) -> str: - return 'Sequential(\n{}\n)'.format('\n'.join( - [f' ({idx}): ' + str(self[idx]) for idx in range(len(self))])) + module_reprs = [ + (f" ({i}) - {self[i]}: {', '.join(self.module_headers[i].args)} " + f"-> {', '.join(self.module_headers[i].output)}") + for i in range(len(self)) + ] + + return 'Sequential(\n{}\n)'.format('\n'.join(module_reprs)) diff --git a/torch_geometric/nn/sequential.py b/torch_geometric/nn/sequential.py index af4d4a88f028..3a2ea7e013b6 100644 --- a/torch_geometric/nn/sequential.py +++ b/torch_geometric/nn/sequential.py @@ -1,6 +1,6 @@ import os import os.path as osp -from typing import Callable, List, Tuple, Union +from typing import Callable, List, NamedTuple, Tuple, Union from uuid import uuid1 import torch @@ -8,6 +8,11 @@ from torch_geometric.nn.conv.utils.jit import class_from_module_repr +class HeaderDesc(NamedTuple): + args: List[str] + output: List[str] + + def Sequential( input_args: str, modules: 
List[Union[Tuple[Callable, str], Callable]], @@ -110,6 +115,9 @@ def Sequential( # Instantiate a class from the rendered module representation. module = class_from_module_repr(cls_name, module_repr)() + module.module_headers = [ + HeaderDesc(in_desc, out_desc) for _, _, in_desc, out_desc in calls + ] module._names = list(modules.keys()) for name, submodule, _, _ in calls: setattr(module, name, submodule) From 14e160d8ee192e8efd3b1a27a188d393ccab527b Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sat, 30 Sep 2023 17:09:33 +0200 Subject: [PATCH 1506/2432] `PCQM4Mv2` dataset: Reference implementation for `OnDiskDataset` (#8102) --- CHANGELOG.md | 1 + torch_geometric/data/database.py | 2 + torch_geometric/data/on_disk_dataset.py | 1 - torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/pcqm4m.py | 107 ++++++++++++++++++++++++ 5 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 torch_geometric/datasets/pcqm4m.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 190cceba0cc5..bdffe8d1faa8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102) - Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093) - Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092)) - Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index 9843749bcf0b..f4bb5afc142d 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -315,6 +315,7 @@ def insert(self, index: int, data: Any): f'(id, {self._joined_col_names}) ' f'VALUES (?, {self._dummies})') self.cursor.execute(query, (index, *self._serialize(data))) + self._connection.commit() def _multi_insert( self, @@ -331,6 +332,7 @@ def _multi_insert( f'(id, {self._joined_col_names}) ' f'VALUES (?, {self._dummies})') self.cursor.executemany(query, data_list) + self._connection.commit() def get(self, index: int) -> Any: query = (f'SELECT {self._joined_col_names} FROM {self.name} ' diff --git a/torch_geometric/data/on_disk_dataset.py b/torch_geometric/data/on_disk_dataset.py index b011573f74ec..3641aec59466 100644 --- a/torch_geometric/data/on_disk_dataset.py +++ b/torch_geometric/data/on_disk_dataset.py @@ -54,7 +54,6 @@ def __init__( self, root: str, transform: Optional[Callable] = None, - *, pre_filter: Optional[Callable] = None, backend: str = 'sqlite', schema: Schema = object, diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index f7857feed43a..7fd36ef9a719 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -20,6 +20,7 @@ from .zinc import ZINC from .aqsol import AQSOL from .molecule_net import MoleculeNet +from .pcqm4m import PCQM4Mv2 from .entities import Entities from .rel_link_pred_dataset import RelLinkPredDataset from .ged_dataset import GEDDataset @@ -125,6 +126,7 @@ 'ZINC', 'AQSOL', 'MoleculeNet', + 'PCQM4Mv2', 'Entities', 'RelLinkPredDataset', 'GEDDataset', diff --git 
a/torch_geometric/datasets/pcqm4m.py b/torch_geometric/datasets/pcqm4m.py new file mode 100644 index 000000000000..4250fad7609c --- /dev/null +++ b/torch_geometric/datasets/pcqm4m.py @@ -0,0 +1,107 @@ +import os +import os.path as osp +from typing import Any, Callable, Dict, List, Optional + +import torch +from tqdm import tqdm + +from torch_geometric.data import Data, OnDiskDataset, download_url, extract_zip +from torch_geometric.utils import from_smiles + + +class PCQM4Mv2(OnDiskDataset): + r"""The PCQM4Mv2 dataset from the `"OGB-LSC: A Large-Scale Challenge for + Machine Learning on Graphs" `_ paper. + :class:`PCQM4Mv2` is a quantum chemistry dataset originally curated under + the PubChemQC project. The task is to predict the DFT-calculated HOMO-LUMO + energy gap of molecules given their 2D molecular graphs. + + .. note:: + This dataset uses the :class:`OnDiskDataset` base class to load data + dynamically from disk. + + Args: + root (str): Root directory where the dataset should be saved. + split (str, optional): If :obj:`"train"`, loads the training dataset. + If :obj:`"val"`, loads the validation dataset. + If :obj:`"test"`, loads the test dataset. + If :obj:`"holdout"`, loads the holdout dataset. + (default: :obj:`"train"`) + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + backend (str): The :class:`Database` backend to use. + (default: :obj:`"sqlite"`) + """ + url = ('https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/' + 'pcqm4m-v2.zip') + + split_mapping = { + 'train': 'train', + 'val': 'valid', + 'test': 'test-dev', + 'holdout': 'test-challenge', + } + + def __init__( + self, + root: str, + split: str = 'train', + transform: Optional[Callable] = None, + backend: str = 'sqlite', + ): + assert split in ['train', 'val', 'test', 'holdout'] + + schema = { + 'x': dict(dtype=torch.int64, size=(-1, 9)), + 'edge_index': dict(dtype=torch.int64, size=(2, -1)), + 'edge_attr': dict(dtype=torch.int64, size=(-1, 3)), + 'smiles': str, + 'y': float, + } + + super().__init__(root, transform, backend=backend, schema=schema) + + split_idx = torch.load(self.raw_paths[1]) + self._indices = split_idx[self.split_mapping[split]].tolist() + + @property + def raw_file_names(self) -> List[str]: + return [ + osp.join('pcqm4m-v2', 'raw', 'data.csv.gz'), + osp.join('pcqm4m-v2', 'split_dict.pt'), + ] + + def download(self): + path = download_url(http://github.com/self.url_2d,%20self.raw_dir) + extract_zip(path, self.raw_dir) + os.unlink(path) + + def process(self): + import pandas as pd + + df = pd.read_csv(self.raw_paths[0]) + + data_list: List[Data] = [] + iterator = enumerate(zip(df['smiles'], df['homolumogap'])) + for i, (smiles, y) in tqdm(iterator, total=len(df)): + data = from_smiles(smiles) + data.y = y + + data_list.append(data) + if i + 1 == len(df) or (i + 1) % 1000 == 0: # Write batch-wise: + self.extend(data_list) + data_list = [] + + def serialize(self, data: Data) -> Dict[str, Any]: + return dict( + x=data.x, + edge_index=data.edge_index, + edge_attr=data.edge_attr, + y=data.x, + smiles=data.smiles, + ) + + def deserialize(self, data: Dict[str, Any]) -> Data: + return Data.from_dict(data) From 95abc1b73be3650a8d1ebd77a0b4e46add310a4d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 1 Oct 2023 12:25:01 +0200 Subject: [PATCH 1507/2432] `OnDiskDataset`: Benchmarks and minor fixes 
(#8106) --- CHANGELOG.md | 2 +- test/loader/test_dataloader.py | 57 ++++++++++++++++++++++++++++-- torch_geometric/data/database.py | 16 ++++++--- torch_geometric/datasets/pcqm4m.py | 8 +++-- 4 files changed, 71 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bdffe8d1faa8..f469324a175b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102) - Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093) -- Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092)) +- Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092), [#8106](https://github.com/pyg-team/pytorch_geometric/pull/8106)) - Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) - Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index 458a91acff02..a314081c6612 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -1,6 +1,7 @@ import multiprocessing import sys from collections import namedtuple +from typing import Any, Dict, List import pytest import torch @@ -191,10 +192,60 @@ def test_heterogeneous_dataloader(num_workers): parser.add_argument('--num_workers', type=int, default=0) args = parser.parse_args() - dataset = QM9('/tmp/QM9') - loader = DataLoader(dataset, batch_size=128, shuffle=True, - num_workers=args.num_workers) + kwargs = dict(batch_size=128, shuffle=True, num_workers=args.num_workers) + in_memory_dataset = QM9('/tmp/QM9') + loader = DataLoader(in_memory_dataset, **kwargs) + + print('In-Memory Dataset:') + for _ in range(2): + print(f'Start loading {len(loader)} mini-batches ... ', end='') + t = time.perf_counter() + for batch in loader: + pass + print(f'Done! 
[{time.perf_counter() - t:.4f}s]') + + class OnDiskQM9(OnDiskDataset): + def __init__(self, root: str): + schema = { + 'x': dict(dtype=torch.float32, size=(-1, 11)), + 'edge_index': dict(dtype=torch.int64, size=(2, -1)), + 'edge_attr': dict(dtype=torch.float, size=(-1, 8)), + 'y': dict(dtype=torch.float, size=(1, 19)), + 'z': dict(dtype=torch.int64, size=(-1, )), + 'smiles': str, + 'name': str, + 'idx': dict(dtype=torch.int64, size=(-1, )), + } + super().__init__(root, schema=schema) + + def process(self): + data_list: List[Data] = [] + for i, data in enumerate(in_memory_dataset): + data_list.append(data) + if i + 1 == len(in_memory_dataset) or (i + 1) % 1000 == 0: + self.extend(data_list) + data_list = [] + + def serialize(self, data: Data) -> Dict[str, Any]: + return dict( + x=data.x, + edge_index=data.edge_index, + edge_attr=data.edge_attr, + y=data.y, + z=data.z, + smiles=data.smiles, + name=data.name, + idx=data.idx, + ) + + def deserialize(self, data: Dict[str, Any]) -> Data: + return Data.from_dict(data) + + on_disk_dataset = OnDiskQM9(root='/tmp/OnDiskQM9') + loader = DataLoader(on_disk_dataset, **kwargs) + + print('On-Disk Dataset:') for _ in range(2): print(f'Start loading {len(loader)} mini-batches ... ', end='') t = time.perf_counter() diff --git a/torch_geometric/data/database.py b/torch_geometric/data/database.py index f4bb5afc142d..56a55f310b01 100644 --- a/torch_geometric/data/database.py +++ b/torch_geometric/data/database.py @@ -424,7 +424,9 @@ def _serialize(self, row: Any) -> List[Any]: # If we find a `torch.Tensor` that is not registered as such in # `schema`, we modify the schema in-place for improved efficiency. out: List[Any] = [] - for key, col in self._to_dict(row).items(): + row_dict = self._to_dict(row) + for key, col_schema in self.schema.items(): + col = row_dict[key] if isinstance(self.schema[key], TensorInfo): out.append(col.numpy().tobytes()) elif isinstance(col, Tensor): @@ -445,13 +447,17 @@ def _deserialize(self, row: Tuple[Any]) -> Any: # * object: Load via pickle out_dict = {} for i, (key, col_schema) in enumerate(self.schema.items()): + value = row[i] if isinstance(col_schema, TensorInfo): - out_dict[key] = torch.frombuffer( - row[i], dtype=col_schema.dtype).view(*col_schema.size) + if len(value) > 0: + tensor = torch.frombuffer(value, dtype=col_schema.dtype) + else: + tensor = torch.empty(0, dtype=col_schema.dtype) + out_dict[key] = tensor.view(*col_schema.size) elif col_schema in {int, float, str}: - out_dict[key] = row[i] + out_dict[key] = value else: - out_dict[key] = pickle.loads(row[i]) + out_dict[key] = pickle.loads(value) # In case `0` exists as integer in the schema, this means that the # schema was passed as either a single entry or a tuple: diff --git a/torch_geometric/datasets/pcqm4m.py b/torch_geometric/datasets/pcqm4m.py index 4250fad7609c..4b16bf374403 100644 --- a/torch_geometric/datasets/pcqm4m.py +++ b/torch_geometric/datasets/pcqm4m.py @@ -13,8 +13,10 @@ class PCQM4Mv2(OnDiskDataset): r"""The PCQM4Mv2 dataset from the `"OGB-LSC: A Large-Scale Challenge for Machine Learning on Graphs" `_ paper. :class:`PCQM4Mv2` is a quantum chemistry dataset originally curated under - the PubChemQC project. The task is to predict the DFT-calculated HOMO-LUMO - energy gap of molecules given their 2D molecular graphs. + the `PubChemQC project + `_. + The task is to predict the DFT-calculated HOMO-LUMO energy gap of molecules + given their 2D molecular graphs. .. 
note:: This dataset uses the :class:`OnDiskDataset` base class to load data @@ -99,7 +101,7 @@ def serialize(self, data: Data) -> Dict[str, Any]: x=data.x, edge_index=data.edge_index, edge_attr=data.edge_attr, - y=data.x, + y=data.y, smiles=data.smiles, ) From a9776e6473830e1845b4eafd4e46375127c56aae Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Sun, 1 Oct 2023 13:07:27 +0200 Subject: [PATCH 1508/2432] Documentation improvements of `WikipediaNetwork` (#8107) --- torch_geometric/datasets/wikipedia_network.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/torch_geometric/datasets/wikipedia_network.py b/torch_geometric/datasets/wikipedia_network.py index 726f389eed96..9b0a4804fa05 100644 --- a/torch_geometric/datasets/wikipedia_network.py +++ b/torch_geometric/datasets/wikipedia_network.py @@ -27,6 +27,9 @@ class WikipediaNetwork(InMemoryDataset): into five categories to predict. If set to :obj:`True`, the dataset :obj:`"crocodile"` is not available. + If set to :obj:`True`, train/validation/test splits will be + available as masks for multiple splits with shape + :obj:`[num_nodes, num_splits]`. (default: :obj:`True`) transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before every access. @@ -42,9 +45,14 @@ class WikipediaNetwork(InMemoryDataset): processed_url = ('https://raw.githubusercontent.com/graphdml-uiuc-jlu/' 'geom-gcn/f1fc0d14b3b019c562737240d06ec83b07d16a8f') - def __init__(self, root: str, name: str, geom_gcn_preprocess: bool = True, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None): + def __init__( + self, + root: str, + name: str, + geom_gcn_preprocess: bool = True, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + ): self.name = name.lower() self.geom_gcn_preprocess = geom_gcn_preprocess assert self.name in ['chameleon', 'crocodile', 'squirrel'] From be380c39615b66d5eb20242580d8484443b8384a Mon Sep 17 00:00:00 2001 From: CodeTal <61873730+CodeTal@users.noreply.github.com> Date: Mon, 2 Oct 2023 06:19:43 -0400 Subject: [PATCH 1509/2432] Hypergraph data object (#7611) Hello. I don't know the definition of a subgraph for a hypergraph (not mentioned in papers). When getting a subgraph, a hyperedge can be cut. For example, if a hyperedge connects vertices [1, 2, 3], but only [1, 3] are selected for the subgraph, then we have the option to: 1) remove this hyperedge; 2) change the hyperedge and make it only connects [1, 3]. So, in this version, I choose the second option. --------- Co-authored-by: wsad1 --- CHANGELOG.md | 1 + test/data/test_hypergraph_data.py | 171 ++++++++++++++++++++ torch_geometric/data/hypergraph_data.py | 206 ++++++++++++++++++++++++ torch_geometric/utils/__init__.py | 3 +- torch_geometric/utils/subgraph.py | 105 ++++++++++++ 5 files changed, 485 insertions(+), 1 deletion(-) create mode 100644 test/data/test_hypergraph_data.py create mode 100644 torch_geometric/data/hypergraph_data.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f469324a175b..1fbfe0e1ba32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `HyperGraphData` to support hypergraphs ([#7611](https://github.com/pyg-team/pytorch_geometric/pull/7611)) - Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102) - Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093) - Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092), [#8106](https://github.com/pyg-team/pytorch_geometric/pull/8106)) diff --git a/test/data/test_hypergraph_data.py b/test/data/test_hypergraph_data.py new file mode 100644 index 000000000000..e4484d683b99 --- /dev/null +++ b/test/data/test_hypergraph_data.py @@ -0,0 +1,171 @@ +import pytest +import torch + +import torch_geometric +from torch_geometric.data.hypergraph_data import HyperGraphData +from torch_geometric.loader import DataLoader + + +def test_hypergraph_data(): + torch_geometric.set_debug(True) + + x = torch.tensor([[1, 3, 5, 7], [2, 4, 6, 8], [7, 8, 9, 10]], + dtype=torch.float).t() + edge_index = torch.tensor([[0, 1, 2, 1, 2, 3, 0, 2, 3], + [0, 0, 0, 1, 1, 1, 2, 2, 2]]) + data = HyperGraphData(x=x, edge_index=edge_index).to(torch.device('cpu')) + data.validate(raise_on_error=True) + + assert data.num_nodes == 4 + assert data.num_edges == 3 + + assert data.node_attrs() == ['x'] + assert data.edge_attrs() == ['edge_index'] + + assert data.x.tolist() == x.tolist() + assert data['x'].tolist() == x.tolist() + assert data.get('x').tolist() == x.tolist() + assert data.get('y', 2) == 2 + assert data.get('y', None) is None + + assert sorted(data.keys()) == ['edge_index', 'x'] + assert len(data) == 2 + assert 'x' in data and 'edge_index' in data and 'pos' not in data + + D = data.to_dict() + assert len(D) == 2 + assert 'x' in D and 'edge_index' in D + + D = data.to_namedtuple() + assert len(D) == 2 + assert D.x is not None and D.edge_index is not None + + assert data.__cat_dim__('x', data.x) == 0 + assert data.__cat_dim__('edge_index', data.edge_index) == -1 + assert data.__inc__('x', data.x) == 0 + assert torch.equal(data.__inc__('edge_index', data.edge_index), + torch.tensor([[data.num_nodes], [data.num_edges]])) + data_list = [data, data] + loader = DataLoader(data_list, batch_size=2) + batch = next(iter(loader)) + batched_edge_index = batch.edge_index + assert batched_edge_index.tolist() == [[ + 0, 1, 2, 1, 2, 3, 0, 2, 3, 4, 5, 6, 5, 6, 7, 4, 6, 7 + ], [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]] + + assert not data.x.is_contiguous() + data.contiguous() + assert data.x.is_contiguous() + + assert not data.is_coalesced() + data = data.coalesce() + assert data.is_coalesced() + + clone = data.clone() + assert clone != data + assert len(clone) == len(data) + assert clone.x.data_ptr() != data.x.data_ptr() + assert clone.x.tolist() == data.x.tolist() + assert clone.edge_index.data_ptr() != data.edge_index.data_ptr() + assert clone.edge_index.tolist() == data.edge_index.tolist() + + data['x'] = x + 1 + assert data.x.tolist() == (x + 1).tolist() + + assert str(data) == 'HyperGraphData(x=[4, 3], edge_index=[2, 9])' + + dictionary = {'x': data.x, 'edge_index': data.edge_index} + data = HyperGraphData.from_dict(dictionary) + assert sorted(data.keys()) == ['edge_index', 'x'] + + assert not data.has_isolated_nodes() + # assert not 
data.has_self_loops() + # assert data.is_undirected() + # assert not data.is_directed() + + assert data.num_nodes == 4 + assert data.num_edges == 3 + with pytest.warns(UserWarning, match='deprecated'): + assert data.num_faces is None + assert data.num_node_features == 3 + assert data.num_features == 3 + + data.edge_attr = torch.randn(data.num_edges, 2) + assert data.num_edge_features == 2 + assert data.is_edge_attr('edge_attr') + data.edge_attr = None + + data.x = None + with pytest.warns(UserWarning, match='Unable to accurately infer'): + assert data.num_nodes == 4 + + data.edge_index = None + with pytest.warns(UserWarning, match='Unable to accurately infer'): + assert data.num_nodes is None + assert data.num_edges == 0 + + data.num_nodes = 4 + assert data.num_nodes == 4 + + data = HyperGraphData(x=x, attribute=x) + assert len(data) == 2 + assert data.x.tolist() == x.tolist() + assert data.attribute.tolist() == x.tolist() + + face = torch.tensor([[0, 1], [1, 2], [2, 3]]) + data = HyperGraphData(num_nodes=4, face=face) + with pytest.warns(UserWarning, match='deprecated'): + assert data.num_faces == 2 + assert data.num_nodes == 4 + + data = HyperGraphData(title='test') + assert str(data) == "HyperGraphData(title='test')" + assert data.num_node_features == 0 + # assert data.num_edge_features == 0 + + key = value = 'test_value' + data[key] = value + assert data[key] == value + del data[value] + del data[value] # Deleting unset attributes should work as well. + + assert data.get(key) is None + assert data.get('title') == 'test' + + torch_geometric.set_debug(False) + + +def test_hypergraphdata_subgraph(): + x = torch.arange(5) + y = torch.tensor([0.]) + edge_index = torch.tensor([[0, 1, 3, 2, 4, 0, 3, 4, 2, 1, 2, 3], + [0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3]]) + edge_attr = torch.rand(4, 2) + data = HyperGraphData(x=x, y=y, edge_index=edge_index, edge_attr=edge_attr, + num_nodes=5) + + out = data.subgraph(torch.tensor([1, 2, 4])) + assert len(out) == 5 + assert torch.equal(out.x, torch.tensor([1, 2, 4])) + assert torch.equal(out.y, data.y) + assert out.edge_index.tolist() == [[1, 2, 2, 1, 0, 1], [0, 0, 1, 1, 2, 2]] + assert torch.equal(out.edge_attr, edge_attr[[1, 2, 3]]) + assert out.num_nodes == 3 + + # Test unordered selection: + out = data.subgraph(torch.tensor([3, 1, 2])) + assert len(out) == 5 + assert torch.equal(out.x, torch.tensor([3, 1, 2])) + assert torch.equal(out.y, data.y) + assert out.edge_index.tolist() == [[0, 2, 0, 2, 1, 2, 0], + [0, 0, 1, 1, 2, 2, 2]] + assert torch.equal(out.edge_attr, edge_attr[[1, 2, 3]]) + assert out.num_nodes == 3 + + out = data.subgraph(torch.tensor([False, False, False, True, True])) + assert len(out) == 5 + assert torch.equal(out.x, torch.arange(3, 5)) + assert torch.equal(out.y, data.y) + assert out.edge_index.tolist() == [[0, 1, 0, 1], [0, 0, 1, 1]] + assert torch.equal(out.edge_attr, edge_attr[[1, 2]]) + assert out.num_nodes == 2 diff --git a/torch_geometric/data/hypergraph_data.py b/torch_geometric/data/hypergraph_data.py new file mode 100644 index 000000000000..100ae37b6de1 --- /dev/null +++ b/torch_geometric/data/hypergraph_data.py @@ -0,0 +1,206 @@ +import copy +import warnings +from typing import Any, List, Optional + +import torch +from torch import Tensor + +from torch_geometric.data import Data +from torch_geometric.typing import EdgeType, NodeType, OptTensor +from torch_geometric.utils import hyper_subgraph, select + + +class HyperGraphData(Data): + r"""A data object describing a hypergraph. 
+    The data object can hold node-level, link-level and graph-level attributes.
+    This object differs from a standard :obj:`~torch_geometric.data.Data`
+    object by having hyperedges, i.e. edges that connect more
+    than two nodes. For example, in the hypergraph scenario
+    :math:`\mathcal{G} = (\mathcal{V}, \mathcal{E})` with
+    :math:`\mathcal{V} = \{ 0, 1, 2, 3, 4 \}` and
+    :math:`\mathcal{E} = \{ \{ 0, 1, 2 \}, \{ 1, 2, 3, 4 \} \}`, the
+    hyperedge index :obj:`edge_index` is represented as:
+
+    .. code-block:: python
+
+        # hypergraph with two hyperedges
+        # connecting 3 and 4 nodes, respectively
+        edge_index = torch.tensor([
+            [0, 1, 2, 1, 2, 3, 4],
+            [0, 0, 0, 1, 1, 1, 1],
+        ])
+
+    Args:
+        x (torch.Tensor, optional): Node feature matrix with shape
+            :obj:`[num_nodes, num_node_features]`. (default: :obj:`None`)
+        edge_index (LongTensor, optional): Hyperedge tensor
+            with shape :obj:`[2, num_edges*num_nodes_per_edge]`, where
+            `edge_index[1]` denotes the hyperedge index and
+            `edge_index[0]` denotes the node indices that are connected
+            by the hyperedge. (default: :obj:`None`)
+        edge_attr (torch.Tensor, optional): Edge feature matrix with shape
+            :obj:`[num_edges, num_edge_features]`.
+            (default: :obj:`None`)
+        y (torch.Tensor, optional): Graph-level or node-level ground-truth
+            labels with arbitrary shape. (default: :obj:`None`)
+        pos (torch.Tensor, optional): Node position matrix with shape
+            :obj:`[num_nodes, num_dimensions]`. (default: :obj:`None`)
+        **kwargs (optional): Additional attributes.
+    """
+    def __init__(self, x: OptTensor = None, edge_index: OptTensor = None,
+                 edge_attr: OptTensor = None, y: OptTensor = None,
+                 pos: OptTensor = None, **kwargs):
+        super().__init__(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y,
+                         pos=pos, **kwargs)
+
+    @property
+    def num_edges(self) -> int:
+        r"""Returns the number of hyperedges in the hypergraph."""
+        if self.edge_index is None:
+            return 0
+        return max(self.edge_index[1]) + 1
+
+    @property
+    def num_nodes(self) -> int:
+        num_nodes = super().num_nodes
+
+        # For a hypergraph, `edge_index[1]` does not contain node indices.
+        # Therefore, the code below prevents `num_nodes` from being
+        # estimated as the number of hyperedges.
+        if (self.edge_index is not None and num_nodes == self.num_edges):
+            return max(self.edge_index[0]) + 1
+        return num_nodes
+
+    def is_edge_attr(self, key: str) -> bool:
+        val = super().is_edge_attr(key)
+        if not val and self.edge_index is not None:
+            return key in self and self[key].size(0) == self.num_edges
+
+    def __inc__(self, key: str, value: Any, *args, **kwargs) -> Any:
+        if key == 'edge_index':
+            return torch.tensor([[self.num_nodes], [self.num_edges]])
+        else:
+            return super().__inc__(key, value, *args, **kwargs)
+
+    def subgraph(self, subset: Tensor) -> 'HyperGraphData':
+        r"""Returns the induced subgraph given by the node indices
+        :obj:`subset`.
+
+        .. note::
+
+            If only a subset of a hyperedge's nodes are to be
+            selected in the subgraph, the hyperedge will remain in the
+            subgraph, but only the selected nodes will be connected by
+            the hyperedge. Hyperedges that connect only one node in the
+            subgraph will be removed.
+
+        Examples:
+            >>> x = torch.randn(4, 16)
+            >>> edge_index = torch.tensor([
+            ...     [0, 1, 0, 2, 1, 1, 2, 4],
+            ...
[0, 0, 1, 1, 1, 2, 2, 2] + >>> ]) + >>> data = HyperGraphData(x = x, edge_index = edge_index) + >>> subset = torch.tensor([1, 2, 4]) + >>> subgraph = data.subgraph(subset) + >>> subgraph.edge_index + tensor([[2, 1, 1, 2, 4], + [0, 0, 1, 1, 1]]) + + Args: + subset (LongTensor or BoolTensor): The nodes to keep. + """ + out = hyper_subgraph(subset, self.edge_index, relabel_nodes=True, + num_nodes=self.num_nodes, return_edge_mask=True) + edge_index, _, edge_mask = out + + data = copy.copy(self) + + for key, value in self: + if key == 'edge_index': + data.edge_index = edge_index + elif key == 'num_nodes': + if subset.dtype == torch.bool: + data.num_nodes = int(subset.sum()) + else: + data.num_nodes = subset.size(0) + elif self.is_node_attr(key): + cat_dim = self.__cat_dim__(key, value) + data[key] = select(value, subset, dim=cat_dim) + elif self.is_edge_attr(key): + cat_dim = self.__cat_dim__(key, value) + data[key] = select(value, edge_mask, dim=cat_dim) + + return data + + def edge_subgraph(self, subset: Tensor) -> 'Data': + raise NotImplementedError + + def to_heterogeneous( + self, + node_type: Optional[Tensor] = None, + edge_type: Optional[Tensor] = None, + node_type_names: Optional[List[NodeType]] = None, + edge_type_names: Optional[List[EdgeType]] = None, + ): + raise NotImplementedError + + def has_isolated_nodes(self) -> bool: + if self.edge_index is None: + return False + return torch.unique(self.edge_index[0]).size(0) < self.num_nodes + + def is_directed(self) -> bool: + raise NotImplementedError + + def is_undirected(self) -> bool: + raise NotImplementedError + + def has_self_loops(self) -> bool: + raise NotImplementedError + + def validate(self, raise_on_error: bool = True) -> bool: + r"""Validates the correctness of the data.""" + cls_name = self.__class__.__name__ + status = True + + num_nodes = self.num_nodes + if num_nodes is None: + status = False + warn_or_raise(f"'num_nodes' is undefined in '{cls_name}'", + raise_on_error) + + if 'edge_index' in self: + if self.edge_index.dim() != 2 or self.edge_index.size(0) != 2: + status = False + warn_or_raise( + f"'edge_index' needs to be of shape [2, num_edges] in " + f"'{cls_name}' (found {self.edge_index.size()})", + raise_on_error) + + if 'edge_index' in self and self.edge_index.numel() > 0: + if self.edge_index.min() < 0: + status = False + warn_or_raise( + f"'edge_index' contains negative indices in " + f"'{cls_name}' (found {int(self.edge_index.min())})", + raise_on_error) + + if num_nodes is not None and self.edge_index[0].max() >= num_nodes: + status = False + warn_or_raise( + f"'edge_index' contains larger indices than the number " + f"of nodes ({num_nodes}) in '{cls_name}' " + f"(found {int(self.edge_index.max())})", raise_on_error) + + return status + + +def warn_or_raise(msg: str, raise_on_error: bool = True): + if raise_on_error: + raise ValueError(msg) + else: + warnings.warn(msg) diff --git a/torch_geometric/utils/__init__.py b/torch_geometric/utils/__init__.py index faa52fea58c3..2f3ff7c0a213 100644 --- a/torch_geometric/utils/__init__.py +++ b/torch_geometric/utils/__init__.py @@ -15,7 +15,7 @@ add_remaining_self_loops, get_self_loop_attr) from .isolated import contains_isolated_nodes, remove_isolated_nodes from .subgraph import (get_num_hops, subgraph, k_hop_subgraph, - bipartite_subgraph) + bipartite_subgraph, hyper_subgraph) from .dropout import dropout_adj, dropout_node, dropout_edge, dropout_path from .homophily import homophily from .assortativity import assortativity @@ -80,6 +80,7 @@ 'subgraph', 
     'bipartite_subgraph',
     'k_hop_subgraph',
+    'hyper_subgraph',
     'dropout_node',
     'dropout_edge',
     'dropout_path',
diff --git a/torch_geometric/utils/subgraph.py b/torch_geometric/utils/subgraph.py
index 89c37b9c41fd..6353281f1c7c 100644
--- a/torch_geometric/utils/subgraph.py
+++ b/torch_geometric/utils/subgraph.py
@@ -326,3 +326,108 @@ def k_hop_subgraph(
     edge_index = node_idx[edge_index]
 
     return subset, edge_index, inv, edge_mask
+
+
+def hyper_subgraph(
+    subset: Union[Tensor, List[int]],
+    edge_index: Tensor,
+    edge_attr: OptTensor = None,
+    relabel_nodes: bool = False,
+    num_nodes: Optional[int] = None,
+    return_edge_mask: bool = False,
+) -> Union[Tuple[Tensor, OptTensor], Tuple[Tensor, OptTensor, OptTensor]]:
+    r"""Returns the induced subgraph of the hypergraph of
+    :obj:`(edge_index, edge_attr)` containing the nodes in :obj:`subset`.
+
+    Args:
+        subset (LongTensor, BoolTensor or [int]): The nodes to keep.
+        edge_index (LongTensor): Hyperedge tensor
+            with shape :obj:`[2, num_edges*num_nodes_per_edge]`, where
+            `edge_index[1]` denotes the hyperedge index and
+            `edge_index[0]` denotes the node indices that are connected
+            by the hyperedge.
+        edge_attr (Tensor, optional): Edge weights or multi-dimensional
+            edge features of shape :obj:`[num_edges, -1]`.
+            (default: :obj:`None`)
+        relabel_nodes (bool, optional): If set to :obj:`True`, the
+            resulting :obj:`edge_index` will be relabeled to hold
+            consecutive indices starting from zero.
+            (default: :obj:`False`)
+        num_nodes (int, optional): The number of nodes, *i.e.*
+            :obj:`max_val + 1` of :attr:`edge_index`.
+            (default: :obj:`None`)
+        return_edge_mask (bool, optional): If set to :obj:`True`, will
+            return the edge mask of shape :obj:`num_edges`
+            to filter out additional edge features.
+            (default: :obj:`False`)
+
+    :rtype: (:class:`LongTensor`, :class:`Tensor`)
+
+    Examples:
+
+        >>> edge_index = torch.tensor([[0, 1, 2, 1, 2, 3, 0, 2, 3],
+        ...
[0, 0, 0, 1, 1, 1, 2, 2, 2]]) + >>> edge_attr = torch.tensor([3, 2, 6]) + >>> subset = torch.tensor([0, 3]) + >>> subgraph(subset, edge_index, edge_attr) + (tensor([[0, 3], + [0, 0]]), + tensor([ 6.])) + + >>> subgraph(subset, edge_index, edge_attr, return_edge_mask=True) + (tensor([[0, 3], + [0, 0]]), + tensor([ 6.])) + tensor([False, False, True]) + + """ + + device = edge_index.device + + if isinstance(subset, (list, tuple)): + subset = torch.tensor(subset, dtype=torch.long, device=device) + + if subset.dtype != torch.bool: + num_nodes = maybe_num_nodes(edge_index, num_nodes) + node_mask = index_to_mask(subset, size=num_nodes) + else: + num_nodes = subset.size(0) + node_mask = subset + + # Mask all connections that contain a node not in the subset + hyper_edge_connection_mask = node_mask[ + edge_index[0]] # num_edges*num_nodes_per_edge + + # Mask hyperedges that contain one or less nodes from the subset + num_edges = edge_index[1].max() + 1 + edge_mask = torch.scatter_add( + torch.zeros(num_edges, dtype=torch.long, + device=device), 0, edge_index[1], + hyper_edge_connection_mask.to(dtype=torch.long)) > 1 # num_edges + + # Mask connections if hyperedge contains one or less nodes from the subset + # or is connected to a node not in the subset + hyper_edge_connection_mask = hyper_edge_connection_mask & edge_mask[ + edge_index[1]] + + edge_index = edge_index[:, hyper_edge_connection_mask] + edge_attr = edge_attr[edge_mask] if edge_attr is not None else None + + # Relabel edges + edge_idx = torch.zeros(edge_mask.size(0), dtype=torch.long, device=device) + edge_idx[edge_mask] = torch.arange(edge_mask.sum().item(), device=device) + edge_index = torch.cat( + [edge_index[0].unsqueeze(0), edge_idx[edge_index[1]].unsqueeze(0)], 0) + + if relabel_nodes: + node_idx = torch.zeros(node_mask.size(0), dtype=torch.long, + device=device) + node_idx[subset] = torch.arange(node_mask.sum().item(), device=device) + edge_index = torch.cat( + [node_idx[edge_index[0]].unsqueeze(0), edge_index[1].unsqueeze(0)], + 0) + + if return_edge_mask: + return edge_index, edge_attr, edge_mask + else: + return edge_index, edge_attr From 13b3243abcc9bdb035b7946948f6869f9180f223 Mon Sep 17 00:00:00 2001 From: Jakub Pietrak <97102979+JakubPietrakIntel@users.noreply.github.com> Date: Mon, 2 Oct 2023 15:50:08 +0200 Subject: [PATCH 1510/2432] Add base class `DistLoader` (#8079) **[1/3] Distributed Loaders PRs** This PR includes base class of `DistributedLoader` that handles RPC connection and handling requests from `DistributedNeighborSampler` processes. It includes basic `DistNeighborSampler` functions used by the loader. 1. https://github.com/pyg-team/pytorch_geometric/pull/8079 2. https://github.com/pyg-team/pytorch_geometric/pull/8080 3. 
https://github.com/pyg-team/pytorch_geometric/pull/8085 Other PRs related to this module: DistSampler: https://github.com/pyg-team/pytorch_geometric/pull/7974 GraphStore\FeatureStore: https://github.com/pyg-team/pytorch_geometric/pull/8083 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 9 +- torch_geometric/distributed/dist_loader.py | 125 ++++++++++++++++++ .../distributed/dist_neighbor_sampler.py | 112 ++++++++++++++++ torch_geometric/distributed/rpc.py | 8 +- torch_geometric/distributed/utils.py | 58 ++++++++ 5 files changed, 304 insertions(+), 8 deletions(-) create mode 100644 torch_geometric/distributed/dist_loader.py create mode 100644 torch_geometric/distributed/dist_neighbor_sampler.py create mode 100644 torch_geometric/distributed/utils.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fbfe0e1ba32..08e028a28107 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,12 +7,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `DistLoader` base class ([#8079](https://github.com/pyg-team/pytorch_geometric/pull/8079)) - Added `HyperGraphData` to support hypergraphs ([#7611](https://github.com/pyg-team/pytorch_geometric/pull/7611)) -- Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102) -- Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093) +- Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102)) +- Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093)) - Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092), [#8106](https://github.com/pyg-team/pytorch_geometric/pull/8106)) -- Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938) -- Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894) +- Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938)) +- Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894)) - Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048)) - Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057), [#8058](https://github.com/pyg-team/pytorch_geometric/pull/8058)) - Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` 
([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038)) diff --git a/torch_geometric/distributed/dist_loader.py b/torch_geometric/distributed/dist_loader.py new file mode 100644 index 000000000000..f4fe84d95fdd --- /dev/null +++ b/torch_geometric/distributed/dist_loader.py @@ -0,0 +1,125 @@ +import atexit +import logging +import os +from typing import Any, Dict, List, Optional, Union + +import torch.multiprocessing as mp + +from torch_geometric.distributed.dist_context import DistContext, DistRole +from torch_geometric.distributed.dist_neighbor_sampler import close_sampler +from torch_geometric.distributed.rpc import global_barrier, init_rpc + + +class DistLoader: + r"""A base class for creating distributed data loading routines. + + Args: + current_ctx (DistContext): Distributed context info of the current + process. + rpc_worker_names (Dict[DistRole, List[str]]): RPC workers identifiers. + master_addr (str, optional): RPC address for distributed loader + communication. + Refers to the IP address of the master node. (default: :obj:`None`) + master_port (int or str, optional): The open port for RPC communication + with the master node. (default: :obj:`None`) + channel (mp.Queue, optional): A communication channel for messages. + (default: :obj:`None`) + num_rpc_threads (int, optional): The number of threads in the + thread-pool used by + :class:`~torch.distributed.rpc.TensorPipeAgent` to execute + requests. (default: :obj:`16`) + rpc_timeout (int, optional): The default timeout in seconds for RPC + requests. + If the RPC has not completed in this timeframe, an exception will + be raised. + Callers can override this timeout for + individual RPCs in :meth:`~torch.distributed.rpc.rpc_sync` and + :meth:`~torch.distributed.rpc.rpc_async` if necessary. + (default: :obj:`180`) + """ + def __init__( + self, + current_ctx: DistContext, + rpc_worker_names: Dict[DistRole, List[str]], + master_addr: Optional[str] = None, + master_port: Optional[Union[int, str]] = None, + channel: Optional[mp.Queue] = None, + num_rpc_threads: int = 16, + rpc_timeout: int = 180, + **kwargs, + ): + if master_addr is None and os.environ.get('MASTER_ADDR') is not None: + master_addr = os.environ['MASTER_ADDR'] + if master_addr is None: + raise ValueError(f"Missing master address for RPC communication " + f"in '{self.__class__.__name__}'. Try to provide " + f"it or set it via the 'MASTER_ADDR' environment " + f"variable.") + + if master_port is None and os.environ.get('MASTER_PORT') is not None: + master_port = int(os.environ['MASTER_PORT']) + if master_port is None: + raise ValueError(f"Missing master port for RPC communication in " + f"'{self.__class__.__name__}'. 
Try to provide it " + f"or set it via the 'MASTER_ADDR' environment " + f"variable.") + + assert num_rpc_threads > 0 + assert rpc_timeout > 0 + + self.current_ctx = current_ctx + self.rpc_worker_names = rpc_worker_names + self.master_addr = master_addr + self.master_port = master_port + self.channel = channel or mp.Queue() + self.pid = mp.current_process().pid + self.num_rpc_threads = num_rpc_threads + self.rpc_timeout = rpc_timeout + self.num_workers = kwargs.get('num_workers', 0) + + logging.info(f"[{self}] MASTER_ADDR={master_addr}, " + f"MASTER_PORT={master_port}") + + if self.num_workers == 0: # Initialize RPC in main process: + self.worker_init_fn(0) + + def channel_get(self, out: Any) -> Any: + if self.channel is not None: + out = self.channel.get() + logging.debug(f"[{self}] Retrieved message") + return out + + def worker_init_fn(self, worker_id: int): + try: + num_sampler_proc = self.num_workers if self.num_workers > 0 else 1 + self.current_ctx_worker = DistContext( + world_size=self.current_ctx.world_size * num_sampler_proc, + rank=self.current_ctx.rank * num_sampler_proc + worker_id, + global_world_size=self.current_ctx.world_size * + num_sampler_proc, + global_rank=self.current_ctx.rank * num_sampler_proc + + worker_id, + group_name='mp_sampling_worker', + ) + + init_rpc( + current_ctx=self.current_ctx_worker, + rpc_worker_names={}, + master_addr=self.master_addr, + master_port=self.master_port, + num_rpc_threads=self.num_rpc_threads, + rpc_timeout=self.rpc_timeout, + ) + assert hasattr(self, 'neighbor_sampler') + self.neighbor_sampler.register_sampler_rpc() + self.neighbor_sampler.init_event_loop() + # close RPC & worker group at exit: + atexit.register(close_sampler, worker_id, self.neighbor_sampler) + global_barrier(timeout=10) # Wait for all workers to initialize. 
+ + except RuntimeError: + raise RuntimeError(f"`{self}.init_fn()` could not initialize the " + f"worker loop of the neighbor sampler") + + def __repr__(self) -> str: + return f'{self.__class__.__name__}(pid={self.pid})' diff --git a/torch_geometric/distributed/dist_neighbor_sampler.py b/torch_geometric/distributed/dist_neighbor_sampler.py new file mode 100644 index 000000000000..1bdb3300236b --- /dev/null +++ b/torch_geometric/distributed/dist_neighbor_sampler.py @@ -0,0 +1,112 @@ +import logging +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch.multiprocessing as mp + +from torch_geometric.distributed import LocalFeatureStore, LocalGraphStore +from torch_geometric.distributed.dist_context import DistContext, DistRole +from torch_geometric.distributed.event_loop import ConcurrentEventLoop +from torch_geometric.distributed.rpc import ( + RPCCallBase, + RPCRouter, + rpc_partition_to_workers, + rpc_register, + shutdown_rpc, +) +from torch_geometric.sampler import NeighborSampler +from torch_geometric.sampler.base import NumNeighbors, SubgraphType +from torch_geometric.typing import EdgeType + +NumNeighborsType = Union[NumNeighbors, List[int], Dict[EdgeType, List[int]]] + + +class RPCSamplingCallee(RPCCallBase): + r"""A wrapper for RPC callee that will perform RPC sampling from remote + processes.""" + def __init__(self, sampler: NeighborSampler): + super().__init__() + self.sampler = sampler + + def rpc_async(self, *args, **kwargs) -> Any: + return self.sampler._sample_one_hop(*args, **kwargs) + + def rpc_sync(self, *args, **kwargs) -> Any: + pass + + +class DistNeighborSampler: + r"""An implementation of a distributed and asynchronised neighbor sampler + used by :class:`~torch_geometric.distributed.DistNeighborLoader`.""" + def __init__( + self, + current_ctx: DistContext, + rpc_worker_names: Dict[DistRole, List[str]], + data: Tuple[LocalGraphStore, LocalFeatureStore], + num_neighbors: NumNeighborsType, + channel: Optional[mp.Queue] = None, + replace: bool = False, + subgraph_type: Union[SubgraphType, str] = 'directional', + disjoint: bool = False, + temporal_strategy: str = 'uniform', + time_attr: Optional[str] = None, + concurrency: int = 1, + **kwargs, + ): + self.current_ctx = current_ctx + self.rpc_worker_names = rpc_worker_names + + self.feature_store, self.graph_store = data + assert isinstance(self.dist_graph, LocalGraphStore) + assert isinstance(self.dist_feature_store, LocalFeatureStore) + self.is_hetero = self.dist_graph.meta['is_hetero'] + + self.num_neighbors = num_neighbors + self.channel = channel or mp.Queue() + self.concurrency = concurrency + self.event_loop = None + self.replace = replace + self.subgraph_type = SubgraphType(subgraph_type) + self.disjoint = disjoint + self.temporal_strategy = temporal_strategy + self.time_attr = time_attr + self.with_edge_attr = self.dist_feature.has_edge_attr() + self.edge_permutation = None # TODO: Debug edge_perm for LinkLoader + + def register_sampler_rpc(self) -> None: + partition2workers = rpc_partition_to_workers( + current_ctx=self.current_ctx, + num_partitions=self.dist_graph.num_partitions, + current_partition_idx=self.dist_graph.partition_idx, + ) + self.rpc_router = RPCRouter(partition2workers) + self.dist_feature.set_rpc_router(self.rpc_router) + + self._sampler = NeighborSampler( + data=(self.dist_feature_store, self.dist_graph_store), + num_neighbors=self.num_neighbors, + subgraph_type=self.subgraph_type, + replace=self.replace, + disjoint=self.disjoint, + temporal_strategy=self.temporal_strategy, + 
time_attr=self.time_attr, + ) + rpc_sample_callee = RPCSamplingCallee(self._sampler) + self.rpc_sample_callee_id = rpc_register(rpc_sample_callee) + + def init_event_loop(self) -> None: + self.event_loop = ConcurrentEventLoop(self.concurrency) + self.event_loop.start_loop() + + +# Sampling Utilities ########################################################## + + +def close_sampler(worker_id: int, sampler: DistNeighborSampler): + # Make sure that mp.Queue is empty at exit and RAM is cleared: + try: + logging.info(f"Closing event loop for worker ID {worker_id}") + sampler.event_loop.shutdown_loop() + except AttributeError: + pass + logging.info(f"Closing RPC for worker ID {worker_id}") + shutdown_rpc(graceful=True) diff --git a/torch_geometric/distributed/rpc.py b/torch_geometric/distributed/rpc.py index 761b61694f6f..fe8fcdbacd54 100644 --- a/torch_geometric/distributed/rpc.py +++ b/torch_geometric/distributed/rpc.py @@ -2,7 +2,7 @@ import logging import threading from abc import ABC, abstractmethod -from typing import Callable, Dict, List +from typing import Callable, Dict, List, Optional from torch.distributed import rpc @@ -23,7 +23,7 @@ def rpc_require_initialized(func: Callable) -> Callable: @rpc_require_initialized -def global_all_gather(obj, timeout=None): +def global_all_gather(obj, timeout: Optional[int] = None): r"""Gathers objects from all groups in a list.""" if timeout is None: return rpc.api._all_gather(obj) @@ -31,7 +31,7 @@ def global_all_gather(obj, timeout=None): @rpc_require_initialized -def global_barrier(timeout=None): +def global_barrier(timeout: Optional[int] = None): r""" Block until all local and remote RPC processes.""" try: global_all_gather(obj=None, timeout=timeout) @@ -45,7 +45,7 @@ def init_rpc( master_addr: str, master_port: int, num_rpc_threads: int = 16, - rpc_timeout: float = 240, + rpc_timeout: int = 240, ): with _rpc_init_lock: if rpc_is_initialized(): diff --git a/torch_geometric/distributed/utils.py b/torch_geometric/distributed/utils.py new file mode 100644 index 000000000000..0308a5f99a66 --- /dev/null +++ b/torch_geometric/distributed/utils.py @@ -0,0 +1,58 @@ +from typing import Dict, Optional + +import torch +from torch import Tensor + +from torch_geometric.data import HeteroData +from torch_geometric.distributed import LocalFeatureStore, LocalGraphStore + + +def filter_dist_store( + feature_store: LocalFeatureStore, + graph_store: LocalGraphStore, + node_dict: Dict[str, Tensor], + row_dict: Dict[str, Tensor], + col_dict: Dict[str, Tensor], + edge_dict: Dict[str, Optional[Tensor]], + custom_cls: Optional[HeteroData] = None, + meta: Optional[Dict[str, Tensor]] = None, +) -> HeteroData: + r"""Constructs a :class:`HeteroData` object from a feature store that only + holds nodes in `node` end edges in `edge` for each node and edge type, + respectively. 
Sorted attribute values are provided as metadata from + :class:`DistNeighborSampler`.""" + # Construct a new `HeteroData` object: + data = custom_cls() if custom_cls is not None else HeteroData() + nfeats, nlabels, efeats = meta[-3:] + + # Filter edge storage: + required_edge_attrs = [] + for attr in graph_store.get_all_edge_attrs(): + key = attr.edge_type + if key in row_dict and key in col_dict: + required_edge_attrs.append(attr) + edge_index = torch.stack([row_dict[key], col_dict[key]], dim=0) + data[attr.edge_type].edge_index = edge_index + + # Filter node storage: + required_node_attrs = [] + for attr in feature_store.get_all_tensor_attrs(): + if attr.group_name in node_dict: + attr.index = node_dict[attr.group_name] + required_node_attrs.append(attr) + data[attr.group_name].num_nodes = attr.index.size(0) + + if nfeats is not None: + for attr in required_node_attrs: + if nfeats[attr.group_name] is not None: + data[attr.group_name][attr.attr_name] = nfeats[attr.group_name] + + if efeats is not None: + for attr in required_edge_attrs: + if efeats[attr.edge_type] is not None: + data[attr.edge_type].edge_attr = efeats[attr.edge_type] + + for label in nlabels: + data[label].y = nlabels[label] + + return data From 521e7dc986d79ce7005175e6d8fddd7fb921901a Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Mon, 2 Oct 2023 22:27:49 +0530 Subject: [PATCH 1511/2432] Add TensorFrame support for`Data` (#8110) Supports `TensorFrame` attributes in `Data`. --------- Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/data/test_data.py | 47 +++++++++++++++++++++++++++++++ torch_geometric/data/data.py | 12 ++++++-- torch_geometric/data/storage.py | 50 +++++++++++++++++++++++++-------- torch_geometric/typing.py | 11 ++++++++ torch_geometric/utils/mask.py | 8 ++++++ torch_geometric/utils/select.py | 14 +++++++-- 7 files changed, 127 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 08e028a28107..bdf1eae2162d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110)) - Added the `DistLoader` base class ([#8079](https://github.com/pyg-team/pytorch_geometric/pull/8079)) - Added `HyperGraphData` to support hypergraphs ([#7611](https://github.com/pyg-team/pytorch_geometric/pull/7611)) - Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102)) diff --git a/test/data/test_data.py b/test/data/test_data.py index e52f5f8c6d3a..e762b179d4b2 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -8,6 +8,7 @@ from torch_geometric.data import Data from torch_geometric.data.storage import AttrType from torch_geometric.testing import withPackage +from torch_geometric.typing import TensorFrame def test_data(): @@ -481,3 +482,49 @@ def test_data_generate_ids(): assert len(data) == 4 assert data.n_id.tolist() == [0, 1, 2] assert data.e_id.tolist() == [0, 1, 2, 3, 4] + + +def get_fake_tensor_frame(num_rows: int) -> TensorFrame: + import torch_frame + + feat_dict = { + torch_frame.categorical: torch.randint(0, 3, size=(num_rows, 3)), + torch_frame.numerical: torch.randn(size=(num_rows, 2)), + } + col_names_dict = { + torch_frame.categorical: ['a', 'b', 'c'], + torch_frame.numerical: ['x', 'y'], + } + y = torch.randn(num_rows) + + return TensorFrame( + feat_dict=feat_dict, + col_names_dict=col_names_dict, + y=y, + ) + + +@withPackage('torch_frame') +def test_data_with_tensor_frame(): + tf = get_fake_tensor_frame(num_rows=10) + data = Data(tf=tf, edge_index=torch.randint(0, 10, size=(2, 20))) + + # Test basic attributes: + assert data.is_node_attr('x') + assert data.num_nodes == tf.num_rows + assert data.num_edges == 20 + assert data.num_node_features == tf.num_cols + + # Test subgraph: + index = torch.tensor([1, 2, 3]) + sub_data = data.subgraph(index) + assert sub_data.num_nodes == 3 + for key, value in sub_data.tf.feat_dict.items(): + assert torch.allclose(value, tf.feat_dict[key][index]) + + mask = torch.tensor( + [False, True, True, True, False, False, False, False, False, False]) + data_sub = data.subgraph(mask) + assert data_sub.num_nodes == 3 + for key, value in sub_data.tf.feat_dict.items(): + assert torch.allclose(value, tf.feat_dict[key][mask]) diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 935c3b43ab13..1a855bf2828f 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -438,9 +438,15 @@ class Data(BaseData, FeatureStore, GraphStore): :obj:`[num_nodes, num_dimensions]`. (default: :obj:`None`) **kwargs (optional): Additional attributes. 
""" - def __init__(self, x: OptTensor = None, edge_index: OptTensor = None, - edge_attr: OptTensor = None, y: OptTensor = None, - pos: OptTensor = None, **kwargs): + def __init__( + self, + x: Optional[Tensor] = None, + edge_index: OptTensor = None, + edge_attr: OptTensor = None, + y: OptTensor = None, + pos: OptTensor = None, + **kwargs, + ): # `Data` doesn't support group_name, so we need to adjust `TensorAttr` # accordingly here to avoid requiring `group_name` to be set: super().__init__(tensor_attr_cls=DataTensorAttr) diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index cc6dda14288e..9860165af7f2 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -22,7 +22,12 @@ from torch import Tensor from torch_geometric.data.view import ItemsView, KeysView, ValuesView -from torch_geometric.typing import EdgeType, NodeType, SparseTensor +from torch_geometric.typing import ( + EdgeType, + NodeType, + SparseTensor, + TensorFrame, +) from torch_geometric.utils import ( coalesce, contains_isolated_nodes, @@ -31,7 +36,7 @@ sort_edge_index, ) -N_KEYS = {'x', 'feat', 'pos', 'batch', 'node_type', 'n_id'} +N_KEYS = {'x', 'feat', 'pos', 'batch', 'node_type', 'n_id', 'tf'} E_KEYS = {'edge_index', 'edge_weight', 'edge_attr', 'edge_type', 'e_id'} @@ -304,6 +309,8 @@ def num_nodes(self) -> Optional[int]: if isinstance(value, np.ndarray) and key in N_KEYS: cat_dim = self._parent().__cat_dim__(key, value, self) return value.shape[cat_dim] + if isinstance(value, TensorFrame) and key in N_KEYS: + return value.num_rows for key, value in self.items(): if isinstance(value, Tensor) and 'node' in key: cat_dim = self._parent().__cat_dim__(key, value, self) @@ -311,6 +318,8 @@ def num_nodes(self) -> Optional[int]: if isinstance(value, np.ndarray) and 'node' in key: cat_dim = self._parent().__cat_dim__(key, value, self) return value.shape[cat_dim] + if isinstance(value, TensorFrame) and 'node' in key: + return value.num_rows if 'adj' in self and isinstance(self.adj, SparseTensor): return self.adj.size(0) if 'adj_t' in self and isinstance(self.adj_t, SparseTensor): @@ -335,10 +344,14 @@ def num_nodes(self) -> Optional[int]: @property def num_node_features(self) -> int: - if 'x' in self and isinstance(self.x, (Tensor, np.ndarray)): + if 'x' in self and isinstance(self.x, Tensor): + return 1 if self.x.dim() == 1 else self.x.size(-1) + if 'x' in self and isinstance(self.x, np.ndarray): return 1 if self.x.ndim == 1 else self.x.shape[-1] if 'x' in self and isinstance(self.x, SparseTensor): return 1 if self.x.dim() == 1 else self.x.size(-1) + if 'x' in self and isinstance(self.x, TensorFrame): + return self.x.num_cols return 0 @property @@ -356,7 +369,8 @@ def is_node_attr(self, key: str) -> bool: value = self[key] - if isinstance(value, (list, tuple)) and len(value) == self.num_nodes: + if (isinstance(value, (list, tuple, TensorFrame)) + and len(value) == self.num_nodes): self._cached_attr[AttrType.NODE].add(key) return True @@ -423,13 +437,23 @@ def num_edges(self) -> int: if 'num_edges' in self: return self['num_edges'] for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and key in E_KEYS: + if isinstance(value, Tensor) and key in E_KEYS: + cat_dim = self._parent().__cat_dim__(key, value, self) + return value.size(cat_dim) + if isinstance(value, Tensor) and key in E_KEYS: cat_dim = self._parent().__cat_dim__(key, value, self) return value.shape[cat_dim] + if isinstance(value, TensorFrame) and key in E_KEYS: + return value.num_rows for 
key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and 'edge' in key: + if isinstance(value, Tensor) and 'edge' in key: + cat_dim = self._parent().__cat_dim__(key, value, self) + return value.size(cat_dim) + if isinstance(value, np.ndarray) and 'edge' in key: cat_dim = self._parent().__cat_dim__(key, value, self) return value.shape[cat_dim] + if isinstance(value, TensorFrame) and 'edge' in key: + return value.num_rows for value in self.values('adj', 'adj_t'): if isinstance(value, SparseTensor): return value.nnz() @@ -439,8 +463,9 @@ def num_edges(self) -> int: @property def num_edge_features(self) -> int: - if ('edge_attr' in self and isinstance(self.edge_attr, - (Tensor, np.ndarray))): + if 'edge_attr' in self and isinstance(self.edge_attr, Tensor): + return 1 if self.edge_attr.dim() == 1 else self.edge_attr.size(-1) + if 'edge_attr' in self and isinstance(self.edge_attr, np.ndarray): return 1 if self.edge_attr.ndim == 1 else self.edge_attr.shape[-1] return 0 @@ -475,7 +500,8 @@ def is_edge_attr(self, key: str) -> bool: value = self[key] - if isinstance(value, (list, tuple)) and len(value) == self.num_edges: + if (isinstance(value, (list, tuple, TensorFrame)) + and len(value) == self.num_edges): self._cached_attr[AttrType.EDGE].add(key) return True @@ -609,7 +635,8 @@ def is_node_attr(self, key: str) -> bool: value = self[key] - if isinstance(value, (list, tuple)) and len(value) == self.num_nodes: + if (isinstance(value, (list, tuple, TensorFrame)) + and len(value) == self.num_nodes): self._cached_attr[AttrType.NODE].add(key) return True @@ -655,7 +682,8 @@ def is_edge_attr(self, key: str) -> bool: value = self[key] - if isinstance(value, (list, tuple)) and len(value) == self.num_edges: + if (isinstance(value, (list, tuple, TensorFrame)) + and len(value) == self.num_edges): self._cached_attr[AttrType.EDGE].add(key) return True diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index 190e15f50a65..bdfe404eb24a 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -211,6 +211,17 @@ def masked_select_nnz(src: SparseTensor, mask: Tensor, raise ImportError("'masked_select_nnz' requires 'torch-sparse'") +try: + import torch_frame # noqa + WITH_TORCH_FRAME = True + from torch_frame import TensorFrame +except Exception: + WITH_TORCH_FRAME = False + + class TensorFrame: + pass + + try: import intel_extension_for_pytorch # noqa WITH_IPEX = True diff --git a/torch_geometric/utils/mask.py b/torch_geometric/utils/mask.py index 95ab71ce7a8d..d9b9228bf67b 100644 --- a/torch_geometric/utils/mask.py +++ b/torch_geometric/utils/mask.py @@ -3,6 +3,8 @@ import torch from torch import Tensor +from torch_geometric.typing import TensorFrame + def mask_select(src: Tensor, dim: int, mask: Tensor) -> Tensor: r"""Returns a new tensor which masks the :obj:`src` tensor along the @@ -15,6 +17,12 @@ def mask_select(src: Tensor, dim: int, mask: Tensor) -> Tensor: index with. 
""" assert mask.dim() == 1 + + if not torch.jit.is_scripting(): + if isinstance(src, TensorFrame): + assert dim == 0 and src.num_rows == mask.numel() + return src[mask] + assert src.size(dim) == mask.numel() dim = dim + src.dim() if dim < 0 else dim assert dim >= 0 and dim < src.dim() diff --git a/torch_geometric/utils/select.py b/torch_geometric/utils/select.py index 54d0a3961f5d..fede7fb2f1f9 100644 --- a/torch_geometric/utils/select.py +++ b/torch_geometric/utils/select.py @@ -3,12 +3,16 @@ import torch from torch import Tensor +from torch_geometric.typing import TensorFrame from torch_geometric.utils.mask import mask_select from torch_geometric.utils.sparse import is_torch_sparse_tensor -def select(src: Union[Tensor, List[Any]], index_or_mask: Tensor, - dim: int) -> Union[Tensor, List[Any]]: +def select( + src: Union[Tensor, List[Any], TensorFrame], + index_or_mask: Tensor, + dim: int, +) -> Union[Tensor, List[Any]]: r"""Selects the input tensor or input list according to a given index or mask vector. @@ -29,6 +33,12 @@ def select(src: Union[Tensor, List[Any]], index_or_mask: Tensor, return [src[i] for i, m in enumerate(index_or_mask) if m] return [src[i] for i in index_or_mask] + if isinstance(src, TensorFrame): + assert dim == 0 + if index_or_mask.dtype == torch.bool: + return mask_select(src, dim, index_or_mask) + return src[index_or_mask] + raise ValueError(f"Encountered invalid input type (got '{type(src)}')") From 27c253687c2afc851f8fc9e48106bee51bbfc359 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 08:34:24 +0000 Subject: [PATCH 1512/2432] [pre-commit.ci] pre-commit suggestions (#8115) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/google/yapf: v0.40.0 → v0.40.2](https://github.com/google/yapf/compare/v0.40.0...v0.40.2) - [github.com/PyCQA/flake8: 6.0.0 → 6.1.0](https://github.com/PyCQA/flake8/compare/6.0.0...6.1.0) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- .pre-commit-config.yaml | 4 ++-- torch_geometric/nn/models/dimenet_utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3e932718180e..3283a3fbcce7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,7 +42,7 @@ repos: # args: [--min=10, .] 
- repo: https://github.com/google/yapf - rev: v0.40.0 + rev: v0.40.2 hooks: - id: yapf name: Format code @@ -55,7 +55,7 @@ repos: name: Sort imports - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 6.1.0 hooks: - id: flake8 name: Check PEP8 diff --git a/torch_geometric/nn/models/dimenet_utils.py b/torch_geometric/nn/models/dimenet_utils.py index 22a31119df79..0750882cdf4c 100644 --- a/torch_geometric/nn/models/dimenet_utils.py +++ b/torch_geometric/nn/models/dimenet_utils.py @@ -111,7 +111,7 @@ def real_sph_harm(k, zero_m_only=True, spherical_coordinates=True): z = sym.symbols('z') for i in range(len(P_l_m)): for j in range(len(P_l_m[i])): - if type(P_l_m[i][j]) != int: + if not isinstance(P_l_m[i][j], int): P_l_m[i][j] = P_l_m[i][j].subs(z, sym.cos(theta)) if not zero_m_only: phi = sym.symbols('phi') From 4ac86965256898b2abc8f1f2c124bacc83df34d1 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Tue, 3 Oct 2023 11:31:30 +0200 Subject: [PATCH 1513/2432] `InMemoryDataset.to_on_disk_dataset()` conversion (#8116) --- CHANGELOG.md | 1 + test/data/test_dataset.py | 37 +++++++++ test/loader/test_dataloader.py | 42 +--------- torch_geometric/data/in_memory_dataset.py | 96 +++++++++++++++++++++++ torch_geometric/data/on_disk_dataset.py | 3 +- torch_geometric/datasets/ba_shapes.py | 2 +- torch_geometric/datasets/fake.py | 4 +- torch_geometric/datasets/karate.py | 2 +- 8 files changed, 142 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bdf1eae2162d..1f9da5e5ad24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added the `to_on_disk_dataset()` method to convert `InMemoryDataset` instances to `OnDiskDataset` instances ([#8116](https://github.com/pyg-team/pytorch_geometric/pull/8116)) - Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110)) - Added the `DistLoader` base class ([#8079](https://github.com/pyg-team/pytorch_geometric/pull/8079)) - Added `HyperGraphData` to support hypergraphs ([#7611](https://github.com/pyg-team/pytorch_geometric/pull/7611)) diff --git a/test/data/test_dataset.py b/test/data/test_dataset.py index ad0538557b7b..c6ad968ddd89 100644 --- a/test/data/test_dataset.py +++ b/test/data/test_dataset.py @@ -4,7 +4,9 @@ import torch from torch_geometric.data import Data, HeteroData, InMemoryDataset +from torch_geometric.datasets import KarateClub from torch_geometric.testing import withPackage +from torch_geometric.transforms import BaseTransform from torch_geometric.typing import SparseTensor @@ -391,3 +393,38 @@ def download(self): pass MyTestDataset() + + +@withPackage('sqlite3') +def test_to_on_disk_dataset(tmp_path): + class MyTransform(BaseTransform): + def forward(self, data: Data) -> Data: + data.z = 'test_str' + return data + + in_memory_dataset = KarateClub(transform=MyTransform()) + + with pytest.raises(ValueError, match="root directory of 'KarateClub'"): + in_memory_dataset.to_on_disk_dataset() + + on_disk_dataset = in_memory_dataset.to_on_disk_dataset(tmp_path, log=False) + assert str(on_disk_dataset) == 'OnDiskKarateClub()' + assert on_disk_dataset.schema == { + 'x': dict(dtype=torch.float32, size=(-1, 34)), + 'edge_index': dict(dtype=torch.int64, size=(2, -1)), + 'y': dict(dtype=torch.int64, size=(-1, )), + 'train_mask': dict(dtype=torch.bool, size=(-1, )), + } + assert in_memory_dataset.transform == on_disk_dataset.transform + + data1 = in_memory_dataset[0] + data2 = on_disk_dataset[0] 
+ + assert len(data1) == len(data2) + assert torch.allclose(data1.x, data2.x) + assert torch.equal(data1.edge_index, data2.edge_index) + assert torch.equal(data1.y, data2.y) + assert torch.equal(data1.train_mask, data2.train_mask) + assert data1.z == data2.z + + on_disk_dataset.close() diff --git a/test/loader/test_dataloader.py b/test/loader/test_dataloader.py index a314081c6612..50113f380bb3 100644 --- a/test/loader/test_dataloader.py +++ b/test/loader/test_dataloader.py @@ -1,7 +1,6 @@ import multiprocessing import sys from collections import namedtuple -from typing import Any, Dict, List import pytest import torch @@ -205,44 +204,7 @@ def test_heterogeneous_dataloader(num_workers): pass print(f'Done! [{time.perf_counter() - t:.4f}s]') - class OnDiskQM9(OnDiskDataset): - def __init__(self, root: str): - schema = { - 'x': dict(dtype=torch.float32, size=(-1, 11)), - 'edge_index': dict(dtype=torch.int64, size=(2, -1)), - 'edge_attr': dict(dtype=torch.float, size=(-1, 8)), - 'y': dict(dtype=torch.float, size=(1, 19)), - 'z': dict(dtype=torch.int64, size=(-1, )), - 'smiles': str, - 'name': str, - 'idx': dict(dtype=torch.int64, size=(-1, )), - } - super().__init__(root, schema=schema) - - def process(self): - data_list: List[Data] = [] - for i, data in enumerate(in_memory_dataset): - data_list.append(data) - if i + 1 == len(in_memory_dataset) or (i + 1) % 1000 == 0: - self.extend(data_list) - data_list = [] - - def serialize(self, data: Data) -> Dict[str, Any]: - return dict( - x=data.x, - edge_index=data.edge_index, - edge_attr=data.edge_attr, - y=data.y, - z=data.z, - smiles=data.smiles, - name=data.name, - idx=data.idx, - ) - - def deserialize(self, data: Dict[str, Any]) -> Data: - return Data.from_dict(data) - - on_disk_dataset = OnDiskQM9(root='/tmp/OnDiskQM9') + on_disk_dataset = in_memory_dataset.to_on_disk_dataset() loader = DataLoader(on_disk_dataset, **kwargs) print('On-Disk Dataset:') @@ -252,3 +214,5 @@ def deserialize(self, data: Dict[str, Any]) -> Data: for batch in loader: pass print(f'Done! [{time.perf_counter() - t:.4f}s]') + + on_disk_dataset.close() diff --git a/torch_geometric/data/in_memory_dataset.py b/torch_geometric/data/in_memory_dataset.py index 81940e4eaf19..f504713da12a 100644 --- a/torch_geometric/data/in_memory_dataset.py +++ b/torch_geometric/data/in_memory_dataset.py @@ -1,4 +1,5 @@ import copy +import os.path as osp import warnings from abc import ABC from collections.abc import Mapping, Sequence @@ -16,7 +17,9 @@ import torch from torch import Tensor +from tqdm import tqdm +import torch_geometric from torch_geometric.data import Batch, Data from torch_geometric.data.collate import collate from torch_geometric.data.data import BaseData @@ -160,6 +163,99 @@ def copy(self, idx: Optional[IndexType] = None) -> 'InMemoryDataset': dataset.data, dataset.slices = self.collate(data_list) return dataset + def to_on_disk_dataset( + self, + root: Optional[str] = None, + backend: str = 'sqlite', + log: bool = True, + ) -> 'torch_geometric.data.OnDiskDataset': + r"""Converts the :class:`InMemoryDataset` to a :class:`OnDiskDataset` + variant. Useful for distributed training and hardware instances with + limited amount of shared memory. + + root (str, optional): Root directory where the dataset should be saved. + If set to :obj:`None`, will save the dataset in + :obj:`root/on_disk`. + Note that it is important to specify :obj:`root` to account for + different dataset splits. (optional: :obj:`None`) + backend (str): The :class:`Database` backend to use. 
+ (default: :obj:`"sqlite"`) + log (bool, optional): Whether to print any console output while + processing the dataset. (default: :obj:`True`) + """ + if root is None and (self.root is None or not osp.exists(self.root)): + raise ValueError(f"The root directory of " + f"'{self.__class__.__name__}' is not specified. " + f"Please pass in 'root' when creating on-disk " + f"datasets from it.") + + root = root or osp.join(self.root, 'on_disk') + + in_memory_dataset = self + ref_data = in_memory_dataset.get(0) + if not isinstance(ref_data, Data): + raise NotImplementedError( + f"`{self.__class__.__name__}.to_on_disk_dataset()` is " + f"currently only supported on homogeneous graphs") + + # Parse the schema ==================================================== + + schema: Dict[str, Any] = {} + for key, value in ref_data.to_dict().items(): + if isinstance(value, (int, float, str)): + schema[key] = value.__class__ + elif isinstance(value, Tensor) and value.dim() == 0: + schema[key] = dict(dtype=value.dtype, size=(-1, )) + elif isinstance(value, Tensor): + size = list(value.size()) + size[ref_data.__cat_dim__(key, value)] = -1 + schema[key] = dict(dtype=value.dtype, size=tuple(size)) + else: + schema[key] = object + + # Create the on-disk dataset ========================================== + + class OnDiskDataset(torch_geometric.data.OnDiskDataset): + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + ): + super().__init__( + root=root, + transform=transform, + backend=backend, + schema=schema, + ) + + def process(self): + _iter = [ + in_memory_dataset.get(i) + for i in in_memory_dataset.indices() + ] + if log: # pragma: no cover + _iter = tqdm(_iter, desc='Converting to OnDiskDataset') + + data_list: List[Data] = [] + for i, data in enumerate(_iter): + data_list.append(data) + if i + 1 == len(in_memory_dataset) or (i + 1) % 1000 == 0: + self.extend(data_list) + data_list = [] + + def serialize(self, data: Data) -> Dict[str, Any]: + return data.to_dict() + + def deserialize(self, data: Dict[str, Any]) -> Data: + return Data.from_dict(data) + + def __repr__(self) -> str: + arg_repr = str(len(self)) if len(self) > 1 else '' + return (f'OnDisk{in_memory_dataset.__class__.__name__}(' + f'{arg_repr})') + + return OnDiskDataset(root, transform=in_memory_dataset.transform) + @property def data(self) -> Any: msg1 = ("It is not recommended to directly access the internal " diff --git a/torch_geometric/data/on_disk_dataset.py b/torch_geometric/data/on_disk_dataset.py index 3641aec59466..90b4e772f13a 100644 --- a/torch_geometric/data/on_disk_dataset.py +++ b/torch_geometric/data/on_disk_dataset.py @@ -1,5 +1,4 @@ import os -from abc import ABC from typing import Any, Callable, Iterable, List, Optional, Union from torch import Tensor @@ -10,7 +9,7 @@ from torch_geometric.data.dataset import Dataset -class OnDiskDataset(Dataset, ABC): +class OnDiskDataset(Dataset): r"""Dataset base class for creating large graph datasets which do not easily fit into CPU memory at once by leveraging a :class:`Database` backend for on-disk storage and access of data objects. 
diff --git a/torch_geometric/datasets/ba_shapes.py b/torch_geometric/datasets/ba_shapes.py index 867114d121fc..5be666d8683a 100644 --- a/torch_geometric/datasets/ba_shapes.py +++ b/torch_geometric/datasets/ba_shapes.py @@ -41,7 +41,7 @@ class BAShapes(InMemoryDataset): """ def __init__(self, connection_distribution: str = "random", transform: Optional[Callable] = None): - super().__init__('.', transform) + super().__init__(None, transform) assert connection_distribution in ['random', 'uniform'] # Build the Barabasi-Albert graph: diff --git a/torch_geometric/datasets/fake.py b/torch_geometric/datasets/fake.py index b130aa628357..8f1e2b6b9c13 100644 --- a/torch_geometric/datasets/fake.py +++ b/torch_geometric/datasets/fake.py @@ -53,7 +53,7 @@ def __init__( pre_transform: Optional[Callable] = None, **kwargs, ): - super().__init__('.', transform) + super().__init__(None, transform) if task == 'auto': task = 'graph' if num_graphs > 1 else 'node' @@ -150,7 +150,7 @@ def __init__( pre_transform: Optional[Callable] = None, **kwargs, ): - super().__init__('.', transform) + super().__init__(None, transform) if task == 'auto': task = 'graph' if num_graphs > 1 else 'node' diff --git a/torch_geometric/datasets/karate.py b/torch_geometric/datasets/karate.py index 9aa0ec8481c9..7da24dd3e015 100644 --- a/torch_geometric/datasets/karate.py +++ b/torch_geometric/datasets/karate.py @@ -38,7 +38,7 @@ class KarateClub(InMemoryDataset): - 4 """ def __init__(self, transform: Optional[Callable] = None): - super().__init__('.', transform) + super().__init__(None, transform) row = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, From 51c50c2f9d3372de34f4ac3617f396384a36558c Mon Sep 17 00:00:00 2001 From: filipekstrm Date: Tue, 3 Oct 2023 20:39:04 +0200 Subject: [PATCH 1514/2432] Added `mask` argument to `dense_to_sparse` (#8117) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added optional argument mask to dense_to_sparse so that it can correctly invert a call to to_dense_adj by returning the correct edge_index in case there are graphs with different number of nodes (and hence, the dense adjacency matrix contains some padding) --------- Co-authored-by: Filip Ekström Kelvinius Co-authored-by: rusty1s --- CHANGELOG.md | 1 + test/utils/test_sparse.py | 27 +++++++++++ torch_geometric/utils/sparse.py | 82 +++++++++++++++++++++++++++------ 3 files changed, 97 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f9da5e5ad24..532378d3167a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added support for a node-level `mask` tensor in `dense_to_sparse` ([#8117](https://github.com/pyg-team/pytorch_geometric/pull/8117)) - Added the `to_on_disk_dataset()` method to convert `InMemoryDataset` instances to `OnDiskDataset` instances ([#8116](https://github.com/pyg-team/pytorch_geometric/pull/8116)) - Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110)) - Added the `DistLoader` base class ([#8079](https://github.com/pyg-team/pytorch_geometric/pull/8079)) diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index 6c1af5e8dca0..ba9ed6c5dc63 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -49,6 +49,33 @@ def test_dense_to_sparse(): assert edge_index.tolist() == [[0, 0, 1, 2, 3], [0, 1, 0, 3, 3]] assert edge_attr.tolist() == [3, 1, 2, 1, 2] + adj = torch.tensor([ + [ + [3.0, 1.0, 0.0], + [2.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], + [ + [0.0, 1.0, 0.0], + [0.0, 2.0, 3.0], + [0.0, 5.0, 0.0], + ], + ]) + mask = torch.tensor([[True, True, False], [True, True, True]]) + + edge_index, edge_attr = dense_to_sparse(adj, mask) + + assert edge_index.tolist() == [[0, 0, 1, 2, 3, 3, 4], + [0, 1, 0, 3, 3, 4, 3]] + assert edge_attr.tolist() == [3, 1, 2, 1, 2, 3, 5] + + if is_full_test(): + jit = torch.jit.script(dense_to_sparse) + edge_index, edge_attr = jit(adj, mask) + assert edge_index.tolist() == [[0, 0, 1, 2, 3, 3, 4], + [0, 1, 0, 3, 3, 4, 3]] + assert edge_attr.tolist() == [3, 1, 2, 1, 2, 3, 5] + def test_dense_to_sparse_bipartite(): edge_index, edge_attr = dense_to_sparse(torch.rand(2, 10, 5)) diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py index 47189a68d554..f5b39b2ced2f 100644 --- a/torch_geometric/utils/sparse.py +++ b/torch_geometric/utils/sparse.py @@ -1,3 +1,4 @@ +import warnings from typing import Any, List, Optional, Tuple, Union import torch @@ -5,23 +6,29 @@ import torch_geometric.typing from torch_geometric.typing import SparseTensor -from torch_geometric.utils import coalesce +from torch_geometric.utils import coalesce, cumsum -def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]: +def dense_to_sparse( + adj: Tensor, + mask: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor]: r"""Converts a dense adjacency matrix to a sparse adjacency matrix defined by edge indices and edge attributes. Args: - adj (Tensor): The dense adjacency matrix of shape + adj (torch.Tensor): The dense adjacency matrix of shape :obj:`[num_nodes, num_nodes]` or :obj:`[batch_size, num_nodes, num_nodes]`. + mask (torch.Tensor, optional): A boolean tensor of shape + :obj:`[batch_size, num_nodes]` holding information about which + nodes are in each example are valid. (default: :obj:`None`) :rtype: (:class:`LongTensor`, :class:`Tensor`) Examples: - >>> # Forr a single adjacency matrix + >>> # For a single adjacency matrix: >>> adj = torch.tensor([[3, 1], ... [2, 0]]) >>> dense_to_sparse(adj) @@ -29,7 +36,7 @@ def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]: [0, 1, 0]]), tensor([3, 1, 2])) - >>> # For two adjacency matrixes + >>> # For two adjacency matrixes: >>> adj = torch.tensor([[[3, 1], ... [2, 0]], ... [[0, 1], @@ -38,21 +45,70 @@ def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]: (tensor([[0, 0, 1, 2, 3], [0, 1, 0, 3, 3]]), tensor([3, 1, 2, 1, 2])) + + >>> # First graph with two nodes, second with three: + >>> adj = torch.tensor([[ + ... [3, 1, 0], + ... [2, 0, 0], + ... [0, 0, 0] + ... ], [ + ... [0, 1, 0], + ... [0, 2, 3], + ... [0, 5, 0] + ... 
]]) + >>> mask = torch.tensor([ + ... [True, True, False], + ... [True, True, True] + ... ]) + >>> dense_to_sparse(adj, mask) + (tensor([[0, 0, 1, 2, 3, 3, 4], + [0, 1, 0, 3, 3, 4, 3]]), + tensor([3, 1, 2, 1, 2, 3, 5])) """ if adj.dim() < 2 or adj.dim() > 3: - raise ValueError(f"Dense adjacency matrix 'adj' must be 2- or " - f"3-dimensional (got {adj.dim()} dimensions)") + raise ValueError(f"Dense adjacency matrix 'adj' must be two- or " + f"three-dimensional (got {adj.dim()} dimensions)") + + if mask is not None and adj.dim() == 2: + warnings.warn("Mask should not be provided in case the dense " + "adjacency matrix is two-dimensional") + mask = None - edge_index = adj.nonzero().t() + if mask is not None and mask.dim() != 2: + raise ValueError(f"Mask must be two-dimensional " + f"(got {mask.dim()} dimensions)") - if edge_index.size(0) == 2: + if mask is not None and adj.size(-2) != adj.size(-1): + raise ValueError(f"Mask is only supported on quadratic adjacency " + f"matrices (got [*, {adj.size(-2)}, {adj.size(-1)}])") + + if adj.dim() == 2: + edge_index = adj.nonzero().t() edge_attr = adj[edge_index[0], edge_index[1]] return edge_index, edge_attr else: - edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]] - row = edge_index[1] + adj.size(-2) * edge_index[0] - col = edge_index[2] + adj.size(-1) * edge_index[0] - return torch.stack([row, col], dim=0), edge_attr + flatten_adj = adj.view(-1, adj.size(-1)) + if mask is not None: + flatten_adj = flatten_adj[mask.view(-1)] + edge_index = flatten_adj.nonzero().t() + edge_attr = flatten_adj[edge_index[0], edge_index[1]] + + if mask is None: + offset = torch.arange( + start=0, + end=adj.size(0) * adj.size(2), + step=adj.size(2), + device=adj.device, + ) + offset = offset.repeat_interleave(adj.size(1)) + else: + count = mask.sum(dim=-1) + offset = cumsum(count)[:-1] + offset = offset.repeat_interleave(count) + + edge_index[1] += offset[edge_index[0]] + + return edge_index, edge_attr def is_torch_sparse_tensor(src: Any) -> bool: From 1c89e751804d1eb2fb626dabc677198a1878c34d Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 4 Oct 2023 09:59:36 +0200 Subject: [PATCH 1515/2432] Skip TorchScript bug for PyTorch < 1.12 (#8123) --- test/utils/test_sparse.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/test_sparse.py b/test/utils/test_sparse.py index ba9ed6c5dc63..b8e61316b908 100644 --- a/test/utils/test_sparse.py +++ b/test/utils/test_sparse.py @@ -69,7 +69,8 @@ def test_dense_to_sparse(): [0, 1, 0, 3, 3, 4, 3]] assert edge_attr.tolist() == [3, 1, 2, 1, 2, 3, 5] - if is_full_test(): + # There is a bug in TorchScript for PyTorch < 1.12 :( + if torch_geometric.typing.WITH_PT112 and is_full_test(): jit = torch.jit.script(dense_to_sparse) edge_index, edge_attr = jit(adj, mask) assert edge_index.tolist() == [[0, 0, 1, 2, 3, 3, 4], From e9d6b9b96a10a0dca33cc2eb950c4016f9f83f84 Mon Sep 17 00:00:00 2001 From: Jinu Sunil Date: Wed, 4 Oct 2023 14:27:32 +0530 Subject: [PATCH 1516/2432] Add support for `TensorFrame` in `HeteroData` (#8118) Co-authored-by: rusty1s --- CHANGELOG.md | 2 +- test/conftest.py | 28 ++++++++++++++- test/data/test_data.py | 27 ++------------ test/data/test_hetero_data.py | 51 ++++++++++++++++++++++++++ torch_geometric/data/data.py | 7 ++++ torch_geometric/data/hetero_data.py | 56 ++++++++++++++++++++++------- torch_geometric/data/storage.py | 4 +++ 7 files changed, 136 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 532378d3167a..0af0382b5fc2 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Added support for a node-level `mask` tensor in `dense_to_sparse` ([#8117](https://github.com/pyg-team/pytorch_geometric/pull/8117)) - Added the `to_on_disk_dataset()` method to convert `InMemoryDataset` instances to `OnDiskDataset` instances ([#8116](https://github.com/pyg-team/pytorch_geometric/pull/8116)) -- Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110)) +- Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110), [#8118](https://github.com/pyg-team/pytorch_geometric/pull/8118)) - Added the `DistLoader` base class ([#8079](https://github.com/pyg-team/pytorch_geometric/pull/8079)) - Added `HyperGraphData` to support hypergraphs ([#7611](https://github.com/pyg-team/pytorch_geometric/pull/7611)) - Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102)) diff --git a/test/conftest.py b/test/conftest.py index 9c6771f97d62..5908e5112394 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,8 +1,10 @@ import functools import os.path as osp import shutil +from typing import Callable import pytest +import torch import torch_geometric.typing from torch_geometric.data import Dataset @@ -44,13 +46,37 @@ def load_dataset(root: str, name: str, *args, **kwargs) -> Dataset: @pytest.fixture(scope='session') -def get_dataset(): +def get_dataset() -> Callable: root = osp.join('/', 'tmp', 'pyg_test_datasets') yield functools.partial(load_dataset, root) if osp.exists(root): shutil.rmtree(root) +@pytest.fixture() +def get_tensor_frame() -> Callable: + import torch_frame + + def _get_tensor_frame(num_rows: int) -> torch_frame.TensorFrame: + feat_dict = { + torch_frame.categorical: torch.randint(0, 3, size=(num_rows, 3)), + torch_frame.numerical: torch.randn(size=(num_rows, 2)), + } + col_names_dict = { + torch_frame.categorical: ['a', 'b', 'c'], + torch_frame.numerical: ['x', 'y'], + } + y = torch.randn(num_rows) + + return torch_frame.TensorFrame( + feat_dict=feat_dict, + col_names_dict=col_names_dict, + y=y, + ) + + return _get_tensor_frame + + @pytest.fixture def disable_extensions(): prev_state = { diff --git a/test/data/test_data.py b/test/data/test_data.py index e762b179d4b2..2ab61dbc872d 100644 --- a/test/data/test_data.py +++ b/test/data/test_data.py @@ -8,7 +8,6 @@ from torch_geometric.data import Data from torch_geometric.data.storage import AttrType from torch_geometric.testing import withPackage -from torch_geometric.typing import TensorFrame def test_data(): @@ -484,33 +483,13 @@ def test_data_generate_ids(): assert data.e_id.tolist() == [0, 1, 2, 3, 4] -def get_fake_tensor_frame(num_rows: int) -> TensorFrame: - import torch_frame - - feat_dict = { - torch_frame.categorical: torch.randint(0, 3, size=(num_rows, 3)), - torch_frame.numerical: torch.randn(size=(num_rows, 2)), - } - col_names_dict = { - torch_frame.categorical: ['a', 'b', 'c'], - torch_frame.numerical: ['x', 'y'], - } - y = torch.randn(num_rows) - - return TensorFrame( - feat_dict=feat_dict, - col_names_dict=col_names_dict, - y=y, - ) - - @withPackage('torch_frame') -def test_data_with_tensor_frame(): - tf = get_fake_tensor_frame(num_rows=10) +def test_data_with_tensor_frame(get_tensor_frame): + tf = get_tensor_frame(num_rows=10) data = Data(tf=tf, edge_index=torch.randint(0, 10, size=(2, 20))) # Test basic attributes: - assert 
data.is_node_attr('x') + assert data.is_node_attr('tf') assert data.num_nodes == tf.num_rows assert data.num_edges == 20 assert data.num_node_features == tf.num_cols diff --git a/test/data/test_hetero_data.py b/test/data/test_hetero_data.py index 7d5b78fb967b..81f330dd1fb4 100644 --- a/test/data/test_hetero_data.py +++ b/test/data/test_hetero_data.py @@ -6,6 +6,7 @@ from torch_geometric.data import HeteroData from torch_geometric.data.storage import EdgeStorage from torch_geometric.testing import get_random_edge_index, withPackage +from torch_geometric.typing import TensorFrame x_paper = torch.randn(10, 16) x_author = torch.randn(5, 32) @@ -601,6 +602,56 @@ def test_basic_feature_store(): assert 'x' not in data['paper'].__dict__['_mapping'] +@withPackage('torch_frame') +def test_hetero_data_with_tensor_frame(get_tensor_frame): + data = HeteroData() + data['paper'].tf = get_tensor_frame(num_rows=x_paper.size(0)) + data['author'].tf = get_tensor_frame(num_rows=x_author.size(0)) + data['author', 'paper'].edge_index = edge_index_author_paper + + # Basic functionality: + assert set(data.node_attrs()) == {'tf'} + assert data.num_nodes == x_paper.size(0) + x_author.size(0) + assert data.num_node_features['paper'] == 5 + assert data.num_node_features['author'] == 5 + + # Test subgraph: + subset = { + 'paper': torch.tensor([1, 2, 3, 4]), + 'author': torch.tensor([0, 1, 2, 3]), + } + out = data.subgraph(subset) + assert set(out.node_attrs()) == {'tf'} + assert out.num_nodes == 8 + for key, value in out['paper'].tf.feat_dict.items(): + assert value.size(0) == 4 + assert torch.allclose(value, data['paper'].tf.feat_dict[key][1:5]) + for key, value in out['author'].tf.feat_dict.items(): + assert value.size(0) == 4 + assert torch.allclose(value, data['author'].tf.feat_dict[key][0:4]) + + # Test conversion to homogenous graphs and back: + for node_attrs in [None, ['tf']]: + out = data.to_homogeneous(node_attrs=node_attrs) + assert isinstance(out.tf, TensorFrame) + assert len(out.tf) == data.num_nodes + assert out.num_nodes == data.num_nodes + assert out.num_node_features == 5 + for key, value in out.tf.feat_dict.items(): + assert torch.allclose( + value, + torch.cat([ + data['paper'].tf.feat_dict[key], + data['author'].tf.feat_dict[key], + ], dim=0), + ) + + out = out.to_heterogeneous() + for node_type in data.node_types: + for key, value in data[node_type].tf.feat_dict.items(): + assert torch.allclose(value, out[node_type].tf.feat_dict[key]) + + # Graph Store ################################################################# diff --git a/torch_geometric/data/data.py b/torch_geometric/data/data.py index 1a855bf2828f..61458837d31a 100644 --- a/torch_geometric/data/data.py +++ b/torch_geometric/data/data.py @@ -36,6 +36,7 @@ NodeType, OptTensor, SparseTensor, + TensorFrame, ) from torch_geometric.utils import is_sparse, select, subgraph @@ -759,6 +760,9 @@ def to_heterogeneous( elif isinstance(value, Tensor) and self.is_node_attr(attr): cat_dim = self.__cat_dim__(attr, value) data[key][attr] = value.index_select(cat_dim, node_ids[i]) + elif (isinstance(value, TensorFrame) + and self.is_node_attr(attr)): + data[key][attr] = value[node_ids[i]] if len(data[key]) == 0: data[key].num_nodes = node_ids[i].size(0) @@ -776,6 +780,9 @@ def to_heterogeneous( elif isinstance(value, Tensor) and self.is_edge_attr(attr): cat_dim = self.__cat_dim__(attr, value) data[key][attr] = value.index_select(cat_dim, edge_ids[i]) + elif (isinstance(value, TensorFrame) + and self.is_edge_attr(attr)): + data[key][attr] = 
value[edge_ids[i]] # Add global attributes. exclude_keys = set(data.keys()) | { diff --git a/torch_geometric/data/hetero_data.py b/torch_geometric/data/hetero_data.py index b83157bb4349..d67c2b41ea95 100644 --- a/torch_geometric/data/hetero_data.py +++ b/torch_geometric/data/hetero_data.py @@ -22,6 +22,7 @@ NodeType, QueryType, SparseTensor, + TensorFrame, ) from torch_geometric.utils import ( bipartite_subgraph, @@ -868,6 +869,25 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: if len(sizes[0]) != 1 and len(set(sizes)) != 1: continue keys.append(key) + + # Check for consistent column names in `TensorFrame`: + tf_cols = defaultdict(list) + for store in stores: + for key, value in store.items(): + if isinstance(value, TensorFrame): + cols = tuple(chain(*value.col_names_dict.values())) + tf_cols[key].append(cols) + + for key, cols in tf_cols.items(): + # The attribute needs to exist in all types: + if len(cols) != len(stores): + continue + # The attributes needs to have the same column names: + lengths = set(cols) + if len(lengths) != 1: + continue + keys.append(key) + return keys if dummy_values: @@ -891,19 +911,29 @@ def _consistent_size(stores: List[BaseStorage]) -> List[str]: if key in {'ptr'}: continue values = [store[key] for store in self.node_stores] - dim = self.__cat_dim__(key, values[0], self.node_stores[0]) - dim = values[0].dim() + dim if dim < 0 else dim - # For two-dimensional features, we allow arbitrary shapes and pad - # them with zeros if necessary in case their size doesn't match: - if values[0].dim() == 2 and dim == 0: - _max = max([value.size(-1) for value in values]) - for i, v in enumerate(values): - if v.size(-1) < _max: - values[i] = torch.cat( - [v, v.new_zeros(v.size(0), _max - v.size(-1))], - dim=-1, - ) - value = torch.cat(values, dim) if len(values) > 1 else values[0] + if isinstance(values[0], TensorFrame): + # TODO (jinu) Implement `cat` function for TensorFrame. 
+ feat_dict = {} + for stype in values[0].feat_dict.keys(): + feat_dict[stype] = torch.cat( + [value.feat_dict[stype] for value in values], dim=0) + y = None + if values[0].y is not None: + y = torch.cat([value.y for value in values], dim=0) + value = TensorFrame(feat_dict, values[0].col_names_dict, y) + else: + dim = self.__cat_dim__(key, values[0], self.node_stores[0]) + dim = values[0].dim() + dim if dim < 0 else dim + # For two-dimensional features, we allow arbitrary shapes and + # pad them with zeros if necessary in case their size doesn't + # match: + if values[0].dim() == 2 and dim == 0: + _max = max([value.size(-1) for value in values]) + for i, v in enumerate(values): + if v.size(-1) < _max: + pad = v.new_zeros(v.size(0), _max - v.size(-1)) + values[i] = torch.cat([v, pad], dim=-1) + value = torch.cat(values, dim) data[key] = value if not data.can_infer_num_nodes: diff --git a/torch_geometric/data/storage.py b/torch_geometric/data/storage.py index 9860165af7f2..3805e8d0d49f 100644 --- a/torch_geometric/data/storage.py +++ b/torch_geometric/data/storage.py @@ -352,6 +352,8 @@ def num_node_features(self) -> int: return 1 if self.x.dim() == 1 else self.x.size(-1) if 'x' in self and isinstance(self.x, TensorFrame): return self.x.num_cols + if 'tf' in self and isinstance(self.tf, TensorFrame): + return self.tf.num_cols return 0 @property @@ -467,6 +469,8 @@ def num_edge_features(self) -> int: return 1 if self.edge_attr.dim() == 1 else self.edge_attr.size(-1) if 'edge_attr' in self and isinstance(self.edge_attr, np.ndarray): return 1 if self.edge_attr.ndim == 1 else self.edge_attr.shape[-1] + if 'edge_attr' in self and isinstance(self.edge_attr, TensorFrame): + return self.edge_attr.num_cols return 0 @property From af20b1b7dcb920f1279f0f3aadddf66bbae0c4a1 Mon Sep 17 00:00:00 2001 From: Anwar Said <40773404+Anwar-Said@users.noreply.github.com> Date: Wed, 4 Oct 2023 04:39:51 -0500 Subject: [PATCH 1517/2432] Adding NeuroGraph: Benchmarks for Graph Machine Learning in Brain Connectomics (NeurIPS23) (#8122) Adding NeuroGraph's implementation (class NeuroGraphStatic). It is a collection of 5 Neuroimaging graph learning benchmark datasets that span multiple categories of demographics, mental states, and cognitive traits. This paper has been accepted at NeurIPS23 Benchmarks and Datasets track. See the [documentation] (https://neurograph.readthedocs.io/en/latest/NeuroGraph.html) and the [Github](https://github.com/Anwar-Said/NeuroGraph) for more details. Looking forward to its merge into PyG. Thanks! --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: rusty1s --- CHANGELOG.md | 1 + torch_geometric/datasets/__init__.py | 2 + torch_geometric/datasets/neurograph.py | 128 +++++++++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 torch_geometric/datasets/neurograph.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0af0382b5fc2..0a4fca5a2f1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Added +- Added the `NeuroGraphDataset` benchmark collection ([#8122](https://github.com/pyg-team/pytorch_geometric/pull/8122)) - Added support for a node-level `mask` tensor in `dense_to_sparse` ([#8117](https://github.com/pyg-team/pytorch_geometric/pull/8117)) - Added the `to_on_disk_dataset()` method to convert `InMemoryDataset` instances to `OnDiskDataset` instances ([#8116](https://github.com/pyg-team/pytorch_geometric/pull/8116)) - Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110), [#8118](https://github.com/pyg-team/pytorch_geometric/pull/8118)) diff --git a/torch_geometric/datasets/__init__.py b/torch_geometric/datasets/__init__.py index 7fd36ef9a719..0387aab31e4e 100644 --- a/torch_geometric/datasets/__init__.py +++ b/torch_geometric/datasets/__init__.py @@ -61,6 +61,7 @@ from .twitch import Twitch from .airports import Airports from .lrgb import LRGBDataset +from .neurograph import NeuroGraphDataset from .malnet_tiny import MalNetTiny from .omdb import OMDB from .polblogs import PolBlogs @@ -182,6 +183,7 @@ 'Wikidata5M', 'MyketDataset', 'BrcaTcga', + 'NeuroGraphDataset', ] hetero_datasets = [ diff --git a/torch_geometric/datasets/neurograph.py b/torch_geometric/datasets/neurograph.py new file mode 100644 index 000000000000..17b509a76cc8 --- /dev/null +++ b/torch_geometric/datasets/neurograph.py @@ -0,0 +1,128 @@ +import os +import os.path as osp +import shutil +from typing import Callable, List, Optional + +import torch + +from torch_geometric.data import ( + Data, + InMemoryDataset, + download_url, + extract_zip, +) + + +class NeuroGraphDataset(InMemoryDataset): + r"""The NeuroGraph benchmark datasets from the + `"NeuroGraph: Benchmarks for Graph Machine Learning in Brain Connectomics" + `_ paper. + :class:`NeuroGraphDataset` holds a collection of five neuroimaging graph + learning datasets that span multiple categories of demographics, mental + states, and cognitive traits. + See the `documentation + `_ and the + `Github `_ for more details. + + +--------------------+---------+----------------------+ + | Dataset | #Graphs | Task | + +====================+=========+======================+ + | :obj:`HCPActivity` | 7443 | Graph Classification | + +--------------------+---------+----------------------+ + | :obj:`HCPGender` | 1078 | Graph Classification | + +--------------------+---------+----------------------+ + | :obj:`HCPAge` | 1065 | Graph Classification | + +--------------------+---------+----------------------+ + | :obj:`HCPFI` | 1071 | Graph Regression | + +--------------------+---------+----------------------+ + | :obj:`HCPWM` | 1078 | Graph Regression | + +--------------------+---------+----------------------+ + + Args: + root (str): Root directory where the dataset should be saved. + name (str): The name of the dataset (one of :obj:`"HCPGender"`, + :obj:`"HCPActivity"`, :obj:`"HCPAge"`, :obj:`"HCPFI"`, :obj:`"HCPWM"`). + transform (callable, optional): A function/transform that takes in an + :obj:`torch_geometric.data.Data` object and returns a transformed + version. The data object will be transformed before every access. + (default: :obj:`None`) + pre_transform (callable, optional): A function/transform that takes in + an :obj:`torch_geometric.data.Data` object and returns a + transformed version. The data object will be transformed before + being saved to disk. 
(default: :obj:`None`) + pre_filter (callable, optional): A function that takes in an + :obj:`torch_geometric.data.Data` object and returns a boolean + value, indicating whether the data object should be included in the + final dataset. (default: :obj:`None`) + """ + url = 'https://vanderbilt.box.com/shared/static' + filenames = { + 'HCPGender': 'r6hlz2arm7yiy6v6981cv2nzq3b0meax.zip', + 'HCPActivity': 'b4g59ibn8itegr0rpcd16m9ajb2qyddf.zip', + 'HCPAge': 'static/lzzks4472czy9f9vc8aikp7pdbknmtfe.zip', + 'HCPWM': 'xtmpa6712fidi94x6kevpsddf9skuoxy.zip', + 'HCPFI': 'g2md9h9snh7jh6eeay02k1kr9m4ido9f.zip', + } + + def __init__( + self, + root: str, + name: str, + transform: Optional[Callable] = None, + pre_transform: Optional[Callable] = None, + pre_filter: Optional[Callable] = None, + ): + assert name in self.filenames.keys() + self.name = name + + super().__init__(root, transform, pre_transform, pre_filter) + self.load(self.processed_paths[0]) + + @property + def raw_dir(self) -> str: + return os.path.join(self.root, self.name, 'raw') + + @property + def raw_file_names(self) -> str: + return 'data.pt' + + @property + def processed_dir(self) -> str: + return os.path.join(self.root, self.name, 'processed') + + @property + def processed_file_names(self) -> str: + return 'data.pt' + + def download(self): + url = f'{self.url}/{self.filenames[self.name]}' + path = download_url(http://github.com/url,%20self.raw_dir) + extract_zip(path, self.raw_dir) + os.unlink(path) + os.rename( + osp.join(self.raw_dir, self.name, 'processed', f'{self.name}.pt'), + osp.join(self.raw_dir, 'data.pt')) + shutil.rmtree(osp.join(self.raw_dir, self.name)) + + def process(self): + data, slices = torch.load(self.raw_paths[0]) + + num_samples = slices['x'].size(0) - 1 + data_list: List[Data] = [] + for i in range(num_samples): + x = data.x[slices['x'][i]:slices['x'][i + 1]] + edge_index = data.edge_index[ + :, + slices['edge_index'][i]:slices['edge_index'][i + 1], + ] + sample = Data(x=x, edge_index=edge_index, y=data.y[i]) + + if self.pre_filter is not None and not self.pre_filter(sample): + continue + + if self.pre_transform is not None: + sample = self.pre_transform(sample) + + data_list.append(sample) + + self.save(data_list, self.processed_paths[0]) From b054b7a5cbe52e1092df95925e3d370e0c596c81 Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Wed, 4 Oct 2023 12:09:44 +0200 Subject: [PATCH 1518/2432] Fix `NeuroGraphDataset` documentation (#8124) --- torch_geometric/datasets/neurograph.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/torch_geometric/datasets/neurograph.py b/torch_geometric/datasets/neurograph.py index 17b509a76cc8..f63d696a2866 100644 --- a/torch_geometric/datasets/neurograph.py +++ b/torch_geometric/datasets/neurograph.py @@ -27,21 +27,22 @@ class NeuroGraphDataset(InMemoryDataset): +--------------------+---------+----------------------+ | Dataset | #Graphs | Task | +====================+=========+======================+ - | :obj:`HCPActivity` | 7443 | Graph Classification | + | :obj:`HCPActivity` | 7,443 | Graph Classification | +--------------------+---------+----------------------+ - | :obj:`HCPGender` | 1078 | Graph Classification | + | :obj:`HCPGender` | 1,078 | Graph Classification | +--------------------+---------+----------------------+ - | :obj:`HCPAge` | 1065 | Graph Classification | + | :obj:`HCPAge` | 1,065 | Graph Classification | +--------------------+---------+----------------------+ - | :obj:`HCPFI` | 1071 | Graph Regression | + | 
:obj:`HCPFI` | 1,071 | Graph Regression | +--------------------+---------+----------------------+ - | :obj:`HCPWM` | 1078 | Graph Regression | + | :obj:`HCPWM` | 1,078 | Graph Regression | +--------------------+---------+----------------------+ Args: root (str): Root directory where the dataset should be saved. name (str): The name of the dataset (one of :obj:`"HCPGender"`, - :obj:`"HCPActivity"`, :obj:`"HCPAge"`, :obj:`"HCPFI"`, :obj:`"HCPWM"`). + :obj:`"HCPActivity"`, :obj:`"HCPAge"`, :obj:`"HCPFI"`, + :obj:`"HCPWM"`). transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before every access. From c5dca4bbd5ab3ca985e4373f063aef47cfc1316a Mon Sep 17 00:00:00 2001 From: Keren Zhou Date: Wed, 4 Oct 2023 14:55:49 -0400 Subject: [PATCH 1519/2432] Fix weight type for `segment_matmul` (#8127) `weight` should be a float tensor as `x`, the previous code initializes `weight` as an int tensor --------- Co-authored-by: Matthias Fey --- torch_geometric/typing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torch_geometric/typing.py b/torch_geometric/typing.py index bdfe404eb24a..38c935c09f3d 100644 --- a/torch_geometric/typing.py +++ b/torch_geometric/typing.py @@ -29,7 +29,7 @@ try: x = torch.randn(3, 4, device='cuda') ptr = torch.tensor([0, 2, 3], device='cuda') - weight = torch.tensor([2, 4, 4], device='cuda') + weight = torch.randn(2, 4, 4, device='cuda') out = pyg_lib.ops.segment_matmul(x, ptr, weight) except RuntimeError: WITH_GMM = False From aae8dfd0d73341e9214200808c92e5ed1a7a2f1c Mon Sep 17 00:00:00 2001 From: Matthias Fey Date: Thu, 5 Oct 2023 23:10:06 +0200 Subject: [PATCH 1520/2432] Add PyTorch 2.1.0 support (#8134) --- .github/actions/setup/action.yml | 4 +-- .github/workflows/building_pyg_conda.yml | 31 ++++++++++++-------- .github/workflows/building_rusty1s_conda.yml | 31 ++++++++++++-------- .github/workflows/full_testing.yml | 6 ++-- .github/workflows/minimal_testing.yml | 2 +- .github/workflows/prev_testing.yml | 6 ++-- .github/workflows/testing.yml | 2 +- CHANGELOG.md | 1 + README.md | 22 +++++++------- conda/pyg/README.md | 2 +- conda/pyg/build_conda.sh | 3 ++ conda/pyg/meta.yaml | 1 - conda/pytorch-geometric/README.md | 2 +- conda/pytorch-geometric/build_conda.sh | 3 ++ conda/pytorch-geometric/meta.yaml | 1 - docs/source/install/installation.rst | 21 ++++++------- docs/source/install/quick-start.html | 12 ++++---- test/nn/models/test_basic_gnn.py | 4 +-- torch_geometric/nn/models/deepgcn.py | 6 ++-- 19 files changed, 89 insertions(+), 71 deletions(-) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index 5e2bcaf9ea43..bed489916a2a 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -6,13 +6,13 @@ inputs: default: '3.8' torch-version: required: false - default: '2.0.0' + default: '2.1.0' cuda-version: required: false default: cpu torchvision-version: required: false - default: '0.15.0' + default: '0.16.0' full_install: required: false default: true diff --git a/.github/workflows/building_pyg_conda.yml b/.github/workflows/building_pyg_conda.yml index 572aa2654b59..f121aca4331f 100644 --- a/.github/workflows/building_pyg_conda.yml +++ b/.github/workflows/building_pyg_conda.yml @@ -12,8 +12,8 @@ jobs: matrix: os: [ubuntu-latest, macos-latest, windows-latest] python-version: ['3.8', '3.9', '3.10', '3.11'] - torch-version: [1.12.0, 1.13.0, 2.0.0] - cuda-version: ['cpu', 
'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] + torch-version: [1.12.0, 1.13.0, 2.0.0, 2.1.0] + cuda-version: ['cpu', 'cu113', 'cu116', 'cu117', 'cu118', 'cu121'] exclude: - torch-version: 1.12.0 python-version: '3.11' @@ -21,26 +21,32 @@ jobs: cuda-version: 'cu117' - torch-version: 1.12.0 cuda-version: 'cu118' + - torch-version: 1.12.0 + cuda-version: 'cu121' - torch-version: 1.13.0 python-version: '3.11' - - torch-version: 1.13.0 - cuda-version: 'cu102' - torch-version: 1.13.0 cuda-version: 'cu113' - torch-version: 1.13.0 cuda-version: 'cu118' - - torch-version: 2.0.0 - cuda-version: 'cu102' + - torch-version: 1.13.0 + cuda-version: 'cu121' - torch-version: 2.0.0 cuda-version: 'cu113' - - torch-version: 2.0.0 - cuda-version: 'cu115' - torch-version: 2.0.0 cuda-version: 'cu116' - - os: macos-latest - cuda-version: 'cu102' + - torch-version: 1.13.0 + cuda-version: 'cu121' + - torch-version: 2.1.0 + cuda-version: 'cu113' + - torch-version: 2.1.0 + cuda-version: 'cu116' + - torch-version: 2.1.0 + cuda-version: 'cu117' - os: macos-latest cuda-version: 'cu113' + - os: macos-latest + cuda-version: 'cu115' - os: macos-latest cuda-version: 'cu116' - os: macos-latest @@ -57,10 +63,9 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Free up disk space + - name: Free Disk Space (Ubuntu) if: ${{ runner.os == 'Linux' }} - run: | - sudo rm -rf /usr/share/dotnet + uses: jlumbroso/free-disk-space@main - name: Install Conda packages run: | diff --git a/.github/workflows/building_rusty1s_conda.yml b/.github/workflows/building_rusty1s_conda.yml index 6bb65dc3535f..262742295157 100644 --- a/.github/workflows/building_rusty1s_conda.yml +++ b/.github/workflows/building_rusty1s_conda.yml @@ -12,8 +12,8 @@ jobs: matrix: os: [ubuntu-latest, macos-latest, windows-latest] python-version: ['3.8', '3.9', '3.10', '3.11'] - torch-version: [1.12.0, 1.13.0, 2.0.0] - cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] + torch-version: [1.12.0, 1.13.0, 2.0.0, 2.1.0] + cuda-version: ['cpu', 'cu113', 'cu116', 'cu117', 'cu118', 'cu121'] exclude: - torch-version: 1.12.0 python-version: '3.11' @@ -21,26 +21,32 @@ jobs: cuda-version: 'cu117' - torch-version: 1.12.0 cuda-version: 'cu118' + - torch-version: 1.12.0 + cuda-version: 'cu121' - torch-version: 1.13.0 python-version: '3.11' - - torch-version: 1.13.0 - cuda-version: 'cu102' - torch-version: 1.13.0 cuda-version: 'cu113' - torch-version: 1.13.0 cuda-version: 'cu118' - - torch-version: 2.0.0 - cuda-version: 'cu102' + - torch-version: 1.13.0 + cuda-version: 'cu121' - torch-version: 2.0.0 cuda-version: 'cu113' - - torch-version: 2.0.0 - cuda-version: 'cu115' - torch-version: 2.0.0 cuda-version: 'cu116' - - os: macos-latest - cuda-version: 'cu102' + - torch-version: 1.13.0 + cuda-version: 'cu121' + - torch-version: 2.1.0 + cuda-version: 'cu113' + - torch-version: 2.1.0 + cuda-version: 'cu116' + - torch-version: 2.1.0 + cuda-version: 'cu117' - os: macos-latest cuda-version: 'cu113' + - os: macos-latest + cuda-version: 'cu115' - os: macos-latest cuda-version: 'cu116' - os: macos-latest @@ -57,10 +63,9 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Free up disk space + - name: Free Disk Space (Ubuntu) if: ${{ runner.os == 'Linux' }} - run: | - sudo rm -rf /usr/share/dotnet + uses: jlumbroso/free-disk-space@main - name: Install Conda packages run: | diff --git a/.github/workflows/full_testing.yml b/.github/workflows/full_testing.yml index 4299f8b5f013..a9dde191dd4b 100644 --- a/.github/workflows/full_testing.yml +++ 
b/.github/workflows/full_testing.yml @@ -16,16 +16,16 @@ jobs: matrix: os: [ubuntu-latest, windows-latest] python-version: ['3.8', '3.10'] - torch-version: [1.11.0, 1.12.0, 1.13.0, 2.0.0, nightly] + torch-version: [1.12.0, 1.13.0, 2.0.0, 2.1.0, nightly] include: - - torch-version: 1.11.0 - torchvision-version: 0.12.0 - torch-version: 1.12.0 torchvision-version: 0.13.0 - torch-version: 1.13.0 torchvision-version: 0.14.0 - torch-version: 2.0.0 torchvision-version: 0.15.0 + - torch-version: 2.1.0 + torchvision-version: 0.16.0 - torch-version: nightly torchvision-version: nightly diff --git a/.github/workflows/minimal_testing.yml b/.github/workflows/minimal_testing.yml index f4aba732c8f4..ee6c57dda8b1 100644 --- a/.github/workflows/minimal_testing.yml +++ b/.github/workflows/minimal_testing.yml @@ -1,4 +1,4 @@ -name: Testing minimal PyTorch 2.0 +name: Testing minimal PyTorch 2.1 on: # yamllint disable-line rule:truthy push: diff --git a/.github/workflows/prev_testing.yml b/.github/workflows/prev_testing.yml index 6b8b7ee998e4..5769ffd5a062 100644 --- a/.github/workflows/prev_testing.yml +++ b/.github/workflows/prev_testing.yml @@ -14,14 +14,14 @@ jobs: strategy: fail-fast: false matrix: - torch-version: [1.11.0, 1.12.0, 1.13.0] + torch-version: [1.12.0, 1.13.0, 2.0.0] include: - - torch-version: 1.11.0 - torchvision-version: 0.12.0 - torch-version: 1.12.0 torchvision-version: 0.13.0 - torch-version: 1.13.0 torchvision-version: 0.14.0 + - torch-version: 2.0.0 + torchvision-version: 0.15.0 steps: - name: Checkout repository diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 0d0d3b1aecdf..0710eb177c05 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -1,4 +1,4 @@ -name: Testing PyTorch 2.0 +name: Testing PyTorch 2.1 on: # yamllint disable-line rule:truthy push: diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a4fca5a2f1a..09c60e2b1657 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added +- Added support for PyTorch 2.1.0 ([#8134](https://github.com/pyg-team/pytorch_geometric/pull/8134)) - Added the `NeuroGraphDataset` benchmark collection ([#8122](https://github.com/pyg-team/pytorch_geometric/pull/8122)) - Added support for a node-level `mask` tensor in `dense_to_sparse` ([#8117](https://github.com/pyg-team/pytorch_geometric/pull/8117)) - Added the `to_on_disk_dataset()` method to convert `InMemoryDataset` instances to `OnDiskDataset` instances ([#8116](https://github.com/pyg-team/pytorch_geometric/pull/8116)) diff --git a/README.md b/README.md index 784846c40abe..b9b9ce90652c 100644 --- a/README.md +++ b/README.md @@ -397,39 +397,39 @@ We recommend to start with a minimal installation, and install additional depend For ease of installation of these extensions, we provide `pip` wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl). -#### PyTorch 2.0 +#### PyTorch 2.1 -To install the binaries for PyTorch 2.0.0, simply run +To install the binaries for PyTorch 2.1.0, simply run ``` -pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html +pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.1.0+${CUDA}.html ``` -where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation. 
+where `${CUDA}` should be replaced by either `cpu`, `cu118`, or `cu121` depending on your PyTorch installation. -| | `cpu` | `cu117` | `cu118` | +| | `cpu` | `cu118` | `cu121` | |-------------|-------|---------|---------| | **Linux** | ✅ | ✅ | ✅ | | **Windows** | ✅ | ✅ | ✅ | | **macOS** | ✅ | | | -#### PyTorch 1.13 +#### PyTorch 2.0 -To install the binaries for PyTorch 1.13.0, simply run +To install the binaries for PyTorch 2.0.0, simply run ``` -pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-1.13.0+${CUDA}.html +pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html ``` -where `${CUDA}` should be replaced by either `cpu`, `cu116`, or `cu117` depending on your PyTorch installation. +where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation. -| | `cpu` | `cu116` | `cu117` | +| | `cpu` | `cu117` | `cu118` | |-------------|-------|---------|---------| | **Linux** | ✅ | ✅ | ✅ | | **Windows** | ✅ | ✅ | ✅ | | **macOS** | ✅ | | | -**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0 and PyTorch 1.12.0/1.12.1 (following the same procedure). +**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0, PyTorch 1.12.0/1.12.1 and PyTorch 1.13.0/1.13.1 (following the same procedure). **For older versions, you might need to explicitly specify the latest supported version number** or install via `pip install --no-index` in order to prevent a manual installation from source. You can look up the latest supported version number [here](https://data.pyg.org/whl). diff --git a/conda/pyg/README.md b/conda/pyg/README.md index 6207e22ab74f..577148042640 100644 --- a/conda/pyg/README.md +++ b/conda/pyg/README.md @@ -1,3 +1,3 @@ ``` -./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version +./build_conda.sh 3.11 2.1.0 cu118 # python, pytorch and cuda version ``` diff --git a/conda/pyg/build_conda.sh b/conda/pyg/build_conda.sh index 33ea72db4ea9..084d3bfa390f 100755 --- a/conda/pyg/build_conda.sh +++ b/conda/pyg/build_conda.sh @@ -10,6 +10,9 @@ if [ "${CUDA_VERSION}" = "cpu" ]; then export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" else case $CUDA_VERSION in + cu121) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==12.1.*" + ;; cu118) export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" ;; diff --git a/conda/pyg/meta.yaml b/conda/pyg/meta.yaml index 0934df2d73d0..959e7e88c2d4 100644 --- a/conda/pyg/meta.yaml +++ b/conda/pyg/meta.yaml @@ -25,7 +25,6 @@ requirements: build: string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} - number: 1 script: pip install . 
test: diff --git a/conda/pytorch-geometric/README.md b/conda/pytorch-geometric/README.md index 6207e22ab74f..577148042640 100644 --- a/conda/pytorch-geometric/README.md +++ b/conda/pytorch-geometric/README.md @@ -1,3 +1,3 @@ ``` -./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version +./build_conda.sh 3.11 2.1.0 cu118 # python, pytorch and cuda version ``` diff --git a/conda/pytorch-geometric/build_conda.sh b/conda/pytorch-geometric/build_conda.sh index fcddcd03327e..d78f2d9169d3 100755 --- a/conda/pytorch-geometric/build_conda.sh +++ b/conda/pytorch-geometric/build_conda.sh @@ -10,6 +10,9 @@ if [ "${CUDA_VERSION}" = "cpu" ]; then export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" else case $CUDA_VERSION in + cu121) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==12.1.*" + ;; cu118) export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" ;; diff --git a/conda/pytorch-geometric/meta.yaml b/conda/pytorch-geometric/meta.yaml index e10547ec71ed..21b986278034 100644 --- a/conda/pytorch-geometric/meta.yaml +++ b/conda/pytorch-geometric/meta.yaml @@ -25,7 +25,6 @@ requirements: build: string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} - number: 1 script: pip install . test: diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst index 9d2677f4c4f4..32d29f37dc93 100644 --- a/docs/source/install/installation.rst +++ b/docs/source/install/installation.rst @@ -68,14 +68,14 @@ For ease of installation of these extensions, we provide :obj:`pip` wheels for t .. code-block:: none python -c "import torch; print(torch.__version__)" - >>> 2.0.0 + >>> 2.1.0 #. Find the CUDA version :pytorch:`PyTorch` was installed with: .. code-block:: none python -c "import torch; print(torch.version.cuda)" - >>> 11.7 + >>> 11.8 #. Install the relevant packages: @@ -85,22 +85,23 @@ For ease of installation of these extensions, we provide :obj:`pip` wheels for t where :obj:`${TORCH}` and :obj:`${CUDA}` should be replaced by the specific :pytorch:`PyTorch` and CUDA versions, respectively: + * :pytorch:`PyTorch` 2.1: :obj:`${TORCH}=2.1.0` and :obj:`${CUDA}=cpu|cu118|cu121` * :pytorch:`PyTorch` 2.0: :obj:`${TORCH}=2.0.0` and :obj:`${CUDA}=cpu|cu117|cu118` * :pytorch:`PyTorch` 1.13: :obj:`${TORCH}=1.13.0` and :obj:`${CUDA}=cpu|cu116|cu117` - For example, for :pytorch:`PyTorch` 2.0.* and CUDA 11.7, type: + For example, for :pytorch:`PyTorch` 2.1.* and CUDA 12.1, type: .. code-block:: none - pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+cu117.html + pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.1.0+cu121.html - For :pytorch:`PyTorch` 1.13.* and CUDA 11.6, type: + For example, for :pytorch:`PyTorch` 2.0.* and CUDA 11.8, type: .. code-block:: none - pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-1.13.0+cu116.html + pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+cu118.html -**Note:** Binaries of older versions are also provided for :pytorch:`PyTorch` 1.4.0, 1.5.0, 1.6.0, 1.7.0, 1.7.1, 1.8.0, 1.8.1, 1.9.0, 1.10.0, 1.10.1, 1.10.2, 1.11.0, 1.12.0 and 1.12.1 (following the same procedure). 
+**Note:** Binaries of older versions are also provided for :pytorch:`PyTorch` 1.4.0, 1.5.0, 1.6.0, 1.7.0/1.7.1, 1.8.0/1.8.1, 1.9.0, 1.10.0/1.10.1/1.10.2, 1.11.0, 1.12.0/1.12.1 and 1.13.0/1.13.1 (following the same procedure). **For older versions, you need to explicitly specify the latest supported version number** or install via :obj:`pip install --no-index` in order to prevent a manual installation from source. You can look up the latest supported version number `here `__. @@ -150,17 +151,17 @@ In case a specific version is not supported by `our wheels >> 11.6 + >>> 11.8 #. Ensure that :pytorch:`PyTorch` and system CUDA versions match: .. code-block:: none python -c "import torch; print(torch.version.cuda)" - >>> 11.6 + >>> 11.8 nvcc --version - >>> 11.6 + >>> 11.8 #. Install the relevant packages: diff --git a/docs/source/install/quick-start.html b/docs/source/install/quick-start.html index 581b6923fed7..b9af902a178c 100644 --- a/docs/source/install/quick-start.html +++ b/docs/source/install/quick-start.html @@ -75,8 +75,8 @@
ze{osM-d2Y+SY#uM`l;~QiQ?g|@vLTM`h(ZWQ;?yKiGtV+Fua0JGTRL#WT?C&;+9c} zmtY$xXI~30ejwfv4T^4OyJu#+sV%k5c2x=hR94Sx3sQL{c8ijb+tR43b70_@852PN zdzd$-%M@@nz7Cddp-@rHryvO00eWWt*Lv&jTHyQKGk|90u<2eQ*I%A@W}!WRcY8UQk$ae{TZi5Yph zlQ>!??iduL8e}vP#JAhVewSAI`=2sV!ZO7By((_n+58*M26XinBem<6M5(tE^%qn9 z4!5%GlJ60lV`Pk`6LX38)kV2)fQo~Z=|t&=`y2iQl{4ATx9mV{_zHNPBM>#Dbd5SN zv0ogW)vvlNF@E5fqYLnWe9Gy~bunZ=nenTvDEQQhqPdb=Qo=02Pl6rrLE)BSBh>st zNMR;}uN@m)gmkY7XD!6c;LINqQ1 z-Xx#dvfp_R{97>cz{=Zpu(_%w?j8e<#(5W;BJQ1^uBvVFS^PsZZXj2j)bS93ZS>-F zjTagG`w2aPkjt#~1Iy*cx>sy+riSFj8}sZ_QzXuI-0oW$=4;5oWTFsg7m=bEC#$0?hB}b| zTCoCe1!!3@tchWu=t7xFjj(Lt!SIu20&__4VRbWD4av zY!5F#oThT!>Zm%GT=P9|?^}z=)#fZ8s>xJh{~Mm1Scair09468STE&yGB|`#ly?ix z@A`OV{rS~l1~ePjPMu>_8Cn9rQBFdZ3RA-4Yoz4XJnufhLB(O~0gvtTH&NiA^BC)` z>nKK>@Sm2Yn2Cj&ZAez8e!FkJw^L?!Z^#WajYuSpI{UKQh&&YdH=Ole=Ma#7TnlM_ zf@}B7Y48H@h!x`v;HvZjs^D2LgEiuOJdU9L`LzPWr0Qbw4={bSH}|}Uhm@rUFuvG{ zjejW4zO)0I#ZUz_PMzpputZ>@b#oT79&l0e_bCruH9A6S#fddFWDn-utZD3V_C%i! zD~m+BI>fBQKJ?wZCL34?Vso+Bfxzj)+hpF!n?bL9Qp0N+dqqjnLv=_1#`C&LRaSh;pw}>seb?e zZAZvC$jCZ2k(s?YR#qw;*+)orviCmr%*Y-ggkxpTgY1aN$j;t-|6bmo@AdoV{ONMN z?)!eu$LM5s%)bU<^^@KCtC244WxXIdAVXpf-)lvFp2Dw_NI^0s2Vz8M$5^SG6rTYS zq*L2#o56$1Kh-VB-hs$BdDdAl~li*;vy7TO}oJFR{efpslANMxrA>+2{gLMboxBW16&!tB0taEH=QyZ8(M1O z%?0>T_t}gImWfbc;(wyv8@&Xe>fQ^=;$!PO;l+8u+85yMs8kD>^xzVsB#4daz2&g! zycjYH!wMSXFDF*@u9C|k=tGcikZF(^_6qRc7pNH_DH!o!*@qka?Udw03o}v00S)fB zkh0;jY|y^t@gRK(FC>J@^t6(ayNeQYD~VCdMP9g*WOl)18~5U?$XFe!>K~iaj^nL)zGYe~x4-wZ;EwhN2cj4HJkMTMsWtfhd9{L|cZvuB33NMG% zs|Aw6Uo*Z1{R5D_(@Bx_jAoa^H~CL39Uz((WYU9U@6&yq_5yqrapssU>sD8dAKi6I zX>bCl&@8XNT{w4rJ)t|m=ZtAv}| zo=vyA#H&B_XvQ_tC+2+?WRyK%r08j9My=k|i8Jm2g}IvEmUWL&-3yyUX{UJ)o}0-Z zSNBlRSLNom*i35ZFcAN5!IGPpv$h;=J0}^hR!J^y(xh)$+>a%(FV*Hz1uxuu@-%PB zB&{wxk1V{7Q|_RXX)3v~wrA?~|27xi^iR6D4b^nrA-!?GCE;s3yqeX#G;OuoJA8i} zygft+FmPFfk=RaapqZPNgseK*8SDyLGvLw%VD=kFOf_x7Cv0OZh6@Ysip2+NKUiFR z`Y%zi{9mKFfh5-QULq6WakCO0<1WVJ&c)D|h4~(d_*GNGWG?!&>@6j;0R*N9Hay6@ z+8(b8`2@=h9Q#gYnR?_M-ZZc0XyoUBM)NoXjyyHq3P^rXp7a4xu!#y3}E2^``KW>u| ztm+kqYXdg9KDT5Dp$NI@`WFVCbdz&3LBEHNmfJr3Tn>P4_{G-?zMG?*mIlN+_8H`J z_Dv+&jy+Ewe(Rz1-%~sSPMS#=8g^$+v~w#zp+Dv%ZL1e80)YM~IlRQ$Bb4{JHzfW8 z@O?UFXO%L)-;-F<{9pS>hYIlbH%-?UhCVzp#d(Zcep3nPS_;3NR!_URm%* z>kb!A^%0_CC&D15DWJe~rSFWWq#;T_P_XQ1ckiWhv$a%=_mO@7$(mFA?H?lgTv?+u zZDvg*-;x90NQrEVc~6Bz7M8QKV9c-M(<3XDsB!GJQhnDSBG##0W!$=Ms3^d5m2-kZ z>R{#Gx;eMmt$vrb5XE>acq>fm>kM%KMzLShq73)#m1pf}wQx*M)@{)Y$flM*@9NJ? zRe^(EP$Fmbsmap~c5M0)p~V!h|IWDCw*82^>ztSMMnkCn5&DtTG+4B4qydZUui=;? 
zYfDan`E&820xq>iEB~tbHrYP|1cuLJcs6`=*8XM!xcc@@23FF;$iaYw>LrR-xXdg{DbgwuMDZrL`j2rxb4FInfviB_{l1yky3;> zfjC^?E1+N7k3Mmt_jCA72E$=OisLgf3a^{z1s^>WUGOg?Nzyuh;by@<6XTtxO%}LP z4QQD=kjH{OXO2bqL%JWJLg2DLFginRzIwYbBREECe<}k)@6Oe+IWP_=3n$GVMi$Y+ z=rtfRYy3ge28P%$%D8+er?#D(td#;Z)o38Bg9k=xOB&~+&UYZDB9s1>8e25p2nQD< zR?ya3!&3j!bc5a;cGjb-6QM22k!r8$>VIkGpH{Mt)7{DVb&@tshvQp0ky$V+HsBZ(|U-n2T{d{XihRH!U@; z=JVH)qO7-s$LZppe-&89AC4qh$$53XRBhFkUbotrAmQ#?(fY}fwJ9>GeiP17o+}B* z-_W`fyv=6T4R!|A&5GCaT(s%xjT9YbYNgsl0{YWu)nS7V(<;5UV3`bnEj_`3~>0tS8< z{A9+dmu`|);aYAAB!-3*u$D<-#DAEZQ22AHe(|HSu3G-xNENB}3faM9xNPtbs0mi& zl`0KH`3!$-wXv8I@ugga@k6o3uHf4Ze@{$R_qF1 z9cQ}(F3L!`g{G?{#{khS6Q50E$2;iJurmWvc|reUHsd##QeL)CnZKc)%S)bwtu%## zZUU!ycl(Ty6MWUvGOuA;5}5E3OC)W_ zK;(VSBts?=y`?0~Wv~8JBVcW-PlwjCtJ?De$?vSPC2y5ukAxL%&qI1^w5Gp|T=3FD zV+SZi=LNEF(8Q3kjk4s6b zE-nhPwiImEkFk)dQT$)d^g!j#$WZ0-9}U03kW*}gL>ME}_Cy&N9h)URFPJ4LJP62q z1d%G%>8aE9IkcrP=;>i%oCoYRb<6L{*a%gavCT6( zk@INGRSDy{i7?Ue%nEZ>mjjvx0h)4?ri!mOkfY99xLIIGr?=&jS#GZMaJ<{hc59O~ zu!z|D+UDctT}JehH-CcTm~QboBC|aLN6z&rfp|GJs^7QZmB|H*JtVdBdGE=Gj2jeD z{?;NrOR?CzvniX2Oj9{3m<3CV`$NV~dG#{n10umkdZO1SmV+zSg~*HY4d5(O?09yo z?7fM-YSSb@TeR!jWPf0A7aI5Y>crs8Zvpw!_w=;nk}Tf_-`&u7j-BM+*4&B*1v=E= zGUM(`$zA_L-p#~b_BFAJY}LCa0MXJPQu(iSnWs#=gMi`p+BwVte)!RD0!2Y>MH_9r zv8>Z84?w;hESUJs{%@o8ah8KLTrtXmS=jbrXO)$hzpQ-oy79|TQ=<+Ilh(4C@=*BO zcQRf#2q@H8f@Hsbdt5xsrla@y<#NI>w{>iZTMpVflDJI#5^@{DEsiz&ytPr5-g3yR z_KpjC+~kfqU-z5)^9hMlMMRDJN7V|Bjp~WFium@tBA-4goka3TgroCdQrOGIFhn@? zen_tZ^xhqVB9ly(=a!XeznvXq%hR?5i%2G!s&~}ahHN9}O)WELNK$YM?;zX!WmZhJ z->eaSS6*HwUVaj|aJ8E6rGz&At{#iYia7l<4^#XI8riA8v^^6v8rke!I`PdHfZz^zwJB8n2`b`XnP)^;;S(cw;I|WMVLx95Bn$VxjOs zO#M|O7g+Hn>F!_{`CDmS>qRCgThI-RGB8jWX_Z4?x52~M8QvYfvE7yeSo zyF_0Ijl4I`?yFO$9k6mBVTgG6sA-cZWR{g>jZ0yWXw4}MyeDT;oG5IK-4P@d)X3Q@ z1nN+{00dq%)tR3B;T2ZK$xO0Be|0Q76-!o&yhQFnO?J74CwusFZnkgkCOQk&q|Ck1 zg-?YuB-<}~KYcvM*hus_^e~G(909(puWEDjcil{k3}+uNE|i$5KEfq_rWBYz%maf_ zoi*t#%3kL9RL=2%)VAQ#!)Ioloxu>1G5uhILPwoU3iSsY`HKGcn>9vM85*yY6OsJNr@{!`O)h@EeT>I~+G@9Xw*{!8vGm z^I!7ktmc^{t`%pI$}t%*+9+9AI*;ui(Y zZz9Lr;xUF(j`@WA@3ECca;&%^v!07QYEc-#CNDXmRUf##rhg7kOUSa9_3k+e2tn-; z4!?sK5leo&qubR^>e4I79jP^E8h;nzq^_J~_lTP7f|Bf*yH6 zF)&KQOdUjZr0S$e6JMdqc2&!XXve)4E*Eem;Od9hMq(^BIl<3J7f0*rxGJ2|_-)-T zz{_ktxiT8%sjuI!4RromiAR}6>2!BSsAl=2N+hEMy`IhukNt*!kOn_ou}=6w^^Kv% znOtp13*&x!uYdsNDz~Q0#b#yKkbt|vMrBjYzU(%w=M^ilRKSsj68g)y2C&|2cm_Z# zhK1BfhF@jLMh(}F)PW4$$5h#c)-Dr;4E9Lqtnp9G`b^szNd?Dg4nBoZq9{w8nN16p zcVbP$Vwg+7e6~l3QA-c97tOEf*zZ6IDmgDmRy`Z0ciq>X%pdf-mif$5;y@=D>b4kq z!T4Z5pVd_ks*)Yp`N_x?x9pZX0%y23Uh>2|AfdqVs+h zgc1;vVU{u8vj!{!yQpyDE$7zhqE+zq|G9enU<`ZqJK>%OoURHY6IJe6qD*D-rhdcRMgNzz+cRgNkyjkyHmI;bu|QAJtMVPZqvLI-70lDl}}ww z<@~v9)TI6WZQ z0TF&U{>MF%uOK@LKf3n#TjLJU6Kl+^0NfJd)%@GTF&>Z7H742rAC94|ExL#eT>D2a zd12Q9Ljf}8evLuD!1nceRtyh zB_mGWu6_u$qNEq{rHp*FYgFu1O?N04o~YmJ4LZVc^XwAyPkfsKu|_$A=ykJ+gnYHe z)=dWYkK-FQI=!Bj-|wedR9N>1v$ts_bM=S#0$}#+&7_y0n1FPUv8|jtn3MNP`#> zNYY_J+!?f;7!VV%NlUJYjadIUzP;L;k6UF2#$-c-Qt*TfR?frQnj)!h5S1h+z#UE` zF!&&vU&CLAqpD@Ayh(V)B#SMsCfuSBeVN!DT$MERAkkkg^`{n_F1-C-zmTR^D(00N znZPueT12+@VPCXjm{uME3X-HT5eBFMvg@eY)h7cvW)`|UpUxlhk9NS-_}gbx%Oh7e11%9 zjCbf1MVUOGT%swMZ~Vbm8x^9z$J_ybrb z&gXHZHP4RFCal~}q9EZ-%GE9GE3Y(ao|iyTaaTsc#;^7#ddcm->(MB^;|`lh`?BjM z6{@Pf`+e(`Jwg&Jqo5u~qL=^Oa3kgR+a`QR((Ha0bD5K+eTy*cFr0UfB>dup9T@Y1 z`L_zcKtvKL)SDDsPg{wU-48IpAdPM&!-#sdqRhef5OZ0Kzuw=`R1#+Dm~{=d{0*_( z(5D4sfIoOZ|0Lf@FZg4B_KtstYO9BfK7=bc(qNz;_leZW70fL3+t`|4TkB@?<+P0W z>^jqMg%nAje2s1otgqf)7`ym9*<1*Lj4Na&RS}sB?w7B4X` z4ln)uO`70M-oH!1#vY5cCvi_vrO{M8Bm$TM4jhcYxU zs%Ca-RsQhSc4E|t+?Oqc{H@>0;9*ig{mh=bw72mK<6v11$E>R6 
zO3+i|yiIk~XT&JyROhRKUjEfmo*Wf$9_Ka7IYmD2Cn>Ta)dTYed1*5*F!mkO*Qfzw8 zks1cJ>w2xn-ymOX@ZJ&H>WoD`Z~U@Zrc&8}K58R|+atW)9VZCzE+=)YsK-?wkYqpE zUAF*Kt~|We2I}QGU~g!acU|?A~Ty2?ba^`*t~}s z@q4c_+f~U`XiW_N0i#1A2iyB3h)0ia-`lW0(%!xlB;3r!BL0>b{mPPcUDqw~XtI(PzhJ?`=~1ytk#)g|K~$yQ$}i ziM|=l-Ym1V=sDr%{6gGXs$fXgTf>eVpKwVITLw}0#0AG9w@jPh5CCEZG0SU);mw3^ znBOOKT8I%~ZygM3)OQ;p!+qSRJoRz(V=Kvo%^W0|%Q$X(m3DPd zTOA#!X3h2nEWpIU^73Xh*0-ilFpw`Wg)BaxSavE0B%y44$ch7|99N|-Bj=broTY=D*x=mFE;sz4-@g6IUibZIF^kC>L4`7%-UTChX*tK zhft<MpqAawvqQo3c5*G5O~0FhkrOFrnUkMN7n^D)6ZF#YY8XHrW>ON; z*RP=!$) z7Fr(M>};fZ2mc@HGZ`gSu?Zh#AtjTO!oL1O)8alT+Ccolft#}WJQ2f23oXVMb2_xP zD**!W^n6@wm3^Jc()Nsk)g4{6VtP#z*d2vvLUDRT%DR6d)7+V;@^)#T5-y7)`c)$aU{NEy1g?b4h z1WQ9ygDm6`%+n)(@k>Jp`lWQjjvof@T0Cnra8a3G;q4syyti2TV&FtWs}*N{8EFTu?kl zz{7N@2Zju%MLOBSH?0*8}$WUoe2wD62jMI*D2Q1VadzPpEQEGvi9KqaL zH(ciB3h}H~w-CtfCE1fj*Gl z&#X%SFWV1w`EHh|8#YuFSN?F_1-hy8i^w@%>FY!%yHt>1JjI$~w(`u(4rN;|-FTA` zeW{-LEU5KwtM86<^d(v-0=gv_u@OOhtv|T0Z81B@Qz9o7fHA7X{Z-^Ok6_^GjIv#I zv^9!m*llIJfhQ)?hT_-IKQ?~nsaAqsT)`6cy0nb2SnbsI`i>#V_hI;IM&Y>A?6`vw zpW}rT!jTOD#|p3~79~4uB3uL4p8Sl1Sup(xindxKuj7!d(u!2@hJ;}A0iPlRzdj;s zBnb&+CRroRSXzu-uk>xwQ{sO1Wr|P!BR<9hcjGm}!KuHyz8fyL#rzQ%Dd~ZA?WyMt zR(pXZGGZQoFk%Nzu~(e!C&d7m;N{RmnTC;z`90@b5A5+py;9A}zR6D?EmggAgCN%? zCE~%(cL}ac{cn~KCWY|kvZHpYTbM(dKjmNXmot)S5d(G8bGw56YFy~k?{M5 z8eOFzmA}lvlE%e8phOX4`R4$NNgGbWEw3N2^k6YGym<+V0N(dL2f&h~a!LRctx8dy zZ#k`P?GU&gJu;489a}J1^eg29!}uGk;8|7mAL$`DMS|hvX#Ey%k#a#@N;Sq=#*8ke ziTbRGqU%q8>I&NzaY9%gB3p8Z4}ZDZCEzr%$ipu>S)zTWS807@Q`?^=(RsgGP`aqZ zS!J?Gh_om^GXJBYi#pE`UvR@ASfN+8Q!t6_Ha+A@=4LObjGO4?)@wj;OqS3uK4Cn? zvHxL_0?Lztcf1io*P7-=7?1`Z^Q#%0yv1DdgjgNxRrLL#?)=ytnja%x5+1XuGmef&I(8;4jL))r_g%;YreR4=ee0 z>(%biP*=rtw88)w4Yle=un5jy`wtaP< zYT4V|BY-J-?i*Nc>IK_NdWtfH^P+su#dP@^PP@513mjno2Ohc zpk-YzEW(W*MvfP@v+ed2nQH;0X|Jr9*8vfBwWGPmm1w-p1e#98*?W%fun6;C zyhwbJbCy|-*>>-roH}{6Oda;FJBJGprE7wBgxVe0vL7qw#-9E3O+5~$NsUl`7B2e) zyvrU*K>sW->Wc;7zsNiT$hK&RPI2)^c_cBvsOvNC47*-YEaNkKCr=)r6y@}I7Qchd6S~xMeaM&9 z>w<*9V&LHePCdu6HF4i3_`!EzJx5uOv;w8E18x_Aw6D7`aSengn6)U~eL&f)&o{L7bwbM3*8MB>7N zA|BbD@_9iek7F!47BMq|8_q=+0}j|17RR)v3;jT_ybUtyYl5n!oIKB;hJI{Xmz`1z zm`6qZOy~>sFS8O0Bf}?!lAgsro8KW#KMSVfRMhe3MzT~cg1~W2YMvSs1aHoducYrj zv^$e*9U!CSZR<<5=7uGH+BG6{21!nQG>uSVFG9PCg4o6U|!lWOI5zBy@!n_ zI--7Lt<$TKPYJj4MrqM^;hd`v@&NOV(%}-e-3_4hbxS@DEg^d$6i@Zwga`mK+YFtO zY`zR9W2`EJsyBxWbpA^kd0>{nlPNbC*3Dvd?j&^m^3M12S9TV>Arkz{Bs%W~qd}F! 
z3LUQ;+iJ2u!B76^B_v&Scv2!_MjtZubEG6k&(m6up8cVdeZ3NLTjs}D(yZh^gS6$} zm;>Zpi*3mttyL?^#YL6tQ!?=Sqr;1$HA;!BiPx#((#nMt(gHqTf*e2|!kl59FHwc%F$ zLvrczQ+R`nEboTRzczQ0A7b+mUUo&Vdycsa399!`ch6RAoebFsC%)EA?WooTFUZG1 z{_&t_X?5&TxUwIQ(gG$6cfTN(*y>`f+ZI9~Up@^W^}T!+L@fpAC7g@^Yc~USF z!H5;wulBy;h~_E`Js3$G-nX!2vzAq5HZdDkXcTMlF!@1H+?p9#Rm+H^MU1?y`#jtD z2Cemf==ewdJtj_F?x>wtoFRTjtDWOK;@ce5K{C z*U9{#4Nq1o(eKy!>?}o3U;G7fwj6@zW0COzvn$624YU1KSm|xR>G6mt4n7ZWb&ahR z;<^&{R4qAHCv){FJE!Eevnns=20yTjLTKk^F`_Wj6k*Xi-FmXNxZLCwF?((NwN#i( z)KquVRzy!v{Znyz#t|4u9q4Z6)Q4e3_il}9BWa**3)j4gPF&?;`|Ko=%WPUU#QLVC zUOCbYN|A|b7H7J^fh^lE1eN^2GB4(+$CR6E5hQ!uF%;8qe=|@ujawt$I-3(VOww3 zz1mK4&25?gtt&76j8j3Gr7hQ~mEf0~NStW_M?Wzp!UI33v7Y4A&eV<`?p!>(N)md2 z3Q)cH9#%Gam%&~!KO5ZqPc4}&gSYsti1CYPUB}A)?pEyNO!#ManCjN^%ifZ^!^(X1 z{w2i6zdkN1+|J{IEW$NTQ)Xd8zaR9Bu-P6IrB(&5W$LiCp{F{ymqGWe?& z66$FDBK<-?&cvz@guOFe`ZYDXEj4Dp`e5L9^`NdWA~tDYpg~2PH2O?5JI7Q~3y0tG zYzA(eIrIn#1pr`SdU&9B2S@D-A9Gso|MlY&+^wY?Ya2lhs?#4uNaNsfiIgV&dunuF4(Vn+18 zD)^;8V=V+M5jzuIg2dC*qgT}q$UGK9E_*#Hh`NdZEt5nstp}V*l&RC^XJ0pL_SR)j`Tuw5F+v8{ z)*xbyH{TEQfYToJpXq|ik*%o>`xs9UwAO=_08_OIzEH;3G!#F!;#rNF!grE!T)d5h z{9g24R00HrChFDagX{CJ$0~^RT@ls_gK>JvKokBUF9_Wz??3jdG-g5(L7kT+hw3<= zr<$Ictd}YCjvuXro32KEe5|}o5Yr_Tl^C589ilN#Z8BrkgsPOx#5Rof5g~YAW{bxV zH@iUV9&sJrM?lDi+m-j(0z1{HH`(4(o9 zYZ*7k z&l}BmX0x@LjKiZVSqiD$1oMN28~s`f*5=)Iq3)X5*LaU;7INNKn#MC-v^o1GSzUoan9;wW4L>V*tCW4Ww1uBv+_;#?xv z#;T>yJza&Gy91U9NY;`*I!zFqs}P+7cn02$;snkx7`U=(Dy+!@JOQ zA$eZSXXRBx%`_v+RI?JExPh~;sY-1K2SQI*nE#4!A6Rt{GWi>XNWauF_tJgXAMdwK zla#`Pe1yrlFoNIHZOJz*pq>ZN z#K?fr+b!m4GlF#T>S@Dj!)A4ebaO;_0%&)U4D~P)D>z|;;Fs^k^K^t!?l6iwTD<;i z-YjUUgIg&*zdXS+HFMHfT4K-xk=?jXB4TrX{9{CHy7#!}tTOH=9tncQG`Wko!{hKc zZc}R)l(3AWyAk94qzyVv|I$LaMVQsy3Mq3qTK=wj=kU1b%ePx{xFz4$2wA?%SDBeb z#-|c(Ed^hsK?4wrAL*63S}bb~(q>YOO2Jo}GCeIhM5`sSKDXY}$z)&lUfRM#;eLOE z+$f2v4BjC>o_&7mn&nDI251VV1>~G2C+yzyPb!}^HWu8J443eQvxa)c0}@viJ$>qz zxNlZmp{)U^@`EZ=*@3_o{<7IMnw`G$6P5_Sv{Gxdn|T7UNP9Nehf}wJ@2_iyc|ETq zBZmEGt8>u8k}2&TRraDQWC=%_+2Q1s@D`@|2Sb=oI7QEV&tC++BOTItJND7tO7uXV z#L}+|damnQ_6H}C;t{d!Mh5Nb7MJB3v*c*Ah0oB|tm|5`v!8y3!j62BeXa!aFRW(v zAKbh(FeKx7;&@LXJDHS;apEb@rz?M@{#n`2y6E!9FQDb4<-(P**J9(vQlG5q=w7gr zk0(p?mwAv)#>VCSMv~UPV4pc66_5Tw=3Zy-13WqP)kVoxA>_B_uC*USL5^y9`J@t{ zN03VN&=bwT6*<~Qk-HNU8W|drm_yuC@H#e&7HyK2Dy}=9 zd>{^0gd4iAOPa#KE=tF>+D#J@XvpNSP{O%0=d0{LySGrLi(gSPdBlCYCQG|xZ$$M@wOGDmvHXXft%2zCU^@wjvrm`t0W zT7Et!g0?& z`f1vLr$N;Zs8p}{{zSEb%D99tEGfpIS= zpN0~pun*Q09J8k=AU#mBo#Pv)6(Oq~8rlg>oE^^{Y~VK(oiP&i&<@)!O6VhzQ+zhZ zLH~lp62|id*>J!z9~P3%i52b6Hyb=1N@Pv#@r94i6k!7HI&$C z1f*YKc}`BEjRjk!F#c|ml_b(e5;6H!R=^zlV7=rSx z_ahV!cUxe9finP|5huo-4rL^E>oWFq$N=NrC42HWv;=qbSIx^n4lvoE?iY|Tt}a#7 z#F6@YQXTc;O`DTf$*s&;3yi+imyS`+wK^@IDu_pAB0)eRMf?*}pY*{q6CsSrXmkq4 zwyf+jY9F$+lr*FOYl%PZ`>Y@%if=IN&)5@CCnT^^ed zpB1=^ZZ-$Qu|C@AMQ?G~-XT3jzm9TVoH*A&kqw7X8wGOh(4Dgj-7iy>QI)k3ZKF+x%-4 z|KG7LtwD>I`&fMnHgUW%0F-~~7g5$&1}`o$X5|)8H(ug`uE6LkY3S>K)W& zAc3;EXSSKDMzL(G#y$!ohCmwUFALkPneEV%H^zt0?D5Uttx8O~dGw&O`y?&~w(c2s zY-dqz8iV4Rw`k(d&`VO;sPCU1kww?z%g2d-ZKu4D59z36*YGC)0WC?ltV^X@!JmHm(tnqZ=NR5HX`|6|Z zF2Vh{O5sV09R#i%&M1R+>*+?XV(3PN(x?*722_+N0@+nLJ{qG@315tT{$^$8zT94j z(J^TC2In%4xf$Qa*%cYB!I%JIZm6aRQt z^_L6W94>_R($#$_yUtGSAe+toVHgD|7sN!62VAm8{UHSw;`b=GWX#qE%YiPDva}{Q zD%4`k1eaEtYYUK9nMNsfbM^Jc*wrreg~UeX^!@b4JXr0d01Z=N>Jz3V-ZI~A13m+L z%h{-_9Ilr{;$~Z^#w;l({FIm%sUB8O!McBf-pKZrNpl4nbMd|-3kY=Ve3C<_{IrZ= zI$##x8h|Zr1)gvn?$;zt399GX-}d=g z?5`VeOJ?*M`Cs*@vS*GbQI;Yc zxfsT|1%yUDwhhLmu(K`U`MozNn`EXw4L@FWIW&@L2wsWw>rku1vlX$ zcc=IRIl`oGyp|Atp)I8BE`>-=@N<7^Km%2*ipb3cJ`5^yjd$G`7oUM(X;n-VTL2oZFK9O1N4 
za*U#+ztuq93);i_zkk3H&|rah$^9Kx>|AAaELccFy~%<`ARCYFy=mD%g$v^y&Q zq>bq#0w*%tU-lGu!`dYBp;~L8D2Hs*XuC*)n}XKqiffP~=hvd6)xFu2*5fZ4-b9^| zNA=&ptpL4PKwSk!e85R647)^Na-rlFq<5o8*U~`@P9Br zrtVPIh|9~(Tbkd)KHjf_Lu01+&Z%6CGW`J7+=mGT5v$m8MRNOP+TlL1g)Wpj+ z%gW+Yu{ITdm>`MD9>l$4(Z!H|OXDPSHtS5YG_n-XZu&%$XsD~X$ zYRTSOb;XEJ6)V6FOZ9`F=n|6oaY6-ga#j*AQ%w6+bd(MDfxMACcvRR$n}=W4!@+0E zfCd$46Ds^9ySU>9dbr;pmEE8ZP`Hg$2=2t}|wo~)#u?=2efbfwYC0sZSiDi?o}rRJYaqRY<6cTlwF^ zV(ahB_aBJ4Tbk8Ke=PH!a|Z|wY$lv?ZQO%B(Mscyacv;1K3q(*7&wuxAee0N_?MQ$ z5>Bec+T%sh-(Z{v_5_QkrZi_5yn0e8=Mrp3w;+ksrYwmRmrBA9WCt*b)1QjL5&yuG%uK{#6kH^^b?Bcy^CvYbsd=0XnK18ny3iD zw?WSv@0q1Q$KhRFAd+-^>?9(9syQcoNsIPPk{ye`K)(fJjW7Yv62NQ&Q9K-otIqf& zO^4`a>z!E{jV7u8{*na!i!pvyBa^YS`%TpMl#d>&O)*XJ1CLw|Q(D0l{-q&+qyUEh zK;RUPAsEg?^uV5KmKaXrFG@bkRj*X}WQrU~J?bh3t%GIYACOE}Y7r5vK1}qhSXClx z^9Ra9_1Cj7v)#@D)}$&v?Ds)m!)fAG!+jE*Q@i+-H*gC%)jz_{-ahY&vVdGRu~_1B z3Uf2gwpd1HwzXQWx+ya~L(TxxT=o;2(oeta_3mRV8{XYTvh=OW1i`5&P--8s8X1no zm^-Wr$Y&zD0^57zoC`94{zqQ)-bx$swp@O0_6U~;J5cMxD!2l$#O{DLfIuk^ca{VkKY`a1N&~glB2*XQrJ@T7h-b6jM?=v+1C%2e8I56HCwK=; z`yKbv8pPR2dXAd>oH$WYsRW0e{0j(()aITDY$v%K5|Kbz zC{J1Xujn;AiX8M1j0(g+>nng|5$q^SaO80V5NU6@c^dKksTjplI@!*{P3ig<^zyeA z>p!X>gzw)#fN9LulUY#!|DUWXZuoI>B0$ z6Ga?f?&V6Hne};-Kd*j|^l8V$e^$s_CpO~toMO@6bkS1!U#j`%^n?8_5{G1ClzpkB z;&Dw`co<_c*aKL4{l&Qj677Aq!MO9*bOE%4^sa}BL-00YTnr4wcTQP>LL9N_{`P*D z`&LMQbP*8nFjD2^vED*`|3HwZi83>VF0nSC$Ih(Tk~j$P=E znhxctFI+;kPMCGU)aoB4<_}ydv053Z+;(J=1{zXYsY&Gd$XjKi8rv0+0?C~syy{@)+EFk&)uSeo? zJwKf$VKsR9;^xP%F}6d!UBIJ!EOi=yj^F1L|C7?Szv2&bb5%=qy=|v=)PO63RyS)( zFxDUYb{{IUlRp9_NlMK`>P-FH7e!N*Z`-B(UM)+rCWF-|v5X7Gr0| z63RBVWC^8^bu5LH(T1{%kX`n5EM<*BsO%w1_8PK|C0k_A*kvbsV;kG=o?f5V`}KYN z?!P>4_uR|%Jm*~Joa#>)&9jZG4T&ETf%C;cH(>E$1z&lNl_fOJxv8p}h27)b*GApX?J# z{ei)t2Q1y)@h?u_+e*wTwx`-IUeTH|ten@$pO_>&*W#k9_xh8B0t(5Bot{uEVb_ol zXbrqK1^K;cKlQ>4SWDyQzjDw+R31$PEBJ{+kh;)7O)CU@BQ0Efm^+$^nQauQTMN*! 
zJ>8yDj+5p;Vq-vTY?z(Qo4r+R`Th8tt~j3Wwz(KOLqEyf!gcE#yDUG&7RuyX1a%|C z<<8jLbt}Y}D)*48=?u#7j=-fW@9jqrsw(4Y{4r6~qW$@){oRRLT}U7)mOHInX3^b= zMmLpPG=KaGzg<&Do>~lFq4wdT%O*m{n&iRf6spV4wRTK5$R|j#R#njbuswS#ETcxj z6$1jARrqJUQCz6kbtt7j9}n6SfRrLpb2QNL`G=~=JjIU%6Wmy!<%L@Y z4p~Sh4r49R#0~xzmup;~2{B7Y0HSkk<5#n3~Ec5hek#m^-C;XT0 zhWFu|P8b#~K5WPPOD&)UPI2&oj;SSocChi%sO#ZcB|1NpT^2~RR`GpOQ$2pi`nAkZ zdOz3dP62b|S+Y%}-*Fu(@jnhJ+O2xL7UjXIUsCI1<;p2xc+g^3d6BFTI+RAQ+;3|0 ziY&L0Z9-3qyX)W1Uee?qbhlq{(0_vYUZfK_uimO;==r+XTAja)Iv$vXS^?#fc5N51EgF{EGr$CNFCi~^qX_UrfCW?bB;pcaW^evqFC>QuhWk=r0ly+tBUkt} zI=$u#)=X=IHv6OQq4OFSY~Q^Weat7$^injw&y4f6*IxoSz^ewj50s1Kwm}V?wN$ozrR&7xe#nCRJNPqhUZ> z2&kAIg42-CFnvbyWJdC>?Yp0*LMRGDC2Bz*pkvgPHWI6Of*x`kSdU8{tX+Pe`j$GU zg?8=J7)Tmr^UFe(gcGvr{Mg>jZPAzPl`u5&+^Fu5AZmv1eWKB2NoW z-;;&{4<-Un#!CYBLa3D7nLW^d-xI9(B^Ri<{A)!r5{fS_mnN*s?&`-<|KD!m!p>=#ZJQCsJJb|3G4{{5EhoV7d&c|NHsR9Be0p-&Fu?(_qsKtw<}Sb&)q zs}(~VtqI)p2?YR~>fo@!o>{|_^>FA#yRQquhq^WOxYwD7gZ+x z_0H>(pg`A}1mG6`3nwF-jrXUg&e>l3e)ZYjuOG?0In?HXsUb{)_D`w3yl_AQ_x=56gg@+X`a0Q&tI zAVe8?E`RX79mzP^Ejwj=+Q@sP8ZhQqd6B~gK1qBpdyzvHEnZ((QePd|rZCy}NmTAa z;L$9vY$5lfq~3SnZ6;}{OP>EdcHOXUyES;`>ZeDp-~N3r>!v-qf?Bw+BZ^T)YN9@spC-3({KUKu&^?sWkWS;{{Bs|QY_2&6`5jO0 z)mH{~+nWx2y6X+lhF|4}1p`bu{#A1cI&TG&r!-YaeWZJtr27@`vK8mI05-&uz_GDO z{h)*IW={I&Yj;QOQ-`REuI!)eH8fn?gGqX;9V@Mt)ekOm?J(|cHrRM%b8PvWDT#(Lu*Z}ad7Lzwz zkzP9cKTb?J{v?|5Cv9=iez>^EY&#)kkln=nPQbS_23Pc16;?~tD^9=m1US|C?yTHc z(u!gIpTNCPko4blf_~L|MO>rP2flOXpYME8`b_)OS=;O*c|Q2-o>X~ZUT!jPTIdpdZBwDX%Y_*bV`$tSeZ&fpD=s{NVh z-vHN2Wj(pc*b$i%_9wuyYjF&)Q&s`IlqwUe!dDPTUFCy0l2_oO*9tj(R8Q>J;j{?R z+e)si%~J-d+f(H=CjVwBRA*j4jsK|MI@ztvAAhXF_}?cve;TzMLN)RY7wy>fTd4o`M(=yuddKeD!}+MD$Iy%BVJc)N?D7*6ikv_j^}+=&Y$ z<}6gM#g^68O$odBA-NFrX;9_aBRQGvh^eiSW2M+XXUB3Fqf3K5qOXn#-4w1+xKH4G-rI%clSe*{bf zm3*Cbn7N=ed$U3^g$C>~6ZNYLE;F(X2i-n#2Mwzebgvx^9te~2_;Wx0(5Cu!qfdWS zGn7Cu*l-lQ0ZuhWb_&eV@S(NT+CO|u4AYe0qt&vdpinh(kTks$@0162tc`M%b^aQm z7K)Ly&B$L7M3`=G2BRj9jmA!0%j`?!Ykqt%sgbdX*+`w5{PNR3=GwxenuSx9HZGRZ z2A~C~EF-u4=dBKOpd0_Ok0_V2e-#7swGY%tk%qwMxAf2?t+u$JquHYR98>D;>dK@g zu~c9Hd6m?F5`)8+bo|;S@PH+CO=`HS&|IR5T3o%Wg^EwoRhi$ve`_+NjR=qSZ77Lf zmwENd;og($GOGpA<{c^>qn$ISZ^Im_d^}FAD}H{-B_CEl=43+MTam{r$YYO+nwI8Y zs=kxRiDwG5&Z|sE5RFm&={cAYBegkg3#1EkGzN%Ba*TJ6-{e4ud)8GeeCF{;rX!b} zOV&gPQtsVv%M1HQg$<-aoP<>iK(W0pGZG8JKuIgNPQ1YcUnF5>KE-S@#D1;r7lX5H zC`yzC=yY~|lx=(qpmJRttNt7=hv@1{rVWA~Zw2e3=qb*u}Dr=-n;9J>pod z8iQe{@7MAAI>Dl*?0US`;L-2R5U!8TSpGUC^!IosA9U$5 zM@9Xdy>98cqiMbzHDRgcMP>a?kz*HRdPg{DTUa&_U>OdYwL|-jwi-=tCkq&H1@0IG zdCUq2&IpfU;zs|7{0wG6`sWXP88#hD79!?EgJM<=K4g+U$lxGq;3~@2FY{NZ!JP9s z!u+7JV8bjwEfBBl!@FRomFss) zWm8{7WQm~}!@779{u#}I^PKBaT0LC3Jy*?6;xjiHHq{v>I zd~_HT_)j2Bu4$EU=Ss@d=qMg_AeB6^^+KHvWaA+RNygD0kzJu0lV3?C{?`O z2!-+An|n5K)1Mek1ycHa@WZRVLg?an;;@&w(VHcxA@ryuaHr4+e;bbEWdhrZ*^C9} zPI%SMau=mQCgMfgIgD-&7TwO**9e*m!#Sv<#F4av&SN3%FonAzN$n~3-R_Spr#uOgcAuYp-!P=ocv|1Qma$pxYF$xPU%n4#`DQOywF#~G zS^0Nrc+cPa%Q4jg|HKHS%ac0>?;nQutMKMJJG^jBuJ^#MD{4($(y}}r2mX}N^5V@6 zlGd73iWmyyniRuU0piOM$reGA+IIN&@cTqn|&S)?|(+Md=N@} z)-82oP?E8qj%qoDYT1Bt`CBM4L6!9K35Uwcd?;}&l-T)X$>ZRoDk)Tz#M|8~{fi-# z7#>Poc%sEo>w!I4L>@0A>+G>7{@9bJ*vT>a*$e)qXL*voDr;3iqa7JtXwi^Mbl&`gPK0D+U*J-XdjoTF}hZU(Sdp&i|}hM3z_2d)3pPAI^ptnY!dm@0wjvcR5_Utx|QY@|Lek>B|Vc z)5b7;<67L+0*nXXPOt^2Q|1J4=ClmBLmS=cO=+I-6Mbol z%2Vfxv*Y#kXG6wLOO~lBgGPXx9KP+I-p%RWtZE6eeM0XdwpL`td+ekWqU4rc={04X zZeQzDBS%@EntvFpH?7<=lwpIp$R7Ki;)-PLW)qvF%Sbp)6-kVarFKq8(ryd0MzIM+ z{XVz3ac-KHzL~l-*z?umkDfX+Q;!>1YIjKMH@DIukXGfl=~hl>hfooDZis4#1Lz|! 
zsW%*#8|vSV*jW^$4X9*Ly=Ekd<-pf$;H;2FA;JCijsg(7sKOX3}j z$|j)eGq#anLZ0|E;8}k^3ULx;cV#T zY!i*p6HtF=FZ(8Y3o}07&ufRrNmT}qa=6f*r(D5c{)3SekBMIyegaYM{BCbNmQYXo zzC#~^f*i*l<%YA!VtE?%siCwMJ&|XoIM#op)+Z`CmlK=3-87gjk%R$QgG z0H_$U;tm38$_}WvpEdw$iQ|juWX=8yRcD-GRmUIR^NycP0f5&R063eG(G7AWHLh_2 zdVZV($j43%#dv)_pvScZ7^B7UAlwMlsWVCR!N<=A11HCaaiG2l*6A|{O7rfG?lG`u zbR5{O+Qr#-qCYeEU(* z^m5CrYWhlkCBS#WGgiyz(2asD!DFCd->H5t@y=+R^%&sBvs)#|QoFT%Kv2`IHL2BJ zA)Nbw*Hsp(()M|*&K`{;1($B7hq?4=K2{Ucm*V#TcCbCD-!^7V8aDH++n{z90MX?(~;C`PwUFFB(uhwB@NczXO?_tl>8hT&8#hk)7 z_Vmex>oWd%TC$){Td7y+?PhKoxIg`JDsMU0bdP4Cn!V(7u37{{57i6mP^WBX3wsB8Tm1 zi?H$~nJIFE6i;wj4V781hUTqpHoam_RH@hsnZ{#mqw{-P^R<}`r&4XY)a{oUws>jP zTaZ19Hycro1QYsf<%4A~hl5o0lC-o)i5w`mqa230SYW;scC&^RR13dXTZ0zKC-K$}@Mu{}O zq6w2?6?9|ef)a5`1UCGDPvK)J$?BzdKLjL}9;XHcSm17?u*)jSn5`O233e zD_5QrM>O`HDcrz`6V90%H5Y)s*<=Rn8m7bI2I~n>{6TI83yy8`=Q^d`DlNv@C_Itd zVaU8Dho)50Df9%qoSzx!yYZ|sbR75GAy_JdFB~Y|N@+@F4!h3#fwjz0Q261v+xLrW zY*7)HR%6)}BB5Imnu_mIs3Vc04tyil8$165CPsBv;C3(iN^DNh<^4aGQv!*eEb$EJ zhCN4{VtYUk$uQb^-Kzz^7r{>fDZ+tP_R>YuTM(l+;M-2y1|}X4Y8Fi$K%l9UKoAcH zZ0VaEV#(woiW8AEJQ)aZGWVcvfr4p=z+KQXkZXtpKcYoVFVkJJjYN* z(_&b9TlE%A8EbV&0l4YQH!)ZNWs8Bner(z*Dmf*^h#AF2rLF9`Hpdi^_=6AXEmQiu zo?ld=et*Vu?Ca%LO-w77#fKjhDXeLI$zFSNs{KfTmY2PZHPPAGyb=74=fbJ`o9fcA zjrE-Z$wV3p%e3Y2n104X0lgR{zbIM0{rA2UQCD=Sk*Ft}}kN&E(=!4M-tLr|aO^+Kt%1Xh_A%_$9D7^(iNqA{~@MaPf zHU++gl*_V*ut4%7s8K8z9wcwx1O=vpvdwg#g3MuV6je+-+mJt~dIF-B(ZOcgcjF4B zDTncHJTG3Hn?jMo8|>Z4@rCXP(yH5)rBWKxh3M_~jCSDk=?&!>#DT4#WB3{pEN+gW z@t$hwjjcJX1gHV^MmHHH9-+_{9UQoYLR9EB5Fp4s5d=Vh^9zi0;`S>HcXB`?aOQ_m}{tcvh@s2Bb(2H zw;NJ3>RP%%*b6L7u$~3X+_Ka-g ztb5hjRu7V%bj%;Y$;2x3jLMtukyJaoPdIp^3_i-~W!_r^IJ=M9CibZZ65#&R(2G@- z{M`?DIVf&0QP&&o;GTA;;+L6?w}SOSKd<7aRvLcmmSBgk0$X%xb|ACuw5HHe$Rs#O zY^U=nJ3*hPbhnZePXUrHTZ3Gnbcknj2PJgia~@~hUlt5JhG zP}iJeh)^FC_8c{=aH~_S(4-pDx4xwT`WVkcO_dud@13)ha)Jc=+sI~yUG%0}f^)Xd z-Wyv{ySWL{Gp!?983pLePXEwd{UCn#xiTLo0*ZUM$z#F3+gtAs*%y0EPi(%rJNqRV zWd6)kW3sA8f4eiqZS`Bu^VekR`9^?Hg^~^+!Y{2_7w7``ijL^yuL|lkbV^pwF)(*7W+eK{Y zlFN6_yHZwkY#{;-#b+sX*seU>v{HQqyT^9_p^LR@GR%n0{o$sy>T9z>`q0aCKg6{v z%)3Km=qAM@D$IX`$kVNhxBt2K)}q&@F%uTdM*m39wlNzP$tM1&*S0YS7SDEzl<4 zouqVc8qPY~y=dc_Dm%;Lr>OI_xiw}}2W2m7dst;2n6r1|`^)6z!@%?8|K8VGe+gaZ9_LM zKk3OjE>Fo`@u$2Rw6xRi_|b6`&j9)mAhHp- zS2GqybB=u|kuLfWquA?Y#`-fVnyxZU*(H6}dR43`6@=abNaWV!_Wl9;XEU3%B0-}uxmjo}@Ix#oEycc^;R6FW&6`Yl8@ zmBIVsAM1s@9nCU#Gi_Bgdt2V;kT{?cDny2|1mCZGF9)U!2h{6URa1)E1r7)|Of$DV z|NK@t^!%NIy`|KnW%_pPYO{7fRq6=(rkJBZ-LIyOnXATbdt+V3!#9&WFM%t~Yylt0 z83?~>CIJVJiX48Kke#6(vR@ttxISh>fEOhR`0TqvWXJ9+a4K^QIE5ykY6Y$`odAA# zhU`U^0{`oA0-X0azz_W1?8rKPlKP~|&94%;D)2Gjb(yJXcZ{GZ*#6@9Vm)+c7cWN(18IV+ zBwyCu-fc<)*t#5#Bfx8Rnk{HQVB{fPNjX%=5{bjerKfQ*tTTrV z9-wMUIPdqhO4K?tS^je#SLzcprVxDn`E`^s)5Y!Y>Lr8f!iVVuOaR4`KB`>W%NQ}H z)N*!y3{N||rTCYNDn^7ULCLx`>1PvvFt(PG^J~qWlb3h)jg;0$Dy{tqqC3?+3t9pJ z>C<^-%$7~0$tSz!+5LC=J1P%9zEo#vwXYiEuJ>IaWq|Bh#{;VY7c@@xc=~4z<+!{l zAn*0$h+4$~7<4gmX2b{ZQ%`^dSfwQSlr(Uv?gQY3mykW(YBE@(09XR#WJn``az-Bz zQS*`|;E)V_4LMAI26(|}p4`0mf3tU=ZqYTodXb$6ZY%0?y z3a}*|1c~-i)ud2hdYF_Cey5QR^5lQRe}x#lzM9>j1;UT_iSxgCYH(cqfy&UJ^=kY* z*aHi8Pw!Ugj6P=CcNv}P@PIRX24))g>+%u%H>n&WGH%-NP_1fB#ROA-fHurgra zgVh|p`Ppcg!mCfR&v99_p+2hXqpnqFV<^>_@}b0AakV+1Yn*#3{7x=hDT6OSvU~VS zbHGbb?g1|fT9~c()PrcV>NSJC5ToE+%vhxi5Ovs-ZAvIvt8S3taRS06 za!7o51e{X-3V_H7pyEkJvk-`~{7iw1;EupW17r-n08lo_3Ti^eDV2PyML^?fdSCrL zR$bpv28xJWQ?)*6rxYWi6q&(3pfIEP8*HFAc#%?3@cRge5!)+O4$zT)!~6p_!!t~0 zrZo%=M{wJWn}sQdbr)g!)nJ1?aA4tbJA)wD1Od};jR&19Ts7a}LQobkoqd>1Wi^Bj z%3#1a@h5-m@$z`+m;y&=>g{v3pVtCugraw>zD*`mm{Jz=!HFzB7{O8{%p%CBSNix> z%87fnf-?lkRW-~b3HB`uUt?TZESo+Dr^Mw)jE+z|O 
zgb=A91|4t%X&Z{9`mE#Z@$tKy06K;$J;~Z;535G%EOu92=>_{ViNR=8%^oMVmphaM zX0Tn$SmZE0Czhmdjan!D<)-#?Kkho!u1j_xD6Pn zOtZL;*H|twI%J|`+R-K=zjMzxWBazxa~wBZhW(Xk>bn z6B%2lSmizY$>XBP-(w_T&bg5&@C;%6?3f$F36*r_6Y9DY zhNN3JPtv^myhzU#LpUKHK+1SpY$5amA^~r+M_`v1VJWBhz=V!UGi2g+qtMB0r~6bW zkQ-$W!pVRGF*T`XkdI7fJ9ja^agr-|EQgjZ`UQFFRL5Cb{PZaJotd<&s_Ibr* zgNY8n*y}z=K(qYrhWR=&pl-F(^-^&`KacM^T9?iTN1uKG*uECdphLdbUxZq+7O=bl zX$^dnfz4yV#KG1f1G5Q;wd0suHV;xsY=0t$FP=7nU6twEdjz~Ozg++Ln|7%q=3+}c zwO)R{mBRBi=2byGOD$$xmrn_fjc_jew$GB}$O6JhBM!l##W<$#;M^-|bj2aK*FffR z57)B~+yS%QSl7Fi7bUis<24UkuQx0eXSS;3yN5;G&6jCeD18vptt7BoVCvEGy-E0x zus-9ry5!e$C#UUC{}B*dl&%}Lzlh_~tPz5K^_kE0L#ti+R3aX@_0wk3`t;Mz|0LlJ z8`;``tpQ2!+X%Mx+h1$klkJsw(G(1&8Vxv_5sY{Uau|(<-+xxq6UnUm8qvN56adPX z2cV?Bwa8igJl+CU~Ebi^VFkoOs=vn(c!l=usE#vHx~r=6Il~-`W>W zm6KVO$z35ZTRd_`xJvQ`3|SoDNSk~?`MTv@B*kDP%7W7osYwXY1<0`zkuRV|V#1Ps z1v;zL!eOO}Et8QI!PFok;HtTD5bnp&1aoH|blKn>ZR6_Z5?gGfGFl9KO|YF5b*xo) z4;?yiE7roZ5!8LfWlV=5w}(EMDRQeV-TIa1%Co;AJ0`&CG!gvIdYym2hS3FmwyDei zuGigsA~4WIHV|241JUA%*3-hPiv7|JaN(6>(1H_Q?ggLm3JX4r^XYmV>rD?z$Dc<< z;Ww8xsCZ~}jrD{}8EzdGOJsobiSxI!u2X~{Llp(x%0r51^V2;a)s)A&fL45DKx-t- zAgDHYCMhXNAdHF`lR>h1KDnZ##JgA3_C+4rv67XdNa6uzjK&+7FB|R5GLZFn1_yNm zYodW4i8z?A{N-Y^scsbL#OKx~&Ro-jPwsQ%eUujR0hWUr62|Dq$4=z6K)AI>!bxF* zh@(4tnqhI5Td(sH_%))pvLuz}Rb{G57=4(a)S)ePh}8iRR`-sV59RGs7du!xUPK@xe!TXoKkjRPfnGJQ5zeJ1$vt#wKkj++rTCx92ce31yI&~@ z2`qXGaSqcwJwubCDx1`pXCD@I;fkcsqTStQ$`VI7kRdk}rpSq9FP>iaQ3R(7ngV~H zXGeenq1ikjW|FC^GUGLFX5OhY3{j77&6E! z^!|1l>Zxs9fn*tcuFnjW{$q6ET6ReUwGm`6@3td>C#zw5TM+9_S(+n1w5IF~_ZWC+ z>MQpBJT-H zhmY6Sx1Hw))`At?e^w{|;}dk33~rnuyo;#F<&l(~#!|7p1qT)Mzz8!=INGkm5oGId1VSG}s;`(qit5q7aSq z)Va8MO$i5&705evm3Sc%yU}i>FH5FsAS$D1DN>GuVeo8|CXeG>ORTcCcw}E0_nF7q zUYUHlH`XSSVpLN1yEq(^R4(!QTD_I*_Ja49!28+Xc3U;uyeF(1)N=WD_3!C^er}0X z_FQX{?Rhkn@Xs;QeuKOWt|0VBQ)Vvg`KJ?|+1S{acDnt}#}5=HshB_`0Xy+r*OwpZ zN`vknYNUf#w~CiSoC+6eh35FS`4k83@+PJxayWei4K0Qxvj_o&+Yt#x`0C3s%!8Z&kkiV%vbiSv^^{rRw&nY_<6>Z zW;Od5C_fJWQF~_QL1RvK;bK8p`w|VK_WQ$r`gw)F%gKMbRpJ>k$DVsOSk5oZ2N1f9IHEB{|@($7BNqyM9_bV(nj9ER8h3kUSevoL)`CEGLl^4 ze*#r&3DEYZazyM6(cOY}Tb|!H8i$TCF8RzS8Q_G~BQ7F&@x>*bl!Er^T@(TRn)Ba` z^;)lz4vkj9$K?~hEZ}~I_OhP+yq-7Bn7GP>+)%YNrj#>krSs1+561-p?l-tONR(w) z6I732xG!rOCdEMi1aCGzVZxovK_P%QYA%e)dd`~86QR(=#)0eC4KWujbgDiFcRhzy zZ>N?2UE!Kn$idLL6uLdiCLF@>twS#ypEUYYvr!@bMh&9km+iJ=D3LCd=ym$~X6zkC zNy%EPcZnC0Qv!Yd6LH=5&&eW=dV?+pbHG@^j@o)E=J&GH@DCNijTGsgNT*o=R|#(vXaB3^^cgUwxOjQ?<(Ychj2I-lMznmL|Muddj=UV7@U0)W2GU~Qx zm6Y9OyJS(jl*=%y&g-d(;!xQVp0ifL$?Vpg^vAF}9iREUDQ6X6y``RrWvKiluI%>z z8@E^qT)NSw;7Gse!1FCe+>ZbH+&T7f-lqXIn)C+iH zIh04D7?qd16x7CFWE8PhAUSWwsw?|7_j3!s9CrELcf$=n;y#Ar?}P#_WnGSg;vDw{u&$J@H<&goa?4R;55Q4 zIEA+(CCxo1Z^}^?OM-+{5lE?052J5k*f`E0-4}>_^FjhEF?QV)g$XD$q>P}ezUhu~ zD9!O~qcIuE%zA@S2+2dcJ*6s#yvcSNuAZS15(JfmA|#3IMvcOj*_0= zkhYXWL5?D6PMUsMUq#+QrWq73bPu~@pUk8Du=YhB`p2W;f9-20U|*$K#tP9ut*_@g z96AovMLDr^|57rlzsS6;s|;|Vk3QGV=&Bv}N&d6_f_s`aqpv+`Kv$v2r2XQXH(k6V zD3BeVwfB}@V;U{f9w+>QHbx81gQvOm=w*ruv-vVhFkeTGgRUu(j|t>FHIFuiiIHe} z3yKCMc|!ypt4d5ALBCdRoX`+b|u%|BZ&JqA=oP>W06<9SAcvS_&0X5~fMiQODD=|izokUUWClGc$u^zBp% z2o-Ftw<`a$*ACWzy?zt%RbFzrW^$*vs7-qH*&apk^nDV@~eD@Gng0?uz|$e_|O zT4$47eg2X=E6u*M`uq(D60T90!ia;k=S%n;rDw&@FdBPJc&zQxJ2D%|mb$I5ZpNRC zvi=SZRE|G`s*(piLn;}`g1jWf{5};I<8VwYyMd|KzDbV6}jd!(QpUT4sxR7FD^B(H+9xOthp^Nkx)3GpR|T$$Lp) z=KbP@UZCvk)=&N9$x8qTe^yS~y8jwM-%Iwm7a~8rhC=HtzQ>2YJg%=8;9{lo9n`CC` zbjuj}mZq0ZTtEX00e#)TUoxl198#>-VcV%l;7uA*fduGLYUc`kVe=B;@5fI^DUTq` zhb28(#Z-NooZRVbHIdZsZRrt$M1U``xa!?b)3gXCw|5vJ@35>H@|#7WjD{JU-8jJC z5+TRd8gtvY*>YP0n_{X(sewVStrW_~gqG!|oRIj`WFn@JUrSV0^U}7qD{(P&$IA=h 
zKfbt53e;@(zgjJDFdh*vE)hRzwrb>KU%&RhX!(g4IaoPvIy^@?9n4tRh+vDb5FxZ! zW?#P(9mB?Lp_(yJ{HA1)LU|u_K1@J-ztyPvA4&IS;6YJ&jPy`q+r5jko@%Rs>5gBQ_K`r zQkE`?IA2%EV2^x`V&5~)5$2}2?mo3rhhs5NQ`!{~nhdV?)&T7$xJ5>0IY7+cA5&Dj zck3|3{3*|>Nx2%JBMy^E#)1^Dwmfl3cWQ@jop9JP)cy|Iz3DKVepA2zBp1cZ+58u;Vfomt+ zyLw|yEhSAa+o3gKkE$fXHTU+)uWYLlNmW+$bas0*%|GSBXbsc_7SPma$N1^ zXr%5txzdg0e>WxMTJ4tKu~OIW&?QgIEl&e>iFEqyLOkuTeMjeMO7y3$u?A=hbQxcC zxINmB81_BD+;Q4jiKuD7yf8lb`V+c>W0IR!n~679fUx_IakfyDHbjiomv<79v5sd) z&c!>zTz?J2U8&MASth74eGtj6=OFj$` zaIg3JFoJKTH-#cXqcq|fvf!P}b;>$v?rh6)io(a~ls}xVY}M(=Pz#Dg z@E^m5ZUtyD9s~Rh_C{(DjONe%47br-F*r;Rv*kMtwNE3Fh9Hb zfD)+fMt%4KVWb4Jb5!KC+w8Tqo7=9QtJ|RGG*Gd5A^_CLZ zlZc-pwez&Le<27i&hT>%+9~FKE>ZIR{ykLPXI4}a(qyY!<)hOc z$F;5PwevK^;0F3rEHF)L{3KVOpB*VB{`jBGD3`T*w4awdNAc=FkQ3wQM^21BT2laZ z{|0wp?@@rvr>}yk<}0s)HYwa__WahjMdOyj?i1?sR)>STYms2->q` z0FkyeUJWtEOM>aZL6b};x)U^?UOR$G=QB4tbDjGhDioKgufRSoClDy?YHBVbw=T@( ztB)dA@+#(PfTYa(kju4@cj)bb>!!mF>3U`mgYr80>e<9BY?zH&tCq^#PI`gt@k_=P zv~soAPO_}^cz*to<^2}A^Q!#IP5N5ErWKnI?b!Ig->T1MrhLbxJv3VK_5V^)e(L{b zkkRGAptt%z!K0YvVnQL>o7yFE12RtSVo%p1w+tCC27R2c=M%x0lQx~Es~d(QSxxTL z3nMIk-?4aJhikF0{h{7VLq+bPi^BZcw~*Z9IK>v?pgP?NPlHa3Z)O7fB0uM16oKsy=7ktXL@~G@ zXHal2K))fNvJNlIkLrSm9c@V=9tVGKSfk=Gl{nfyZs_5wsmxnv+18t-M1rI;0K}NzS8c0-tS` z78pYjIz6mj9u*h%?7~QW>UB7G+wVFH&Q=s-b}6PSL+|Sk#`GDEchySmb+fZ78b)G^ z6E1H*HHx9Xbgq`mArY0htrO|U#|90E7jF$ys>@o@F%5VhU9BXGt67R$Y})^Av+^f& z@K5L?t7fkD?4iU4d-CQjUcdgz?*ApV>iB@AA&@1`OJ&spZok;G@CoEy8)^|QF4)8e zK3Gp3dt34c|J=vh%&1ETARur?9L7xFT5%V)(agtsl%{4T_YAk9(cv= z59-%4X0{tmh|h~c>Xz1cI*g4TdOtU%8Fi++>Tg9S$MueJUni)0=nGV#_gk7`+O3!k@Ho_54KN@OA5cFUk8(uD)T4CGMom$SCl8RMsVz=BbQ&P|%z~7garj>>9 zIyC-#qWDYbHT`DzGvoXU*RY_wlivJkgV9LpVxiUy69kW>|1Y{Y;u*%flE>3$tio}F zlCc>!9e=LRd@1H3Xdjk3N=erF3%5!edfo)G4$xU&49LhTIQ+Riu%DwpF=G(o3a1Yh zplDHZF`LAgYJ?J~>I>UW0?Mdr!ys26MG4g{Vu(bL)B{~pD1)G+q+})hO?Vr0Ex5Os z0~B)G*H$S)7c1v7jVi314#!l7L%jE<4}bZPx0s#WR-db2A2|;cjUE8J4m7fu!|fJ6TtL zVrZ;G@h4s^K!8|0^q`A^)N62=a>mFO&FQ{7yL`Q0#dA2>e^t1I^zw~);*2daq}H|_ zaRIqici$lA}=?rE4@usVE^`BGR3s zVIVCaC>;t)r!=EGB}R95!>Iqx^Zs7v?{y!K_IRHo-&xz5#(*8DHxNIVyG*|H=2jGA zm1Fm?BP&7p7->Rh3D8DR955c?n}DZMR6*+-{f-#Eij?PVFB}O5pu!kO`nNBHaXPR* zS%E`(6(|kIH!3~xa1Q}(kBC5zC)_XiRhU?Rb{giO6 z4BZ;KVP@WQjhSKEjP#Clc1Xp$OK~v{xJL)vTE;;JZTaBhn_F>3M@U2*Dp`5TmZ8%2 z@`+?)F{i*ELgnxzI*2m}?qqHu!sF2c{1hns_BJ`klwlJMA?Kw4v1RVn!iPN#O}k2c z4VJTQuWvbMMr8?$uLrw5KRJo9`~$r`h#oD3_HYTfOJ>= zq8V{BOa};SVt`v(No`3|;d&>`%fL&752R{)`8_jzXmq(Pw;A`8lateq9#z)lSFfStZq|=JsC0;ndG5w6 zEZUnNB>p3sE@IR`EhI}6J%S!m=!Vx>ujWGvP_a}y1s`r%idS`~dJ6Cr3>_t~7^Dwb z%*nduDYLe><#8~w9kMw9SG|{CeH!p_DHI^B3lXIJ3SRnS=1^iQqXF9y%9MJzJ=Gtu zoS=C^a>sT5&nvt*MtgT~YVe0CkJuPx3}2}`T@B=<)%u8pFxL)h{KiV++cLF@@jT$U ztgQaV`ah9=k$7iB{ALUGt#4>3NC!_RVE;hW?o3B%?$vVacT)_y#P-4QCPXPCIc%lm zoDTs$jcKGzWz4Cd`<}-}RJfS6!5%pCOVUUX5Y&&*chsP8+|H4Z?M5QxdT4dy?jr6v zHtEwa%-=uynR}=qwzf_b{LEaSYtnaha7gg0-+;=jztDN+CI=f71-Jw?Et?t z`16xdvWBa|l=z>EHZMDxlHuVR{tv$}Pj2szY$@|IIac2-yzz6fb9-<}S9I$?X+E7w zzRYw#eQ@hb7dD!ia~D6iQ;V<#P;=J+^AXRWM3@-2j39u#7r>MtoZ{(vH4z}Z8WQFU zC^LL+Jd~!_Y%8b;Q4X+;ao!Zj5zY4iIz&}WIa|B*@P_@;(UK3AReJ7PxX_QrB2vypdTeQmCwZ8W zv>vuOVl^Uf${%0W=PSD6VZtJKa#ygZpV;I-q!Z)R+jRfNzWj+;Sv#{PA_`#AhvJ#X*RT^juOQ)E1DaxenglF8Nm7; zg#Lp%nbZ5WZkpuZt*idZtM-{ff3U;p#4&}Awj z(#A%1Q+dVJ-t$o@J}b(SARNHL&P@!Ul9xJn{H3|3?Nu&|5ZS;Njmx4(X3_ppt{q1g z>}ZNhu@+H;;iV8qMijS8jRH;0uo%cU>b>s}j@5XL3&U1Ab?k4?4<+1ID;<27yB3>wFNLK1x9Dh$ZwHFILyao}M0O$u zc7^EyFt35Xb}&Aip+;ode9-$9CfC#1oOm&%-TjSfLkLv?Z>;92hd!vDAfZD$3{#k8 z|eLMC(YW^lXnRe<$U+(FKd`jg8oyr5NELh;tu zdQ{aDAfueZ;suJ-Zf;-q-C${3(CC4Pri-rm*-U{HCU%A4k5sF|7c)w;(=Ib6mtwTo 
zH>?F88pRY()nzskY?lPgcw=B3MtnYr==+T`vVnIb!p8^#$_!vK(5DgxLp&ZGCq4lcjBR12tv&BI zZ*q|AVsvJ6-{xPch7S+P$z9-N*jw9#EVjPC%rZjz$rUY;Fc$BMa{eTpn-j}jtP@$| z36;$bxCzeP{_%H?3e(vB?DbG9O@Ut`RTXE8{||LP?nmvc3^VXf^G#p$YYEx=oy-eK zp`PbE_}(fc+xqT2)||TyPD4M;Czm}Pvr|Fq!1W5=)N zw>Wq>VY)m>yC^6E$$yU^(0dge9i+De_FAGo{ZNZA&^6px-2ou$CODOZ_s1r3(yyO; z_`6u1uRX#~>A3eVC*xZ9E*>eEWq-X)cvr^v@g5AfCMeTkGul}p(H(h*^XH+%mPCmd zTMMor2OB-a{QBtj&HTo(YhLAzBewXCa2tk9AlK_yzX~^sylG6H+}Qhyrv{0^*h^%0 zfrVVJ6ssI0w!Zv5?idSx&BaCY+yd;#3nrYNe|Fe`tJ^KPI=F%Q&0zbID~;`|qQ-gf z>m_Q>uaf6FJ``FoD}SMJcMUtjZz~S9&xQd}KW%INs;Qu7mWc&o5@hm9lBAi&6-1o#SiTaY)le(Q%rmwY-Z z9P&EZXRnV3rAlde9({m|Y0F4@)Cs%GZK;`_tM%G$OmjVv{Y5kTOVZ97zP~oy%pslS zw{)^%t!m_bWxC8*zRRoL>(q~qCP`&advSa9pJ+|g?_VHi4||%A2ewB!JdeCunVe;C zejm~)da8Zub83A0?)1bH2bUIiBy|@9zEQt{5~b5LT1@{QKn3{C{psaKwdl#10o}bL zM!JIJ>3f1WC_k;K->{9?snO}jQ=1V`|M{cc7IW>tsk>?Y+$u23wp%_TMxH-7w9LN2lSGdxyBH!I!=DY=@|Pzl zJMPg=wz-F@u(N_!eZ?=Uw#wG~5>ok)a(79YV{$68N!b*M71G#`-FN87C4ZZ#Gz*Ma zJ+)+XbD7`BFcp=OJdRJ<4~0`FQyh0?^wdFwI8`R|c{+J*3bn)Ftz~A-=BU~{&uuyYfpn4$PDees=%?yB@A>vHpxPjWetLTluAlLSBPrGGN5c~A5EqzocO zgkEbHe(|0twVmtSdYRnOJ7OGVHPpX9Ev#eFPER3|h5c~#;Qpnzb#N;Eg!#Y2#=;6s zkuUSQr+%zGzXl_(#x`T=R|nD_U18k@YPz@r=;GutI-2G^IsgdVC5*d1KrVn8V(X@_ zX9;k=to)sRcZnzxI077EA>JE8tFR;qTq#Uk)+A0qj@b6Vk*&}Us4en+G`g!-;~n5W z9za2u{yBTW)8Ma)zl(E73v8;{Ih@kP+iX<(22pkYEg`kAFqSCbL%>{(hu0ZH1XS9@ z$P0}Fl7h}g*;%lnpgI=8Vo0o4@K~;pjkgllq;vogIudTwui?FzjxuFC(j?s|_w=cN znsWj?gnLQ3D(VNfdthpX0tRA=zQH&(GJc%)9a(wgag5Nnzg|Pob(&Z?EZ<2J@#}sTPPW#a_7BRBxmCrB>lK&e3rWHOv2o=q zvJ^maTAzuG@aWAGj~rmwd+}+fNEJPL!(6z$8X#J>o2`|fo_xvzt5Cz9u&eDz~f zc`l0OdV65f!1W6QXPDk8Q*UWW9LBbbj{lbckrNtWm3ddyxZ&4^Ti+1hPAV=xnxw8> zeltl^#$T1AeCpkk49#E}R@UgHV*ckp5%qOE4vS#JD~$C2e=j(9jeliP`f5m`oaL{n z&Hk&AnRi#UUMo&`4x#%2)1I;kw4Bl!19mQFE802x&7pnap;9t4rawI8+cC8lmO*CC zenpYr=iLE&ugj?j368Xd^Wh#QtcB-i0;RF+t{t*mx1Nn+=XeqV5ioXA*pMlvVFdgx zt=IPpQ4fFzQlsD~33T3XJu4y=VRH-=+S-jf@ma1WTNjvm?3L46MoIWawe$!ut)w z8{4@Mwv#bd{TF*qvQf3e3gN2p+{ED^p&Z`%xqb?;w+rVR;RYw**w@_?XB%0?k!Wrb znicMjl9L(pug~qb6^t`awizxNOWv1^s?Ya5iA+N*H*faD#yOu{9q&xHyhZOEU#r)D zET%`g}|v1`vxbNH@lY z%mjLc{N(Z9^ei&!BnI}+{Z;CS6*^1Ou+8_2?3VZ6>S;!$b~;|0ywz1P%spOp{+%IK zEw@DJtsg#k;dC)+L@~dtkH_j|-@7J6B8>@6T?VEJTGq`;p`%!M=vC-?v6h$ks>vG3o!BqM}DY4 z=0*+0b_XxtAslbO6ebM*<6->7=hh|97vt@Qd~F8-pC|mUMJH z3R55RU>b`py8(%}laMKyWU6&uL05#qyJJJ4yi9k+bzbqyyI#rvJ%`YR>*=;s!&t)I zoUryDVLod{7$7qOXZeYR)iCy6ILq%8B6CdK3%<=QicG-IBwx_May*~05n-0TUldpo zmmD`7)v=&rhG8;E*c?D}0%}8uj_fUC=T#Gpkn?``-RXY0UZmqiSWpw7&!g_z8gon? zKD2Z3%U4(Jyf>1uz2+CQo$^lm=U;Fm)RIG{>#NLUSz~@ymV<<1O_gt?y==IU3{aUJW%uHk~OkR9i(!ic>Y84GWr&lCz`5M3T?` z7&N$Es<0C<80%eq{qNj!_C1;8Of5CkH>^LL*=Z|P_aW$9W}mZOJ>Bizcou65xD9YV zH8TO@Vv7JxmMD*^sPHA29^;xE)OQMHfKC|kmIzNBugL;PrFtX0p^|2;w{vY-*FsSI zg73Vk%njg2%;Jd?_QC}w8e^5Ols!0CHc$X=HBf1q|NZs8#QP_Zg`hG~v7>ZmTA<~}TD8oDM5kgQRyQr>lcW3Yy z*fz>yxhcyKZ^1<8;sb+w)TS_5(3%Ail%Yfh7olg1VO*efH}j*#W)FLlMLDl|l!0yZ zD?b+M^iOkvT*FTA<7$QRz=5MbeifyD<#kpe8B*I;YGhPzX&hr)CQ2VN*!LmN&wM`r zBUwzI2yUlgv@TqVP!36Z5wDAz3#5c4li(w`j(C6@fJ%5lga4;Fqu=APWk*5lt+e@? 
zGm!`L(8OOfGNc}Kg?k3kiTmVHMsH9R?*jTeSmTtMq?CdM+*#Qrl`p~H)kl6x(8g7w3mw_z^LA4E z(ok?YWVz`-4sH#(KC;uh=;rCQsBXilnwfS{gJ*qL?|{6zxqKMaK5ge_)vpeei?rP6 zqQF{?@jU+i9F!jMOYt_He8M+5(P@!ZIHj{XZJgb*dj6b8svMe6eCmA3vgq>6fI|GP zm|mZ|;tjn6Mr0_(<~SB}D_0L@WO2j~oLBvFRh+S6K*##O)3TWtm2`ndJN#grigf~> zznLAmxKbnZsd}_YnzJjloe9iBZ!`V2k_Bd78)a0{%yQ(?UkzJPwsaWn1gKVUvs}G_ z{aR$dZFN5$D5FZ%C3GFy_Ac3l3%I^raC4&(w!rYsC%SXwofSmJ6<0t9zPe5((|RHr zI%SI+z5Kp(r&x`m86ULp4IVb?lDMi3kwEBwi5>OAS*v|q1X0BEu_)tLkpOy9o18at zXg^dMvx`w#kexs*S2P~4bJuxTTCYdbiE^c|u8-ydsBDcf9Zlo)8 zB-^b+mgi5{hr2ArmXfAuf{T(vg>5pi zFH*7C!>>IqMgB&qXa;03(OeYJN|hb$k{7FS@!s|Q_l%s*@ICt8=^*CoxR^VoLwWBk zj2p%*-`=Vvbbr8W)U=-FLADGdpo&=u0ccco-kyf5arT&I>P9EmS3JJxqlJ~K@{PQ} z@@k{;b&dBC{{pz`cE%68w`LfPt9RE!Kezw}b+O1g3=0MrAH(D=;yfMr!2qenN6TVKM`c zf|F%M%V*Bm-MKHe>wvtwD-t`Yo!7CSZbAr`+&h|Bt$Lh8%RM6L=UfbOak`?!34cCX z&N5m@w!&)0GIYU1ebqk@9(EykYhxWr5yTf)Bgy(-z2o^&zhOf1^d0YdCEb2yX5f94 zyLJ2&oJ#(O{+J$_NC3WkTEnC(z5iRw$2NzkMlfck=0Qy#X^djVB#}9>7T4!??ka|< z?c{0I*g7(<_~kK7r*_c|S`8fwPR{*|la$P$N8f#4>lqu}F3DT82X^-V6Cm~P=pFqR z5X@fhd_Jjjao4ri;~s~#zqG$(iftqr0BykZ zrM8gKC#(uoLl9ki$x_KfB?2kevDzlS0q9&6MUeSeBsjY4Z@9_`)z zln*xat;lsnr^8oU5crH{ioZb$FEEIH4%6t{E;Zr1z*88HU;1jh!g?0`6LxvaEiD^h zNA?2e2_v}a{JGCG$unGf>5@I`#tG9{S|$~G>2cI{_tZUSP5hh=JI2qRuE}rI^dk_h7K4o*CKMm~vUw`P#+o^wX)X zi!Om5#ngb@pbqz!Z@F#!de9KyfM$fmMpGGquc=XF9^Yotuz_SscOwOlKuegzz}<5G zxb=OysNmg}`GW0GT3ua0R9V2yOb;I60V5^XJ)S4tsjk}I=T7PRR(i*m7(@D=kBo;r zBEv}oQtTa7R_G8<9HBJ@g|X2ld{-czpgMr{T^P_HI27QxFuW6`Bd))d>-S4b8u`u6 zz{*ObSF%?!vhj#aHeB+YwUsV9U7#$KfBdW8?@UCTt){RgsgN>&YLaR#3&fD3^A6o4X436`RcS%L&;gv29>EYWwZ!GHj>Y6(8T(^$MqQWARS zdao2o+=!#TP;AC!%d;aIS8q<7J!ZLZ1{!H9(TO$?sHiY@sPAeNup;?lhM<*Evxa(P z5LC!D3*otq1{}Y78VfLqrS(~9xM(~(lS7O!%)VrIV=d=2tlI7#0_{4i!@|4-DR!(> z&)2#YX^iS|Z+1(Mt3o?GP}cATBkZX}Ex1%CcaJ1*>aL$}Q^?uzmsyYX&Vi?ox~4>~ z4w_MS1zy?HaoiKR&3tWW|3e~e!dyk;;cYJ)ST*zee{!AFb@Hjjdwuj4u8sXrx+wfS zC_6#wbZ7IB${lZ-#0**{p8|lsaKMQ|^Oq`vu))_rpHs_I4=?Aj;JOko{t?_3K#|mH z-A#{@R7HBX{+uep)nz%&&&p2|tjpT1^*h1OwO{skV-Pz=Me}A}NMc|z*O}-j!i}o6 zjX9z_t|@W@a;@saaC@t<98uX6ZUY)0nCo}Hx!(t_-7N~ zUXT6jkm9q*bW`vuZg)A++;7RNpPiizp*%J|*AAHx_gu?rCl)qxh`F!ocS3K=c0W;MNdre+Q&c&_QzsftbO26 zI5&?tskEBY>9?8?N2K*O_Eg%72X|I`BT2N3o<32IGg4Z(x5E_Xb=>m*yaw6-KXEad z_*q`>)EW40u=TU$K0V*w=ialb&ax4YOYOZcs5zd95uYWCvW$X{3>W)NaS13&G>EjI zitmYLxGgiz%izo zZip>EMS(V$4q$i}BAUb525pc^!s^@Jr*^;qPy^ctfTBSk0Uz#q#@()dD6v{bBx2ZqZqKJRG2 zrgjfGi8H%`@h!(zH;XJ7JSrk-_hgAWL4?3ap#0g|Fn^+v*)^)8IlY8aODBecu!55w zm4-3O-h(laOBr?9&Vd*7cFb5hWM1H`-TUP26QVPbXM2SYsC^|P2G964i%6%HZ><*3 z{{G!W--oh_czK&(UwYdIaW19({wEqu+d3Id>O9ofU$|_nPU>vQ6fEcaPP}+U_h8+R zw~^R+D~a%Y1N`WscI%F%E&E8j)KcIv3M)*H36^DN=xop+C~n-9r2^yB>?LNu1(lE zaD)_}o(v*Va*KR!>6^@6a(_*V_*uupP*%z{Ma$dh`{i&4A;qYa_*xnpom|2=%CNfy zT*nVi*UF?^Y%erCH=85dd-EhJ&5jAR1(!8Kx7X{+**`ClL{}b2kr9psCquvXZR@XS zmEp{e07tsrO%KP6VwUAuMlva}B&P$|;hJ}*`k5y#k7a*T{lN_FSS$Z_b26$&f2Y>M zA7m!9R%CkTWX!Pjp=dg_ME6ZcsL#ES6fTQxSZH&qU{}Ru2{E$i~mp zd<+xV!y%`n=Qs?M9DMA6m;{s0=@j&s zqMe5yWj(y66Ei5@t?CuGIChC>+C-L#hE;IZyrz63ffO$$ON?&%MB z?lh2A6mHy#*q_Jib&cVyOF(q! 
z7ieaE<%Y`J$1)L1g^F1>1IlNh+dQ%5guwR*X?hBG2f<G0Uw6YZbcp1W$z< zjxd(rCj#PUVpYG}iAnOBpx=9HQWYn|2g1FWDo6<*#8+reG&O7Jy6c0D{-&e1P zx=ew&nysgB!zO;{gSmMnZdN%77FTL_Bl_udeb>8eQfJW`9YDrLGM}0T6#(G!!lwkQ zk^SPE=F~DA@Ch)3VFvvrDojg~mU!hX3X{Ft=#RKIDU{Jj!7T|Azb#hl!_M2N^_kjU zq36n_zZ^(*8TZvlhm@=zri}n7A*%Img~@*^G%eRi&080D2=OooEJokxo!9?-;LGx* zAa(J^q-iA;P0uZazw=XBK+fl5>CDp9*9=hDpX(M({M;2iXLNVUymw442A^1_`Xoc+ z*xBtlc)@p3oOA0&ee%NBz7=%}=i}rzYdzy>Z%Jo89<;6gG~Q}SMdZNdexg8?{Hz~H z5JRM42U*3vr!I0U;CM;;3T zrZvY7^mL=vEnG&}X>!MFrahz0PK1!;`h?EyByq#O*%#R^>nR zkgP>{-|b7iQ5w@+O@@8y^TbAuH@~oJ7}ih_8i3Op&0i*<;{`cy7Wn>$1!`c5x;if7 z5HlF~wxTJ^#r?iXc+!s+BjfqYRJ2iZuClQEUy>?eqWoxvOBp>Y5F7!eUWK7ev=jFC zc){WGCPOwFF9)W%yVetNdnKd^U^{^>COc56HXXx-kmt#0&>+4B>RNZg>P(R^QBRG+}spXJwX+FnqUV^zU zys%tOaVoG~Wx_2VV?*UH@6O?}fbX5=ZeI3VpnR;;ifj}tUp1Ef@?RT==?~`*oEx7w zDM>E*hgZ`|n=_@S0*G7|8258=nIPlc%UWXf;xE)GhpZfp)a3%?f*p2v-| zKRHe77gzjji!?{4-X87rvXQAi{C5ia{^-Lg)w})Kj&mXHWM{d3RN~23#N~(nc|2%S zcvCXtY3(vA)ijw*Ded`axLbNTF#eWCtBn`&6tMFMjYF5cHq)mz{)9fE z!pRA=iu?uOS+WL_>&ia^^qepq|7A^l{F%RDTQATOvoegCXnudsi=w zdiwx%6nj;qWiS2RK$8f8vR(4pAJSRs;CfOZC-;@1Q|DvuGU9C0)YEo``=c5MXr(*1 z2F3-&Jy%?Y_s%anX>PW9`w5 z)IWL9U#L$ET}KtTDf-(A4kgC9kH$uW^6%e|zoi6!GB;O?f2JIaw#62n-~08*(T^Eb zVi~5|?u)_5Z@>6;wNXJt(aPkUAazn8F12o4u~#Uwvge<_$3X-fT`mS+=5@K_e z^KF%#<_j)CEN?Oom5JBFZbc`Y{*emH87P;zV2hV9E0Hga(x(*%ffO-u&a$i)iIb^$ zyF|uh=zV?=ei68s@kA&EN}=oZ9Kh8gSUd07=eyEhX8ZSzF!IPWK1N+wDBL0;eB+Dk zJ){d*J;D2Z+CFiBbMs%%&&TG$MU;iA8WsW_3|#Tjk@3=IqUtL@t0Tu9zk3UkdnkOrf_z)of7^n}#+L{=Q^3Whyu zw1%!Y@-;H(k2uJB+{}k|=aD0#{Fmc2IRREe&X}*4Gwyw)&cMgP$k&vHE}wB{x2+mq z|Ae45eI#g?D}0oQ$?+2j;dh)tg6|U^if3Q0Ui4r;?*Np`9x$F|`D~4wlbozLt{)#+ zMg~>%9%4P$#U=%9VZD~FQ8zTACQMBZ>{m5=ZZggXqbKqHzz_$=KulswZtZ7ZI^WE; zljtymYv8Z_B}Gu1jKfBZcB6V3p1hv%xBCqYj9jUxbt@$>pCrbK7)&8 zNrHkjE||;VQe}md3|?}FTg$9kEi4Q^1#W#ZXE1}`*X_ocwI|9ErCT;`O>N&`~~{*drUI7vID7ZP03@bH)s zJH|U@MuMSg&iI&=tdxoykwWK@tse&v4MLT zTiK~Xbwp2x52mdweJgqGU%WXiX?npr`=uR+qU4+7my?qJF}ni8&R6B^u-U=>NKGSS zJ=9i-ZGx`RZ5uk(zO=>C@;Dwtm_GJkmow7;YF(f2tG$eDRKGa)ZN*=+AqPfR+A<;O zFoh*r7pKS^iiv@2(=bn2HKvWVoeCB#L~a5=kxk9qvoHb%^;I7`fJ=kby_x`-ihE&Ko%>Yo`;X8FZ=nkFtyJC_w5J z?ya|@{$}CjsRVso5@Ir?Q99RQ#Z%UI&l9BodF3`!`1rGG! 
zq2MXu-NDT$eW{zaEoEbAAw1__(JU4OMBzLcBnmnYRbJ2mKA2F2=L_l z<~=g@PB&7f2-Pl9hBy2HmExncWjU&O*ZO8-2%VCkvAb<+zeg{*#7t1y7@JhSln-{z zZs~j(nIBNmY2?3~Dd5^C%W#VgC%MAZ4ghhfz1*4n+F16zt^rA6t0cW%IXI6mS!9;2 zy|_ee*Vq0v93Rz28X%s&jUiqD?h3#ovrR}plkq=Sk=&V5N*SPf3aVKsf?RUOb9$>B6Wu!s=@8_(IPub}TyIlN$qJ$mw;z%7 zs@*7@ayz)*@sMsMCNCq!TV_oZzKN@FgUCSeC}Eac``t29nfKblVZ(Q{a?HQl!Jsgr z$PKIkdd597gKV#h37zf$R`cp~Vb{u+NEs3JNrUqiWi4SXav zJx8^Mu0R@_|NoiRV;&?OZ`hAm zTCuXT2Vc+giwAFN*o^LVeFBb4Sj&^MO&5kb`v41PS58II>~nA;{lo znEnhDnY~wMKv0_TfHa*m$un=mPK0J(z#VRV!pt+LH$NM170C>7J^)Jet z#=L{YpR#(v30$CW&c)K&!*{fP)es1AncNe(ewc2cbnLCDkhtw8P@7tC+rY-nNg?I+ zY?WuBd{B8sHqKtK%$D3kI6(CMK&yJ!^%4T2vWU`wD!h_ml2?FXR>ep*_+xo}ugC6}cLtw+}kTsE>< zvjhI?u1*Uc>Z()vaY&P|(_E6VALaQz6nn8kR@rt)pwDMN^u=o(jx1=#JFd;jvww+B zWQspwjPE;_vJE2Qzto7fZBVp@zI9@u%1&m);AJ|PvG5N_btz_c#IE13L z#`pVXfVbI=*V@9`?~{1@?RYc0HLtyN!@51z+K_xv{s5v{a&Yhw%aUjS3He7{g0F?l z8>lelR*g#34jcUv$HHc{C`I+o{ck;(=?c$9TFiQp)@#g_SO@f+IEho-W-w$KNDYK0-7AYZ!=Q={-)9x8o=?00(e7=s^4bfAD_6edt;Co?>Z#zLx5jNhOn;atB8yY%&wvSjodY*fCU-A$w6VaZF8bo@$-=0{P6d^twa~c$Hx;#}T zr|;Q$7v@r4?Ca?R_YlPt9-*){h*TLkf?gQK0?0*u!LuYQ;r$ew6VWb9Y9cy`H2Z|S z>nSWzVvIp%VoW_uOH5-Qtr8#Cgu-2`ERmJ-`Mb+@bVlQ&HCw!QqO`N7o=opDQgD?} za+r92!V;sj2|ew1TRu_Xjw|`p_Q@mq*p_y8A#5v9W?!ZbWPq*={fQ^wK^BGtPLLIr zTGCFltgBbI1>aU zxR&3`%`Lf;1M!m-=D)-g4wk`#;F0|r9n4t;uM3&BKA%g?{8+$+Zoeu)1;&vDVRc3l zP2#^kV&Vx*)ul<_oRsH&tQ?;nU?DEi_~@nGh&v4WZLKSpuB1FTeeYGNROI|Kj->Ir zNZw}8k(Q1}Ze8A77+C93(OnAH<3eC6{W@Z6hduVf^z!>H*u$oS_wcNQgyZrXO-$eq z!Y+yzdqQkoBXaU@m%J@(f0g^yj+2{jH}l)SHQS61ZXVMX(gGH%&i1yOFN4)(tzGrc zjh~&r`TM)2{F6R8*+1g~-@9{0RDW=p$T4Z=nE$#nj54%4iG<0d31l}wO-TuH&kFNjqqB@Rs5$QsE?E zJo>@{W!~$laP0tkB@Gj2D%)Z%o8a&VaZiqMu#dA;?)C7uB1lWb%S$fThsI0kp zXYYo~%V+}c7TDgbPw8D%R){aI7kq^y97Z1D8_~-wv-vVm(7^ACb{lS_3dWHLCRy@K?TjA3`HTviI z+VdBBWw`ek;SDMkM_GzuUb{>;Y>h|6EH7$8g@3`pODrcRlSy6qx}W&9qN|wNu0k)( zEDR;t)*Bangf9%!cTO{ke9bTx%=AJl68C9+5<4{(=KGR0{Wg*7bb8-P;FMmTsfC)N zJC5l}{H0in>pD7BbPxQ25#yYc-3So$2{DXp1*2rG;nFi>P=wydV&%n7by}c@u+f_M zXse)`yaB<8rmYK4P1BS=iX9VZxkOq^bPqinJvcfu0o@0X_re|In`BV!2?lq|UiM}Q z%8bOJ$M1A-UoJl_ZK;}~x_Kc=lfl~VM6kLrqG*86D-hOq*2_bSt29nIW|MEzWtCqn zuo*t_h-NMgThT%boJ_+P^ne#Yc8tMPmK_+USw^%HLZhXmnj0*~7uTh}su&KR%j!^v z#Nu?wDVYkq8Y>JY>fueDQ+rfjYf9wDC5}t^%M-G~NTuDsR$e8m*XQx~w-((V74b=d zfpBGNOw-Ix7oNBQfd_jIRyMW-q%T3Ep-P8t=Yy8-v&G<5VB|#we6VjM19xLuV*W;-UkysgA+}6om< z$WSn2{h!D^Q2%>2(}wMf4}KGayk1D}iTy<)idN%B-tH3jMhA6-*B>E0E9ViG?|1?d zoq5TJA=vc2G8*d6MnX?x^Dz!hGdhR9^$3=4@;&)j5wA|_ zfbeO-Gj}Tt9?UW(u_e!D(>Mn^ShyWgcPlj>+N55>?w+RizGM3Ay3@*=*zPlJ!EVpg zB=pU7LYsabrQ|bRv_HlXJc6JIimO?{ucc+|UMr2zbF;~Ofr+{LgUh7FN$^{d9ZF08 zJQKYm$k0R+Nih|xxS0PohUtXex3SL4H4dm_6Kq(wOkdwkkc?-F`hr2EFo;KGrf$%CFU?Bxfqo+hEEZ6e$N~=|03D$uLMgN12eSS zd~=sAQuu86ge|F%3+!L?_sM*>JMtY|B-jyL`uE9{h7e@8{Nky6f=Ws?1AHog z;R>QtEsB37KkIS0>~3r#M`FIN8ob6-t=xD!MkdNdskDMe+ZSWU9dEcW)j@n-$UGxw z0CBI4DX=NTQ@lP0F~!XJ7+XdDSpTX?4pJjzzQAn}>N`vz3a>)h9dfjs*-o&}C}EC# z=--c^VWY((LM3wqUG1{hkVzm_p(B1m-%~bJc!=8U+rHznNYX?=B--&_c2K*M5C$~O zsK0s@(=y#-pNUCyPC{9wPbj)S<_G(!IbJC3L=F258za+1GN&J}J{=-<+cIDOr-ht8 z@dwYo>w~=i6LFX`Tfh2Glk*Pjr8>lUN4eH!)R#L*3ASZo0N7mD5vjuGn2r)w_oZU} zoxAH4)+-Fg=zB8Zz#h^-d>D~#%TN>QiFo{3pi8{HeMS87Ad!N#i;Dt_4mi^p-aqqe z-d>(5+Wm-{ic;A^U{QBaf)Y^jF6CTUN4icf9#@M@K-h>z-0^%fT|UWg=F(cpUiS!89m!^V%qj<K(`t%|9&1z%`Sno^9 zPezHNEQ-waw7YbtF@$-jwAL2-&S|C&HCaR#26T1LruvJ`zx&BNnlNfyi(i^f^9S^G3+ zBq}jC5734Kf%{d+MrB{!hyD|m$-WeFp+qY@$YiBws|sBRc#c3%s4L+6*;{3wk{_y} zk`I}Nlik74ib8FVat-t0Lcrc*>RQ%30=|2~oaNaiAMxHPV^vuG)qIh^tJX_dWVU$>53GUCr#-7Q~6DLCnVhnC$~ zets94Xp=)DvabM5^SjwM5Ey9&b3E`k;)lAWcC%}ISFgK@AoBKXUizif_UN3z$1S$q 
z65S$lX}UnDsC-4!MA~S>3a<##z*Wjh*e7-w5mH2r597sx~x_MUYd z=A4t8NWE-$Rf2Kpj!MR2HR*4d0*}qLnRS9EbeW3f#d8vBQ1~2@h87t$tr{qXB8$$u z&P;!GZM4i`Ie(d4xG?MAO$WYHD_~lJrdLpA-y;u;Mr|9N#fdZU)IJAW$6q>a(mn5Z zVd~?tE4QmK@`YYVrTj|RU?wZ^-}gux{(i)5B6rluZvWNCHl0-oDnUL9E$5~Zc?k_iV+1UT!z7V~tCVLJgtEGD>DEwoa^a$a< zBnBkDun`aDjd;0e=Fi4Y2~ZS}-4mfzf9&r=N{E^c-mtly+}v31qi24&(X-nFLW+MT zme1kAh9KKGI9&sukQ6absq_SLb*MjnPa*76a}4naKeAmc2yfG5BEBIjMiCDT35fDg zm{Sa4?-vi}XungeUlP^Uxn;$80u;u0u!v2La|=lmG^+`)hh0r9vtkoKf?NDYkCr!x zzVuaEw7O8dm)oN?P0a&9sEYsn_#>{trduabZ|Z*6y+quuFb4nXQ?m8+lHTv-!oIkW z19{`_E*6O395<6Rk`JK0)F}_Qu^_5roU{-^Yhh=oq9Wd`V@QefK=f6DPSz<@dWC!r5HG8Q& z8TEBIn;%$_8DQyr?9*B1QYM1*$KTia0cV9luBMJC~k*pwBu z{&B=~nAf7ubf?y4TrB=E@#`aWMqddV&SCW92A6E(v)P`}R6w>~C*R zDSmGcbCcTPC%8@+c>kO=;mu}oIxSWZD+A*d``J%zVZzlD`O^h8P$`J}8nD8VJb5nE z_yPSWLd83h(l1*|Twr5`AMUEh2L9%kYQorgn&V`#w|YaF=G1b~KilmxbpUpXI68UQ z+IaTmtkt$6Y(;6#G6x zZ+_{+H#HguYgN9KF4@8uh0ceaQ`7f}euMxANi!Sn2`cTzA5Hz}EN`oe!-}stE`TpIr z2S>IEZINBnu=h-2!&VXOOZJW8z`JB`Q`j5ckmHiOi?d_t9$BB#=$AkbP{$ z{ty4Gr+&36#$C5a$dDmzmZQdbfTv&bp0`!UkA&E4aogB;q~XYzFOmXX#W(wDm*X^a z*2l}V<|(IF@sJ>(Ztc&7Jr2b%Jg4npj1J2GnFWX`#p=qM5~vt{_&ZHac1zbsDBc2S zsO>@&?qWEvwA3Tay&oBdKDz~iHB_u{!=LULF1;#>7hq#Y@^hxCs_*|T7ExS{*EXjC z)9+~q0j#ApbEy=1WFNf{sgN*Dzi;N&0r){rM|s%J$#MGJ55%Sygn1oh2MT)TE8Kw0 zaVUZHmB{2W9rf(U__Mi*Z%z$?pBnFw6FLjP^mJ~}ch1Qt7A`A4gXCX;i3qQfM z6_1}M{yndsbe%{NDvQcw)j$yk5oAnLd0WHEvE=!TNGGz|gLr?ei7KVt&CY~JE3Pdx z;A31D^)vNqm7JaQJf@KC9d5A(RZE@R)>DJkKa7F5a%Zv6IUJ)c_z&A*>iVNFxkASvxrL|x|iCmEm zGs^Eiq7eg0tb@-VAg(BWE_Op%#{fK=#9ql4MLK*7G?4gkvn?<1l3kuusU^rr;UZzl zRgTYdDYA&8E4qq50+4SCa};WnS=_k?t}^2A%P*qJ3iM&99x9Z}TMm3TznM%e+Z9`Ne%PN+Y;tT&b9SOH>P-HhJCGV~Wq5KUh{f z!EZdi#=F%&!1cLC3oJ1f%U?;x`yG5h>j+DgU;P!aX*L_nHLWZEAG@*}@$a8zGUjO* z@3WW`tkxD%v+Q$su7BM+;cAEY_57Ak~ z^d_m^mu2zO(=CWw5oJ8%z z6&8x8A%@EHX-hW%-}@&uVGS*uYTf~4J`HZl;}k=LBYQ|w%F1^n?2-1-&YwF34toNd7QeVWCPlhm7U)4&4p=8^G^p9?Ki=7hb~R+y!1+nAbJ`@1G}E;jWH_ zrrqp?_Qa0=5L^tYp&g7{4Ze4#{B(faw1V*0L4?0vLxe!@!XlQa;!OrgYy;o&lIG6X z`A9HRn*WyUZbBK^u2-jjLMUEqk+2?Cqw7cz$7XW#i`dShi$@2)g%6hvS;~0D8@?xM zcct<|8~#8#QbdbVjY3E>Rp7;`J&!wx(;1u{fTxh72qHng2;$WlewlhU33A=e1@#&` zI#d0W(EAMJY@?TPkVI2|vYkaxn^w)5ON?a%ZOui$%GjK#oBD%%aVi(}`yTO{3_dvx zd@H?Bc7X7kK~+u84o2~6A=6v{!;Vo}H9I!RH6ru?Zka5_EtEv$8-9>|kl*n68SDW< zy}v)8$`UNXMGUyxaeLl2K#@)A({$Rri__U!A}kxTK2s&QGt=_D$Nma zf29rRioD5)YrcVOZ^`VBzh@J*f91M?a~NG7^y%|=J);h77y>25zsZh29E&TyV`4(REOcwipokg?H~ z%0CV$9VT-|R|xbxPV0RrflbBvX16>t{2eq&zBX2}_b`yJQmlu!Uj&1E7%kT05=!79 zp=D88Z@tud;~yw>j(kZnl#9|5-*$&aE>S3t(OAn!QaFAn;5B1AB+aLl$hEs!*q^n% z<$0ck>vWSR_LfSyR~}{ifosDG@=_Uhq}pQ}HO_BbDvoegZ`NlxeC1zE$P2}`Gg#~+!zc0@Q^f11FmH3ubJp(&&!>Z+#E zz4G|Oa50(eDHkT%?CuEuOHWZ;L5^>%h~LYyfp)Ktl+~CGO;>n+Dz+;Ec?=>;GLmRa z1vLVyCs#|I3BI4nLZ9}{-xg^foPEgr#o8Yjf)64mjQTz2Y*7I+Q>S#F!gOv8bWNR| zl*k7{#1;1o`T<#tkx9%-B&ymTOy92T8h!Y{7< z8jy$8LU_J^cAfqbT&%l5f_!I93i%B9q%E;ol+RefU7n!x_C9osL-cQ9{?UEbCutL; zD(rWRqH6mw8cQF{=dBKi0H=d>z(=cIxiKmi$At`p;gO0!4?6YIfIy)zxVJe zCtC$H+{991DAHH7_xikU9y zVV{d9t(&DVl~5WW3K4PGHy{8Y+)+7-ccV7 zs)qPod`zl;&`f6(_0?bXLE7sAh-qb^f0$jWS`?9naLNu8*E7u+tU=?1&QXF#VH z6T-lrh(%Y^16>sW zeiACm+@})DWwdLo8}N}W3bpCEL9n_+Y3mgIZ_~M({X}_NMikO>+5M+&?^%J`H{|Kv z^iCV6aK5FQ7cSm4=j}N|v2?S>RChlwmEB5HZIu1CWmQs3z_<=kSq4*| zxTH$<-~rULE-8gBc75A_1Gn_^q=K!|8{5gORe42=Keb^c411lUS_83)lSzg^ii=f&BwJgEKL? 
zl1(s$lBCuG{V}TOS-M{+GH_B@sVdEqM4w=kgi(A~D|B}km1br8E=6CAH+soTV z#tDX0YAZ%o3zT5Azytk$){Ct0W~l?#&A|c&4FJGbbIf@nd$+x=sa`FxWknll$|e-E zHg75NLE^nv14EzDCX8x91^J_F+p1vHck*@(j50xkYn5=n-wt;~skVggHUR2!ftCn8BSd zl}xXqtMn}N9mAH^4jy{%DF3`j=6kd?!pKg?@4oq{UV_XO1zKsfr$qHTrvmtO%02~x zOF1;w41E8sJ@yL>O2r+~z?Z^A3|NcJ|1j%2yO<*OnsJgRm%`0_oz2bnM(VcPD>n%@ zxvya0b2Ow9)!*qB*d{m6a7&V)8+s(`KmOv`pJHSoc?MULvG(w~oVf_0?DzSa7ciaw z9IOK-8dH^!CQo~?#bfmOq~BVt)a%ysO`Kca-d5=R%k2wVdF(ommZ7(#a zU;eX=)8fsMFs`85+`bhDY-~%d?8*YU5>wc!}JNrIRSWB zuu|ym%9o6EGMTc`Pz4hCngEP@*n{(oyj8LfIamH%X)E5_ubo|2+ahU9Rfur zJ<0Lb?E-m!QJNdA&H{cKf>PZr0$QSrXuai*Gl$uH!H)GUlN2Om-6=F!nG`AcQcm9u z>I&)pyQsd(XDFl!*y+2Z?rK+|4*syL#l`BnR=(=uUb7FbN1J9$-t{Rl*BRrFc+}q&!pMKzR><6&91hPx>L5o<8Uv0txhfiq1L8R8k&{djrW)kw@h|=Lj0&A`n zDdsg)4v5rM=SZ?DUb4RB%IsQr+@CBrz2z^{P^gpZEk7#Bg_A7(^MQiyW9ON!G-O%% zd{uRo$BtoUjiXL_`sZ#m%_wcX9~&3eMS1R2;!-VK%ndha!*n!!`8hq1PKRy7 z-6odsffA6gVo7FeNeX(zG}xSQnM(b7;nIL#B)9nOG&V0O7);MfSkZ&K$#X zN2b$Oiw|uh@<(SViQ_W2pr4%+Wp39&YVHceT+dZM4z7yqH&3n>|CHamRwhyYfVQ+o ztPAPI&aSY?J*WQ|C#G9^GfLw}`@rTkt~LBb=5uGPu^GHX!+3RJwaCZ$w40X7rH%BN z$j2fvme(H`kVd&@9{W8!ntrh3l!S9P7i3Y#D5YO&CmxUpe|YY8bbvQgmXY z#5|Q~Y77s7BiEj`#jm|0Gqmk2YBox(x3qh$z1Zn~LFaW%iT@*pa^It8|4_@bGq{v6p5SV0HWc&4}3G7OJ%+64bJ?1+>;K5z{^ZK6V z{7cRej8XAV^$;X6v4nKI7*}kQPv9$x_*t$P!sy=>w3PycfPgeq`URNOM*O&rVF?0T z8B6fChPYocl9IRf*SNao@$Q9KQvL~=aOQ1h$|4Vz7_zM@rid;(BFdCE<3g?wqB;Tj>J zV(Dnl!2!5ae%fm^hpD;UOisXdf^VYuxlnz%nZ9J8pIsWJqK#nW08p*w0w7c!Lw0Er zCpHOmf7BSi$~6xc*NeW--PzCc0@B%6CX(n%#y+KHO$7nm|KL24E3nh;3TGevUU5@& zg~w(H&Tu=?*!%Dov}0UAR`HwcbU~0^^<)V)(L30gX)j!8Nuqy3{PehQgb*owf5bl$ ziSE%e!*+TvlL*nt&{ZaLw5ye#HgA(5&>utr66#SYy;`@)$QGu8yKfuaoPJ97Tg#e~ z^^|kw6l4VtMADSL?IK%=by&_=URX-#kCF6ICcVK!bSWn6ob?gA1Mp^|FNqs7 z>3aLfp{J5x726B3r--cf&gIY(kJ9jgr=`Kj{jXU+hv+Z6dg4pK-c?c7>z@;xU#KLi zPPur+J111j`fg!}8vW+r{U78|ik@D|5Df|1$J$eFA`*!il(&X7=IJhp>lfZxyIqgS zALcGh$NyJTlXC3iV)M~gQ;h_SA-{N@L=b&muT`@9Wb@NrU20q;qY1uOReuB0Z{Fxt zHNn8_P3L?xyVmWIQ_LcB|Et^eRKh$xiSkEpHLSH}&x26jCG7@f0|@`3Sxtws2)>sU z0&7DHz@_`6%>YpfZ{c>QRH9aoyD8{+w29~_Cz<+RmO8jK#K*@s)~A2^+a)e|#CZ;V zoBs;L3a8Z$I!dN2AXnU9obEX0Bzf`%_sI~%>MNlWMaEzB`n#}f5tts#_yYAWs+aF# zjOYYQ+&3Z2mE{GWfBK{Yx`S4Q0(8TP=$#wTg@LoFijQsTF?!YaZFXvTq9wY6nAm;{ zXK5$)?J@-`?I$1*h%;eAz@wB1$5d6He&^57w6-C^+yCEfMAsZ&_ai^nhr+@%tu1lk zZo{gpk;TDngL?U;;|&2PiI@YeT(X*D`z^o62+}=dR;H6CQ<%idU$Qg}adtE^l~F5e zD$k@u4+<^)eN(j+$#lrr;yi%4YlVv%5R_7gfDDlf1ftcEL9|a=d884h>_RQ|T4>MV zTdjW3;VqK$>8jo1HV;dkU6v^H>$kjBqA2NdW`c6&3$b?Bm1t@$Uy5d?Oe07elyEzM zJ+An4?lE0@RL3Z(pDeV6fszgEwjkgIhr&CPY`*_!_>P7&KNd@KX-FC`l$rca!pGsu z31pfr0=fJ~M4cRlsLD{%m2=RAs;2DP^W^&;6qF}{XXAqIqw5yB3G)#5{QqJQ_%tJV zr-`j(v<~sV*qffsbYDw7A;b9Lzx5&|X)x9@j!D|(%3^l5cOy|M!z8t1GWc3Xa#h?F ztV#7;=338u*@&DZbfZm|sd;DwI#ua$@xhBkJbcaTD|hOV4}rGstAc1JTSUpD%B(0BlU#1$Byb?N zKo(O|_eMu|q|iRc&@UYJHF#pLXjRvM4?|%Pp7Bytzg5Q*W({{ zdLG|)F6M%3Kj{3eeMFvwVW0dKc-Zi^UakzY*147|nH{WFpVa)iEc4pDQTMX2TowPr zZKaJ`i+!loX)JP^w!$viMY08n$^R|0<+U4GMtc?Rv{F0X-{N08xNYyjzQ;yDX}npd z@#S2X-(lRBL*)gSUs_4m?S?fNR^HgFH51mUnt=op1YYt6s* zA2W|>8yS?Fl%JoQ`zF7B{@9uFt6Ok~?Fr+pn$WX4W@4E%zk9K+mKU|mB=C~+8+{Ll z?aYV607n%la}Fxir%CDq?ED;Rs{$aV;}9S5%58KQ*s9*1AOU?A@-y;<#GZqU$ev z;oEJGF?@fjy*=4ibw6vLIwF4Btiz#4+}jb9f!w(fhj9%+?O^O5O7F5izEvuD)xsnW zU-Ql_djSJGbClS=2DLjjgXSyRU7T$PFa(;`1>drN+0xh@2>^V_>Cs#`=vwC+7T?uH z1z>&OhQEKp^?Y=XGF$FraFnHDv-9MhNu9tz+ZR>b!qCONMa-_v`gPgYnN-60@~Un~ zWCbm7G>6X*Y&W#IqISsBnb(TTFjUWLYYkcsoanOm6z$yc??~xJ+VtDADTXUzRhvBMcy#j7(azF+sfQk z34U|jR$mvp)4Fk)8DtbQ%^n`)zJ_>`j3rRpzu_yBrf-9;ue-W&>&HTj_a5wc)Xv0X z5g-Xd-4*>#81IThVw;E(4H_YkPQb(Em<;ahG-Jw7uHspw1<^!Vx`==@1a~RzKzWi| 
literal 0
HcmV?d00001

diff --git a/docs/source/_static/thumbnails/load_csv.png b/docs/source/_static/thumbnails/load_csv.png
new file mode 100644
index 0000000000000000000000000000000000000000..a5cfe59b89bf211a4d1baef641356f7c15bc3abf
GIT binary patch
literal 2674
[... base85-encoded PNG data omitted ...]

Date: Wed, 2 Aug 2023 16:09:34 +0200
Subject: [PATCH 1382/2432] Replace image in gallery view (#7835)

---
 docs/source/_static/thumbnails/create_gnn.png | Bin 0 -> 47084 bytes
 docs/source/_static/thumbnails/create_gnn.svg | 53 ------------------
 docs/source/conf.py                           |  2 +-
 3 files changed, 1 insertion(+), 54 deletions(-)
 create mode 100644 docs/source/_static/thumbnails/create_gnn.png
 delete mode 100644 docs/source/_static/thumbnails/create_gnn.svg

diff --git a/docs/source/_static/thumbnails/create_gnn.png b/docs/source/_static/thumbnails/create_gnn.png
new file mode 100644
index 0000000000000000000000000000000000000000..9986c09c1d2abfebd3a6230946d74b4632418c3e
GIT binary patch
literal 47084